from pyramid.path import (
    DottedNameResolver
)
from zope.interface import Interface

from pyramid_urireferencer import protected_resources

from .referencer import Referencer
from .renderers import json_renderer


class IReferencer(Interface):
    pass


def includeme(config):
    """this function adds some configuration for the application"""
    config.add_route('references', '/references')
    _add_referencer(config.registry)
    config.add_view_deriver(protected_resources.protected_view)
    config.add_renderer('json_item', json_renderer)
    config.scan()


def _add_referencer(registry):
    """
    Gets the Referencer from config and adds it to the registry.
    """
    referencer = registry.queryUtility(IReferencer)
    if referencer is not None:
        return referencer
    ref = registry.settings['urireferencer.referencer']
    url = registry.settings['urireferencer.registry_url']
    r = DottedNameResolver()
    registry.registerUtility(r.resolve(ref)(url), IReferencer)
    return registry.queryUtility(IReferencer)


def get_referencer(registry):
    """
    Get the referencer class

    :rtype: pyramid_urireferencer.referencer.AbstractReferencer
    """
    # Argument might be a config or request
    regis = getattr(registry, 'registry', None)
    if regis is None:
        regis = registry
    return regis.queryUtility(IReferencer)
{ "content_hash": "ffb80f1cc93b6d9c47277079f80f4339", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 67, "avg_line_length": 28.541666666666668, "alnum_prop": 0.7277372262773723, "repo_name": "OnroerendErfgoed/pyramid_urireferencer", "id": "ec682373d98d35939612e33b7d0cf6b93e285ec7", "size": "1395", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyramid_urireferencer/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "43127" } ], "symlink_target": "" }
def extractRealmOfChaos(item):
    """
    #'Realm of Chaos'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    if 'Myriad of Shades' in item['tags']:
        names = [tmp for tmp in item['tags'] if tmp in ['Celest Ambrosia', 'Kiriko', 'Melanie Ambrosia', 'Shana Bonnet', 'Silvia', 'XCrossJ', 'Ghost']]
        postfix_out = ', '.join(names)
        if postfix:
            postfix_out += ' - ' + postfix
        return buildReleaseMessageWithType(item, 'Myriad of Shades', vol, chp, frag=frag, postfix=postfix_out, tl_type='oel')
    return False
{ "content_hash": "9b1c378c0801d5487732e89456166eee", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 145, "avg_line_length": 44.142857142857146, "alnum_prop": 0.6763754045307443, "repo_name": "fake-name/ReadableWebProxy", "id": "8640f47271121baaf1025afbcf1bd0a0f36774be", "size": "618", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "WebMirror/management/rss_parser_funcs/feed_parse_extractRealmOfChaos.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "105811" }, { "name": "Dockerfile", "bytes": "1178" }, { "name": "HTML", "bytes": "119737" }, { "name": "JavaScript", "bytes": "3006524" }, { "name": "Jupyter Notebook", "bytes": "148075" }, { "name": "Mako", "bytes": "1454" }, { "name": "Python", "bytes": "5264346" }, { "name": "Shell", "bytes": "1059" } ], "symlink_target": "" }
import copy import datetime import math import uuid import iso8601 import mock import netaddr from oslo_config import cfg import webob from nova.api.openstack.compute.contrib import networks_associate from nova.api.openstack.compute.contrib import os_networks as networks from nova.api.openstack.compute.plugins.v3 import networks as networks_v21 from nova.api.openstack.compute.plugins.v3 import networks_associate as \ networks_associate_v21 from nova.api.openstack import extensions import nova.context from nova import exception from nova.network import manager from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes import nova.utils CONF = cfg.CONF UTC = iso8601.iso8601.Utc() FAKE_NETWORKS = [ { 'bridge': 'br100', 'vpn_public_port': 1000, 'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0', 'updated_at': datetime.datetime(2011, 8, 16, 9, 26, 13, 48257, tzinfo=UTC), 'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047', 'cidr_v6': None, 'deleted_at': None, 'gateway': '10.0.0.1', 'label': 'mynet_0', 'project_id': '1234', 'rxtx_base': None, 'vpn_private_address': '10.0.0.2', 'deleted': False, 'vlan': 100, 'broadcast': '10.0.0.7', 'netmask': '255.255.255.248', 'injected': False, 'cidr': '10.0.0.0/29', 'vpn_public_address': '127.0.0.1', 'multi_host': False, 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop', 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525, tzinfo=UTC), 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True, 'share_address': False, }, { 'bridge': 'br101', 'vpn_public_port': 1001, 'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0', 'updated_at': None, 'id': 2, 'cidr_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000', 'deleted_at': None, 'gateway': '10.0.0.9', 'label': 'mynet_1', 'project_id': None, 'vpn_private_address': '10.0.0.10', 'deleted': False, 'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None, 'netmask': '255.255.255.248', 'injected': False, 'cidr': '10.0.0.10/29', 'vpn_public_address': None, 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None, 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495, tzinfo=UTC), 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True, 'share_address': False, }, ] FAKE_USER_NETWORKS = [ { 'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248', 'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None, 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0', 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047', }, { 'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248', 'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None, 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1', 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000', }, ] NEW_NETWORK = { "network": { "bridge_interface": "eth0", "cidr": "10.20.105.0/24", "label": "new net 111", "vlan_start": 111, "multi_host": False, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True, 'share_address': False, } } class FakeNetworkAPI(object): _sentinel = object() _vlan_is_disabled = False def __init__(self, skip_policy_check=False): self.networks = copy.deepcopy(FAKE_NETWORKS) def disable_vlan(self): self._vlan_is_disabled = True def delete(self, context, network_id): if network_id == 'always_delete': return True if network_id == -1: raise exception.NetworkInUse(network_id=network_id) for i, network in 
enumerate(self.networks): if network['id'] == network_id: del self.networks[0] return True raise exception.NetworkNotFoundForUUID(uuid=network_id) def disassociate(self, context, network_uuid): for network in self.networks: if network.get('uuid') == network_uuid: network['project_id'] = None return True raise exception.NetworkNotFound(network_id=network_uuid) def associate(self, context, network_uuid, host=_sentinel, project=_sentinel): for network in self.networks: if network.get('uuid') == network_uuid: if host is not FakeNetworkAPI._sentinel: network['host'] = host if project is not FakeNetworkAPI._sentinel: network['project_id'] = project return True raise exception.NetworkNotFound(network_id=network_uuid) def add_network_to_project(self, context, project_id, network_uuid=None): if self._vlan_is_disabled: raise NotImplementedError() if network_uuid: for network in self.networks: if network.get('project_id', None) is None: network['project_id'] = project_id return return for network in self.networks: if network.get('uuid') == network_uuid: network['project_id'] = project_id return def get_all(self, context): return self._fake_db_network_get_all(context, project_only=True) def _fake_db_network_get_all(self, context, project_only="allow_none"): project_id = context.project_id nets = self.networks if nova.context.is_user_context(context) and project_only: if project_only == 'allow_none': nets = [n for n in self.networks if (n['project_id'] == project_id or n['project_id'] is None)] else: nets = [n for n in self.networks if n['project_id'] == project_id] objs = [objects.Network._from_db_object(context, objects.Network(), net) for net in nets] return objects.NetworkList(objects=objs) def get(self, context, network_id): for network in self.networks: if network.get('uuid') == network_id: if 'injected' in network and network['injected'] is None: # NOTE: This is a workaround for passing unit tests. # When using nova-network, 'injected' value should be # boolean because of the definition of objects.Network(). # However, 'injected' value can be None if neutron. # So here changes the value to False just for passing # following _from_db_object(). 
network['injected'] = False return objects.Network._from_db_object(context, objects.Network(), network) raise exception.NetworkNotFound(network_id=network_id) def create(self, context, **kwargs): subnet_bits = int(math.ceil(math.log(kwargs.get( 'network_size', CONF.network_size), 2))) fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr']) prefixlen_v4 = 32 - subnet_bits subnets_v4 = list(fixed_net_v4.subnet( prefixlen_v4, count=kwargs.get('num_networks', CONF.num_networks))) new_networks = [] new_id = max((net['id'] for net in self.networks)) for index, subnet_v4 in enumerate(subnets_v4): new_id += 1 net = {'id': new_id, 'uuid': str(uuid.uuid4())} net['cidr'] = str(subnet_v4) net['netmask'] = str(subnet_v4.netmask) net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1]) net['broadcast'] = str(subnet_v4.broadcast) net['dhcp_start'] = str(subnet_v4[2]) for key in FAKE_NETWORKS[0].iterkeys(): net.setdefault(key, kwargs.get(key)) new_networks.append(net) self.networks += new_networks return new_networks # NOTE(vish): tests that network create Exceptions actually return # the proper error responses class NetworkCreateExceptionsTestV21(test.TestCase): validation_error = exception.ValidationError class PassthroughAPI(object): def __init__(self): self.network_manager = manager.FlatDHCPManager() def create(self, *args, **kwargs): if kwargs['label'] == 'fail_NetworkNotCreated': raise exception.NetworkNotCreated(req='fake_fail') return self.network_manager.create_networks(*args, **kwargs) def setUp(self): super(NetworkCreateExceptionsTestV21, self).setUp() self._setup() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) self.new_network = copy.deepcopy(NEW_NETWORK) self.req = fakes.HTTPRequest.blank('') def _setup(self): self.controller = networks_v21.NetworkController(self.PassthroughAPI()) def test_network_create_bad_vlan(self): self.new_network['network']['vlan_start'] = 'foo' self.assertRaises(self.validation_error, self.controller.create, self.req, body=self.new_network) def test_network_create_no_cidr(self): self.new_network['network']['cidr'] = '' self.assertRaises(self.validation_error, self.controller.create, self.req, body=self.new_network) def test_network_create_invalid_fixed_cidr(self): self.new_network['network']['fixed_cidr'] = 'foo' self.assertRaises(self.validation_error, self.controller.create, self.req, body=self.new_network) def test_network_create_invalid_start(self): self.new_network['network']['allowed_start'] = 'foo' self.assertRaises(self.validation_error, self.controller.create, self.req, body=self.new_network) def test_network_create_bad_cidr(self): self.new_network['network']['cidr'] = '128.0.0.0/900' self.assertRaises(self.validation_error, self.controller.create, self.req, body=self.new_network) def test_network_create_handle_network_not_created(self): self.new_network['network']['label'] = 'fail_NetworkNotCreated' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.new_network) def test_network_create_cidr_conflict(self): @staticmethod def get_all(context): ret = objects.NetworkList(context=context, objects=[]) net = objects.Network(cidr='10.0.0.0/23') ret.objects.append(net) return ret self.stubs.Set(objects.NetworkList, 'get_all', get_all) self.new_network['network']['cidr'] = '10.0.0.0/24' self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.req, body=self.new_network) class NetworkCreateExceptionsTestV2(NetworkCreateExceptionsTestV21): validation_error = webob.exc.HTTPBadRequest def 
_setup(self): ext_mgr = extensions.ExtensionManager() ext_mgr.extensions = {'os-extended-networks': 'fake'} self.controller = networks.NetworkController( self.PassthroughAPI(), ext_mgr) def test_network_create_with_both_cidr_and_cidr_v6(self): # NOTE: v2.0 API cannot handle this case, so we need to just # skip it on the API. pass class NetworksTestV21(test.NoDBTestCase): validation_error = exception.ValidationError def setUp(self): super(NetworksTestV21, self).setUp() self.fake_network_api = FakeNetworkAPI() self._setup() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) self.new_network = copy.deepcopy(NEW_NETWORK) self.req = fakes.HTTPRequest.blank('') self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True) def _setup(self): self.controller = networks_v21.NetworkController( self.fake_network_api) def _check_status(self, res, method, code): self.assertEqual(method.wsgi_code, code) @staticmethod def network_uuid_to_id(network): network['id'] = network['uuid'] del network['uuid'] def test_network_list_all_as_user(self): self.maxDiff = None res_dict = self.controller.index(self.req) self.assertEqual(res_dict, {'networks': []}) project_id = self.req.environ["nova.context"].project_id cxt = self.req.environ["nova.context"] uuid = FAKE_NETWORKS[0]['uuid'] self.fake_network_api.associate(context=cxt, network_uuid=uuid, project=project_id) res_dict = self.controller.index(self.req) expected = [copy.deepcopy(FAKE_USER_NETWORKS[0])] for network in expected: self.network_uuid_to_id(network) self.assertEqual({'networks': expected}, res_dict) def test_network_list_all_as_admin(self): res_dict = self.controller.index(self.admin_req) expected = copy.deepcopy(FAKE_NETWORKS) for network in expected: self.network_uuid_to_id(network) self.assertEqual({'networks': expected}, res_dict) def test_network_disassociate(self): uuid = FAKE_NETWORKS[0]['uuid'] res = self.controller._disassociate_host_and_project( self.req, uuid, {'disassociate': None}) self._check_status(res, self.controller._disassociate_host_and_project, 202) self.assertIsNone(self.fake_network_api.networks[0]['project_id']) self.assertIsNone(self.fake_network_api.networks[0]['host']) def test_network_disassociate_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller._disassociate_host_and_project, self.req, 100, {'disassociate': None}) def test_network_get_as_user(self): uuid = FAKE_USER_NETWORKS[0]['uuid'] res_dict = self.controller.show(self.req, uuid) expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])} self.network_uuid_to_id(expected['network']) self.assertEqual(expected, res_dict) def test_network_get_as_admin(self): uuid = FAKE_NETWORKS[0]['uuid'] res_dict = self.controller.show(self.admin_req, uuid) expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])} self.network_uuid_to_id(expected['network']) self.assertEqual(expected, res_dict) def test_network_get_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, self.req, 100) def test_network_delete(self): res = self.controller.delete(self.req, 1) self._check_status(res, self.controller.delete, 202) def test_network_delete_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, self.req, 100) def test_network_delete_in_use(self): self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, self.req, -1) def test_network_add(self): uuid = FAKE_NETWORKS[1]['uuid'] res = self.controller.add(self.req, body={'id': uuid}) self._check_status(res, self.controller.add, 
202) res_dict = self.controller.show(self.admin_req, uuid) self.assertEqual(res_dict['network']['project_id'], 'fake') @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.' 'FakeNetworkAPI.add_network_to_project', side_effect=exception.NoMoreNetworks) def test_network_add_no_more_networks_fail(self, mock_add): uuid = FAKE_NETWORKS[1]['uuid'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, self.req, body={'id': uuid}) @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.' 'FakeNetworkAPI.add_network_to_project', side_effect=exception.NetworkNotFoundForUUID(uuid='fake_uuid')) def test_network_add_network_not_found_networks_fail(self, mock_add): uuid = FAKE_NETWORKS[1]['uuid'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, self.req, body={'id': uuid}) def test_network_add_network_without_body(self): self.assertRaises(self.validation_error, self.controller.add, self.req, body=None) def test_network_add_network_with_invalid_id(self): self.assertRaises(exception.ValidationError, self.controller.add, self.req, body={'id': 123}) def test_network_add_network_with_extra_arg(self): uuid = FAKE_NETWORKS[1]['uuid'] self.assertRaises(exception.ValidationError, self.controller.add, self.req, body={'id': uuid, 'extra_arg': 123}) def test_network_add_network_with_none_id(self): res = self.controller.add(self.req, body={'id': None}) self._check_status(res, self.controller.add, 202) def test_network_create(self): res_dict = self.controller.create(self.req, body=self.new_network) self.assertIn('network', res_dict) uuid = res_dict['network']['id'] res_dict = self.controller.show(self.req, uuid) self.assertTrue(res_dict['network']['label']. startswith(NEW_NETWORK['network']['label'])) def test_network_create_large(self): self.new_network['network']['cidr'] = '128.0.0.0/4' res_dict = self.controller.create(self.req, body=self.new_network) self.assertEqual(res_dict['network']['cidr'], self.new_network['network']['cidr']) def test_network_neutron_disassociate_not_implemented(self): uuid = FAKE_NETWORKS[1]['uuid'] self.flags(network_api_class='nova.network.neutronv2.api.API') controller = networks.NetworkController() self.assertRaises(webob.exc.HTTPNotImplemented, controller._disassociate_host_and_project, self.req, uuid, {'disassociate': None}) class NetworksTestV2(NetworksTestV21): validation_error = webob.exc.HTTPUnprocessableEntity def _setup(self): ext_mgr = extensions.ExtensionManager() ext_mgr.extensions = {'os-extended-networks': 'fake'} self.controller = networks.NetworkController(self.fake_network_api, ext_mgr) def _check_status(self, res, method, code): self.assertEqual(res.status_int, code) def test_network_create_not_extended(self): self.stubs.Set(self.controller, 'extended', False) # NOTE(vish): Verify that new params are not passed through if # extension is not enabled. 
def no_mtu(*args, **kwargs): if 'mtu' in kwargs: raise test.TestingException("mtu should not pass through") return [{}] self.stubs.Set(self.controller.network_api, 'create', no_mtu) self.new_network['network']['mtu'] = 9000 self.controller.create(self.req, body=self.new_network) def test_network_add_network_with_invalid_id(self): pass def test_network_add_network_with_extra_arg(self): pass class NetworksAssociateTestV21(test.NoDBTestCase): def setUp(self): super(NetworksAssociateTestV21, self).setUp() self.fake_network_api = FakeNetworkAPI() self._setup() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) self.req = fakes.HTTPRequest.blank('') self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True) def _setup(self): self.controller = networks.NetworkController(self.fake_network_api) self.associate_controller = networks_associate_v21\ .NetworkAssociateActionController(self.fake_network_api) def _check_status(self, res, method, code): self.assertEqual(method.wsgi_code, code) def test_network_disassociate_host_only(self): uuid = FAKE_NETWORKS[0]['uuid'] res = self.associate_controller._disassociate_host_only( self.req, uuid, {'disassociate_host': None}) self._check_status(res, self.associate_controller._disassociate_host_only, 202) self.assertIsNotNone(self.fake_network_api.networks[0]['project_id']) self.assertIsNone(self.fake_network_api.networks[0]['host']) def test_network_disassociate_project_only(self): uuid = FAKE_NETWORKS[0]['uuid'] res = self.associate_controller._disassociate_project_only( self.req, uuid, {'disassociate_project': None}) self._check_status( res, self.associate_controller._disassociate_project_only, 202) self.assertIsNone(self.fake_network_api.networks[0]['project_id']) self.assertIsNotNone(self.fake_network_api.networks[0]['host']) def test_network_disassociate_project_network_delete(self): uuid = FAKE_NETWORKS[1]['uuid'] req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid) res = self.associate_controller._disassociate_project_only( req, uuid, {'disassociate_project': None}) self._check_status( res, self.associate_controller._disassociate_project_only, 202) self.assertIsNone(self.fake_network_api.networks[1]['project_id']) res = self.controller.delete(req, 1) self.assertEqual(202, res.status_int) def test_network_associate_project_delete_fail(self): uuid = FAKE_NETWORKS[0]['uuid'] req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, req, -1) def test_network_associate_with_host(self): uuid = FAKE_NETWORKS[1]['uuid'] res = self.associate_controller._associate_host( self.req, uuid, body={'associate_host': "TestHost"}) self._check_status(res, self.associate_controller._associate_host, 202) res_dict = self.controller.show(self.admin_req, uuid) self.assertEqual(res_dict['network']['host'], 'TestHost') def test_network_neutron_associate_not_implemented(self): uuid = FAKE_NETWORKS[1]['uuid'] self.flags(network_api_class='nova.network.neutronv2.api.API') assoc_ctrl = networks_associate.NetworkAssociateActionController() self.assertRaises(webob.exc.HTTPNotImplemented, assoc_ctrl._associate_host, self.req, uuid, {'associate_host': "TestHost"}) def _test_network_neutron_associate_host_validation_failed(self, body): uuid = FAKE_NETWORKS[1]['uuid'] req = fakes.HTTPRequest.blank('') self.assertRaises(exception.ValidationError, self.associate_controller._associate_host, req, uuid, body=body) def 
test_network_neutron_associate_host_non_string(self): self._test_network_neutron_associate_host_validation_failed( {'associate_host': 123}) def test_network_neutron_associate_host_empty_body(self): self._test_network_neutron_associate_host_validation_failed({}) def test_network_neutron_associate_bad_associate_host_key(self): self._test_network_neutron_associate_host_validation_failed( {'badassociate_host': "TestHost"}) def test_network_neutron_associate_host_extra_arg(self): self._test_network_neutron_associate_host_validation_failed( {'associate_host': "TestHost", 'extra_arg': "extra_arg"}) def test_network_neutron_disassociate_project_not_implemented(self): uuid = FAKE_NETWORKS[1]['uuid'] self.flags(network_api_class='nova.network.neutronv2.api.API') assoc_ctrl = networks_associate.NetworkAssociateActionController() self.assertRaises(webob.exc.HTTPNotImplemented, assoc_ctrl._disassociate_project_only, self.req, uuid, {'disassociate_project': None}) def test_network_neutron_disassociate_host_not_implemented(self): uuid = FAKE_NETWORKS[1]['uuid'] self.flags(network_api_class='nova.network.neutronv2.api.API') assoc_ctrl = networks_associate.NetworkAssociateActionController() self.assertRaises(webob.exc.HTTPNotImplemented, assoc_ctrl._disassociate_host_only, self.req, uuid, {'disassociate_host': None}) class NetworksAssociateTestV2(NetworksAssociateTestV21): def _setup(self): ext_mgr = extensions.ExtensionManager() ext_mgr.extensions = {'os-extended-networks': 'fake'} self.controller = networks.NetworkController( self.fake_network_api, ext_mgr) self.associate_controller = networks_associate\ .NetworkAssociateActionController(self.fake_network_api) def _check_status(self, res, method, code): self.assertEqual(res.status_int, code) def _test_network_neutron_associate_host_validation_failed(self, body): pass
{ "content_hash": "92bdcbf8f77431c9204c85239200f1ff", "timestamp": "", "source": "github", "line_count": 635, "max_line_length": 79, "avg_line_length": 42.188976377952756, "alnum_prop": 0.5911160880925719, "repo_name": "orbitfp7/nova", "id": "2d9a7fe3f4f6ca773f8c2784bb6faac6edd83204", "size": "27457", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nova/tests/unit/api/openstack/compute/contrib/test_networks.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "3272" }, { "name": "Python", "bytes": "15640028" }, { "name": "Shell", "bytes": "20716" }, { "name": "XML", "bytes": "45493" } ], "symlink_target": "" }
"""The Dune HD component.""" import asyncio from pdunehd import DuneHDPlayer from homeassistant.const import CONF_HOST from .const import DOMAIN PLATFORMS = ["media_player"] async def async_setup_entry(hass, config_entry): """Set up a config entry.""" host = config_entry.data[CONF_HOST] player = DuneHDPlayer(host) hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = player for platform in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, platform) ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, platform) for platform in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok
{ "content_hash": "3a21cb07e144d289667ddacc42326282", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 86, "avg_line_length": 22.818181818181817, "alnum_prop": 0.6354581673306773, "repo_name": "adrienbrault/home-assistant", "id": "10c66c3bfb06f8fe03bf9589f38996eeed09229c", "size": "1004", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/dunehd/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "32021043" }, { "name": "Shell", "bytes": "4900" } ], "symlink_target": "" }
import sys from antlr3 import * from antlr3.compat import set, frozenset from antlr3.tree import * HIDDEN = BaseRecognizer.HIDDEN UNDERSCORE=53 GEOPOINT=33 UNICODE_ESC=56 LT=11 TEXT=27 HTML=28 MINUS=18 RSQUARE=25 SNIPPET=44 PHRASE=35 T__58=58 INDEX=5 OCTAL_ESC=57 NUMBER=31 DISTANCE=39 LOG=40 LPAREN=21 RPAREN=22 EQ=15 NAME=26 GEO=32 DATE=30 NOT=10 MIN=42 ASCII_LETTER=52 AND=7 NE=16 POW=43 XOR=9 COUNT=38 SWITCH=45 DOLLAR=54 COND=6 PLUS=17 QUOTE=47 FLOAT=34 MAX=41 INT=24 ATOM=29 NAME_START=50 ABS=37 HEX_DIGIT=55 ESC_SEQ=48 WS=51 EOF=-1 GE=14 COMMA=36 OR=8 TIMES=19 GT=13 DIGIT=46 DIV=20 NEG=4 LSQUARE=23 LE=12 EXPONENT=49 tokenNames = [ "<invalid>", "<EOR>", "<DOWN>", "<UP>", "NEG", "INDEX", "COND", "AND", "OR", "XOR", "NOT", "LT", "LE", "GT", "GE", "EQ", "NE", "PLUS", "MINUS", "TIMES", "DIV", "LPAREN", "RPAREN", "LSQUARE", "INT", "RSQUARE", "NAME", "TEXT", "HTML", "ATOM", "DATE", "NUMBER", "GEO", "GEOPOINT", "FLOAT", "PHRASE", "COMMA", "ABS", "COUNT", "DISTANCE", "LOG", "MAX", "MIN", "POW", "SNIPPET", "SWITCH", "DIGIT", "QUOTE", "ESC_SEQ", "EXPONENT", "NAME_START", "WS", "ASCII_LETTER", "UNDERSCORE", "DOLLAR", "HEX_DIGIT", "UNICODE_ESC", "OCTAL_ESC", "'.'" ] class ExpressionParser(Parser): grammarFileName = "" # antlr_version = version_str_to_tuple("3.1.1") antlr_version_str = "3.1.1" tokenNames = tokenNames def __init__(self, input, state=None): if state is None: state = RecognizerSharedState() Parser.__init__(self, input, state) self.dfa9 = self.DFA9( self, 9, eot = self.DFA9_eot, eof = self.DFA9_eof, min = self.DFA9_min, max = self.DFA9_max, accept = self.DFA9_accept, special = self.DFA9_special, transition = self.DFA9_transition ) self.dfa10 = self.DFA10( self, 10, eot = self.DFA10_eot, eof = self.DFA10_eof, min = self.DFA10_min, max = self.DFA10_max, accept = self.DFA10_accept, special = self.DFA10_special, transition = self.DFA10_transition ) self._adaptor = CommonTreeAdaptor() def getTreeAdaptor(self): return self._adaptor def setTreeAdaptor(self, adaptor): self._adaptor = adaptor adaptor = property(getTreeAdaptor, setTreeAdaptor) def mismatch(input, ttype, follow): raise MismatchedTokenException(ttype, input) def recoverFromMismatchedSet(input, e, follow): raise e class expression_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def expression(self, ): retval = self.expression_return() retval.start = self.input.LT(1) root_0 = None EOF2 = None conjunction1 = None EOF2_tree = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_conjunction_in_expression90) conjunction1 = self.conjunction() self._state.following.pop() self._adaptor.addChild(root_0, conjunction1.tree) EOF2=self.match(self.input, EOF, self.FOLLOW_EOF_in_expression92) EOF2_tree = self._adaptor.createWithPayload(EOF2) self._adaptor.addChild(root_0, EOF2_tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class condExpr_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def condExpr(self, ): retval = self.condExpr_return() retval.start = self.input.LT(1) root_0 = None COND4 = None conjunction3 = None addExpr5 = None COND4_tree = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_conjunction_in_condExpr105) conjunction3 = self.conjunction() 
self._state.following.pop() self._adaptor.addChild(root_0, conjunction3.tree) alt1 = 2 LA1_0 = self.input.LA(1) if (LA1_0 == COND) : alt1 = 1 if alt1 == 1: pass COND4=self.match(self.input, COND, self.FOLLOW_COND_in_condExpr108) COND4_tree = self._adaptor.createWithPayload(COND4) root_0 = self._adaptor.becomeRoot(COND4_tree, root_0) self._state.following.append(self.FOLLOW_addExpr_in_condExpr111) addExpr5 = self.addExpr() self._state.following.pop() self._adaptor.addChild(root_0, addExpr5.tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class conjunction_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def conjunction(self, ): retval = self.conjunction_return() retval.start = self.input.LT(1) root_0 = None AND7 = None disjunction6 = None disjunction8 = None AND7_tree = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_disjunction_in_conjunction126) disjunction6 = self.disjunction() self._state.following.pop() self._adaptor.addChild(root_0, disjunction6.tree) while True: alt2 = 2 LA2_0 = self.input.LA(1) if (LA2_0 == AND) : alt2 = 1 if alt2 == 1: pass AND7=self.match(self.input, AND, self.FOLLOW_AND_in_conjunction129) AND7_tree = self._adaptor.createWithPayload(AND7) root_0 = self._adaptor.becomeRoot(AND7_tree, root_0) self._state.following.append(self.FOLLOW_disjunction_in_conjunction132) disjunction8 = self.disjunction() self._state.following.pop() self._adaptor.addChild(root_0, disjunction8.tree) else: break retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class disjunction_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def disjunction(self, ): retval = self.disjunction_return() retval.start = self.input.LT(1) root_0 = None set10 = None negation9 = None negation11 = None set10_tree = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_negation_in_disjunction147) negation9 = self.negation() self._state.following.pop() self._adaptor.addChild(root_0, negation9.tree) while True: alt3 = 2 LA3_0 = self.input.LA(1) if ((OR <= LA3_0 <= XOR)) : alt3 = 1 if alt3 == 1: pass set10 = self.input.LT(1) set10 = self.input.LT(1) if (OR <= self.input.LA(1) <= XOR): self.input.consume() root_0 = self._adaptor.becomeRoot(self._adaptor.createWithPayload(set10), root_0) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse self._state.following.append(self.FOLLOW_negation_in_disjunction159) negation11 = self.negation() self._state.following.pop() self._adaptor.addChild(root_0, negation11.tree) else: break retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class negation_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def negation(self, ): retval = self.negation_return() retval.start = self.input.LT(1) root_0 = None NOT13 = None 
cmpExpr12 = None cmpExpr14 = None NOT13_tree = None try: try: alt4 = 2 LA4_0 = self.input.LA(1) if (LA4_0 == MINUS or LA4_0 == LPAREN or LA4_0 == INT or (NAME <= LA4_0 <= PHRASE) or (ABS <= LA4_0 <= SWITCH)) : alt4 = 1 elif (LA4_0 == NOT) : alt4 = 2 else: nvae = NoViableAltException("", 4, 0, self.input) raise nvae if alt4 == 1: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_cmpExpr_in_negation174) cmpExpr12 = self.cmpExpr() self._state.following.pop() self._adaptor.addChild(root_0, cmpExpr12.tree) elif alt4 == 2: pass root_0 = self._adaptor.nil() NOT13=self.match(self.input, NOT, self.FOLLOW_NOT_in_negation180) NOT13_tree = self._adaptor.createWithPayload(NOT13) root_0 = self._adaptor.becomeRoot(NOT13_tree, root_0) self._state.following.append(self.FOLLOW_cmpExpr_in_negation183) cmpExpr14 = self.cmpExpr() self._state.following.pop() self._adaptor.addChild(root_0, cmpExpr14.tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class cmpExpr_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def cmpExpr(self, ): retval = self.cmpExpr_return() retval.start = self.input.LT(1) root_0 = None addExpr15 = None cmpOp16 = None addExpr17 = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_addExpr_in_cmpExpr196) addExpr15 = self.addExpr() self._state.following.pop() self._adaptor.addChild(root_0, addExpr15.tree) alt5 = 2 LA5_0 = self.input.LA(1) if ((LT <= LA5_0 <= NE)) : alt5 = 1 if alt5 == 1: pass self._state.following.append(self.FOLLOW_cmpOp_in_cmpExpr199) cmpOp16 = self.cmpOp() self._state.following.pop() root_0 = self._adaptor.becomeRoot(cmpOp16.tree, root_0) self._state.following.append(self.FOLLOW_addExpr_in_cmpExpr202) addExpr17 = self.addExpr() self._state.following.pop() self._adaptor.addChild(root_0, addExpr17.tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class cmpOp_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def cmpOp(self, ): retval = self.cmpOp_return() retval.start = self.input.LT(1) root_0 = None set18 = None set18_tree = None try: try: pass root_0 = self._adaptor.nil() set18 = self.input.LT(1) if (LT <= self.input.LA(1) <= NE): self.input.consume() self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set18)) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class addExpr_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def addExpr(self, ): retval = self.addExpr_return() retval.start = self.input.LT(1) root_0 = None multExpr19 = None addOp20 = None multExpr21 = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_multExpr_in_addExpr260) multExpr19 = self.multExpr() self._state.following.pop() 
self._adaptor.addChild(root_0, multExpr19.tree) while True: alt6 = 2 LA6_0 = self.input.LA(1) if ((PLUS <= LA6_0 <= MINUS)) : alt6 = 1 if alt6 == 1: pass self._state.following.append(self.FOLLOW_addOp_in_addExpr263) addOp20 = self.addOp() self._state.following.pop() root_0 = self._adaptor.becomeRoot(addOp20.tree, root_0) self._state.following.append(self.FOLLOW_multExpr_in_addExpr266) multExpr21 = self.multExpr() self._state.following.pop() self._adaptor.addChild(root_0, multExpr21.tree) else: break retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class addOp_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def addOp(self, ): retval = self.addOp_return() retval.start = self.input.LT(1) root_0 = None set22 = None set22_tree = None try: try: pass root_0 = self._adaptor.nil() set22 = self.input.LT(1) if (PLUS <= self.input.LA(1) <= MINUS): self.input.consume() self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set22)) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class multExpr_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def multExpr(self, ): retval = self.multExpr_return() retval.start = self.input.LT(1) root_0 = None unary23 = None multOp24 = None unary25 = None try: try: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_unary_in_multExpr300) unary23 = self.unary() self._state.following.pop() self._adaptor.addChild(root_0, unary23.tree) while True: alt7 = 2 LA7_0 = self.input.LA(1) if ((TIMES <= LA7_0 <= DIV)) : alt7 = 1 if alt7 == 1: pass self._state.following.append(self.FOLLOW_multOp_in_multExpr303) multOp24 = self.multOp() self._state.following.pop() root_0 = self._adaptor.becomeRoot(multOp24.tree, root_0) self._state.following.append(self.FOLLOW_unary_in_multExpr306) unary25 = self.unary() self._state.following.pop() self._adaptor.addChild(root_0, unary25.tree) else: break retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class multOp_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def multOp(self, ): retval = self.multOp_return() retval.start = self.input.LT(1) root_0 = None set26 = None set26_tree = None try: try: pass root_0 = self._adaptor.nil() set26 = self.input.LT(1) if (TIMES <= self.input.LA(1) <= DIV): self.input.consume() self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set26)) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class unary_return(ParserRuleReturnScope): def 
__init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def unary(self, ): retval = self.unary_return() retval.start = self.input.LT(1) root_0 = None MINUS27 = None atom28 = None atom29 = None MINUS27_tree = None stream_MINUS = RewriteRuleTokenStream(self._adaptor, "token MINUS") stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom") try: try: alt8 = 2 LA8_0 = self.input.LA(1) if (LA8_0 == MINUS) : alt8 = 1 elif (LA8_0 == LPAREN or LA8_0 == INT or (NAME <= LA8_0 <= PHRASE) or (ABS <= LA8_0 <= SWITCH)) : alt8 = 2 else: nvae = NoViableAltException("", 8, 0, self.input) raise nvae if alt8 == 1: pass MINUS27=self.match(self.input, MINUS, self.FOLLOW_MINUS_in_unary340) stream_MINUS.add(MINUS27) self._state.following.append(self.FOLLOW_atom_in_unary342) atom28 = self.atom() self._state.following.pop() stream_atom.add(atom28.tree) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() root_1 = self._adaptor.nil() root_1 = self._adaptor.becomeRoot(self._adaptor.create(NEG, "-"), root_1) self._adaptor.addChild(root_1, stream_atom.nextTree()) self._adaptor.addChild(root_0, root_1) retval.tree = root_0 elif alt8 == 2: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_atom_in_unary357) atom29 = self.atom() self._state.following.pop() self._adaptor.addChild(root_0, atom29.tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class atom_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def atom(self, ): retval = self.atom_return() retval.start = self.input.LT(1) root_0 = None LPAREN34 = None RPAREN36 = None var30 = None num31 = None str32 = None fn33 = None conjunction35 = None LPAREN34_tree = None RPAREN36_tree = None stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN") stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN") stream_conjunction = RewriteRuleSubtreeStream(self._adaptor, "rule conjunction") try: try: alt9 = 5 alt9 = self.dfa9.predict(self.input) if alt9 == 1: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_var_in_atom370) var30 = self.var() self._state.following.pop() self._adaptor.addChild(root_0, var30.tree) elif alt9 == 2: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_num_in_atom376) num31 = self.num() self._state.following.pop() self._adaptor.addChild(root_0, num31.tree) elif alt9 == 3: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_str_in_atom382) str32 = self.str() self._state.following.pop() self._adaptor.addChild(root_0, str32.tree) elif alt9 == 4: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_fn_in_atom388) fn33 = self.fn() self._state.following.pop() self._adaptor.addChild(root_0, fn33.tree) elif alt9 == 5: pass LPAREN34=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_atom394) stream_LPAREN.add(LPAREN34) self._state.following.append(self.FOLLOW_conjunction_in_atom396) conjunction35 = self.conjunction() self._state.following.pop() stream_conjunction.add(conjunction35.tree) RPAREN36=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_atom398) 
stream_RPAREN.add(RPAREN36) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, stream_conjunction.nextTree()) retval.tree = root_0 retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class var_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def var(self, ): retval = self.var_return() retval.start = self.input.LT(1) root_0 = None name37 = None name38 = None index39 = None stream_name = RewriteRuleSubtreeStream(self._adaptor, "rule name") stream_index = RewriteRuleSubtreeStream(self._adaptor, "rule index") try: try: alt10 = 2 alt10 = self.dfa10.predict(self.input) if alt10 == 1: pass root_0 = self._adaptor.nil() self._state.following.append(self.FOLLOW_name_in_var415) name37 = self.name() self._state.following.pop() self._adaptor.addChild(root_0, name37.tree) elif alt10 == 2: pass self._state.following.append(self.FOLLOW_name_in_var421) name38 = self.name() self._state.following.pop() stream_name.add(name38.tree) self._state.following.append(self.FOLLOW_index_in_var423) index39 = self.index() self._state.following.pop() stream_index.add(index39.tree) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() root_1 = self._adaptor.nil() root_1 = self._adaptor.becomeRoot(self._adaptor.create(INDEX, ((index39 is not None) and [self.input.toString(index39.start,index39.stop)] or [None])[0]), root_1) self._adaptor.addChild(root_1, stream_name.nextTree()) self._adaptor.addChild(root_0, root_1) retval.tree = root_0 retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class index_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def index(self, ): retval = self.index_return() retval.start = self.input.LT(1) root_0 = None x = None LSQUARE40 = None RSQUARE41 = None x_tree = None LSQUARE40_tree = None RSQUARE41_tree = None stream_LSQUARE = RewriteRuleTokenStream(self._adaptor, "token LSQUARE") stream_RSQUARE = RewriteRuleTokenStream(self._adaptor, "token RSQUARE") stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT") try: try: pass LSQUARE40=self.match(self.input, LSQUARE, self.FOLLOW_LSQUARE_in_index445) stream_LSQUARE.add(LSQUARE40) x=self.match(self.input, INT, self.FOLLOW_INT_in_index449) stream_INT.add(x) RSQUARE41=self.match(self.input, RSQUARE, self.FOLLOW_RSQUARE_in_index451) stream_RSQUARE.add(RSQUARE41) retval.tree = root_0 stream_x = RewriteRuleTokenStream(self._adaptor, "token x", x) if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, stream_x.nextNode()) retval.tree = root_0 retval.stop = 
self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class name_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def name(self, ): retval = self.name_return() retval.start = self.input.LT(1) root_0 = None t = None NAME42 = None char_literal43 = None NAME44 = None t_tree = None NAME42_tree = None char_literal43_tree = None NAME44_tree = None stream_GEO = RewriteRuleTokenStream(self._adaptor, "token GEO") stream_DATE = RewriteRuleTokenStream(self._adaptor, "token DATE") stream_NUMBER = RewriteRuleTokenStream(self._adaptor, "token NUMBER") stream_GEOPOINT = RewriteRuleTokenStream(self._adaptor, "token GEOPOINT") stream_TEXT = RewriteRuleTokenStream(self._adaptor, "token TEXT") stream_HTML = RewriteRuleTokenStream(self._adaptor, "token HTML") stream_ATOM = RewriteRuleTokenStream(self._adaptor, "token ATOM") try: try: alt12 = 8 LA12 = self.input.LA(1) if LA12 == NAME: alt12 = 1 elif LA12 == TEXT: alt12 = 2 elif LA12 == HTML: alt12 = 3 elif LA12 == ATOM: alt12 = 4 elif LA12 == DATE: alt12 = 5 elif LA12 == NUMBER: alt12 = 6 elif LA12 == GEO: alt12 = 7 elif LA12 == GEOPOINT: alt12 = 8 else: nvae = NoViableAltException("", 12, 0, self.input) raise nvae if alt12 == 1: pass root_0 = self._adaptor.nil() NAME42=self.match(self.input, NAME, self.FOLLOW_NAME_in_name469) NAME42_tree = self._adaptor.createWithPayload(NAME42) self._adaptor.addChild(root_0, NAME42_tree) while True: alt11 = 2 LA11_0 = self.input.LA(1) if (LA11_0 == 58) : alt11 = 1 if alt11 == 1: pass char_literal43=self.match(self.input, 58, self.FOLLOW_58_in_name472) char_literal43_tree = self._adaptor.createWithPayload(char_literal43) root_0 = self._adaptor.becomeRoot(char_literal43_tree, root_0) NAME44=self.match(self.input, NAME, self.FOLLOW_NAME_in_name475) NAME44_tree = self._adaptor.createWithPayload(NAME44) self._adaptor.addChild(root_0, NAME44_tree) else: break elif alt12 == 2: pass t=self.match(self.input, TEXT, self.FOLLOW_TEXT_in_name491) stream_TEXT.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 3: pass t=self.match(self.input, HTML, self.FOLLOW_HTML_in_name504) stream_HTML.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 4: pass t=self.match(self.input, ATOM, self.FOLLOW_ATOM_in_name517) stream_ATOM.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 5: pass t=self.match(self.input, DATE, self.FOLLOW_DATE_in_name530) stream_DATE.add(t) retval.tree = root_0 if retval is not None: stream_retval = 
RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 6: pass t=self.match(self.input, NUMBER, self.FOLLOW_NUMBER_in_name543) stream_NUMBER.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 7: pass t=self.match(self.input, GEO, self.FOLLOW_GEO_in_name556) stream_GEO.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 elif alt12 == 8: pass t=self.match(self.input, GEOPOINT, self.FOLLOW_GEOPOINT_in_name569) stream_GEOPOINT.add(t) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() self._adaptor.addChild(root_0, self._adaptor.create(NAME, t)) retval.tree = root_0 retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class num_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def num(self, ): retval = self.num_return() retval.start = self.input.LT(1) root_0 = None set45 = None set45_tree = None try: try: pass root_0 = self._adaptor.nil() set45 = self.input.LT(1) if self.input.LA(1) == INT or self.input.LA(1) == FLOAT: self.input.consume() self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set45)) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class str_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def str(self, ): retval = self.str_return() retval.start = self.input.LT(1) root_0 = None PHRASE46 = None PHRASE46_tree = None try: try: pass root_0 = self._adaptor.nil() PHRASE46=self.match(self.input, PHRASE, self.FOLLOW_PHRASE_in_str606) PHRASE46_tree = self._adaptor.createWithPayload(PHRASE46) self._adaptor.addChild(root_0, PHRASE46_tree) retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class fn_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def fn(self, ): retval = self.fn_return() retval.start = self.input.LT(1) root_0 = None LPAREN48 = None COMMA50 = None 
RPAREN52 = None fnName47 = None condExpr49 = None condExpr51 = None LPAREN48_tree = None COMMA50_tree = None RPAREN52_tree = None stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA") stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN") stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN") stream_fnName = RewriteRuleSubtreeStream(self._adaptor, "rule fnName") stream_condExpr = RewriteRuleSubtreeStream(self._adaptor, "rule condExpr") try: try: pass self._state.following.append(self.FOLLOW_fnName_in_fn619) fnName47 = self.fnName() self._state.following.pop() stream_fnName.add(fnName47.tree) LPAREN48=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_fn621) stream_LPAREN.add(LPAREN48) self._state.following.append(self.FOLLOW_condExpr_in_fn623) condExpr49 = self.condExpr() self._state.following.pop() stream_condExpr.add(condExpr49.tree) while True: alt13 = 2 LA13_0 = self.input.LA(1) if (LA13_0 == COMMA) : alt13 = 1 if alt13 == 1: pass COMMA50=self.match(self.input, COMMA, self.FOLLOW_COMMA_in_fn626) stream_COMMA.add(COMMA50) self._state.following.append(self.FOLLOW_condExpr_in_fn628) condExpr51 = self.condExpr() self._state.following.pop() stream_condExpr.add(condExpr51.tree) else: break RPAREN52=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_fn632) stream_RPAREN.add(RPAREN52) retval.tree = root_0 if retval is not None: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree) else: stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None) root_0 = self._adaptor.nil() root_1 = self._adaptor.nil() root_1 = self._adaptor.becomeRoot(stream_fnName.nextNode(), root_1) if not (stream_condExpr.hasNext()): raise RewriteEarlyExitException() while stream_condExpr.hasNext(): self._adaptor.addChild(root_1, stream_condExpr.nextTree()) stream_condExpr.reset() self._adaptor.addChild(root_0, root_1) retval.tree = root_0 retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval class fnName_return(ParserRuleReturnScope): def __init__(self): ParserRuleReturnScope.__init__(self) self.tree = None def fnName(self, ): retval = self.fnName_return() retval.start = self.input.LT(1) root_0 = None set53 = None set53_tree = None try: try: pass root_0 = self._adaptor.nil() set53 = self.input.LT(1) if (TEXT <= self.input.LA(1) <= GEOPOINT) or (ABS <= self.input.LA(1) <= SWITCH): self.input.consume() self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set53)) self._state.errorRecovery = False else: mse = MismatchedSetException(None, self.input) raise mse retval.stop = self.input.LT(-1) retval.tree = self._adaptor.rulePostProcessing(root_0) self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop) except RecognitionException as e: self.reportError(e) raise e finally: pass return retval DFA9_eot = DFA.unpack( "\15\uffff" ) DFA9_eof = DFA.unpack( "\2\uffff\7\1\4\uffff" ) DFA9_min = DFA.unpack( "\1\25\1\uffff\7\6\4\uffff" ) DFA9_max = DFA.unpack( "\1\55\1\uffff\7\44\4\uffff" ) DFA9_accept = DFA.unpack( "\1\uffff\1\1\7\uffff\1\2\1\3\1\4\1\5" ) DFA9_special = DFA.unpack( "\15\uffff" ) DFA9_transition = [ DFA.unpack("\1\14\2\uffff\1\11\1\uffff\1\1\1\2\1\3\1\4\1\5\1\6\1" "\7\1\10\1\11\1\12\1\uffff\11\13"), DFA.unpack(""), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), 
DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack("\4\1\1\uffff\12\1\1\13\2\1\14\uffff\1\1"), DFA.unpack(""), DFA.unpack(""), DFA.unpack(""), DFA.unpack("") ] DFA9 = DFA DFA10_eot = DFA.unpack( "\15\uffff" ) DFA10_eof = DFA.unpack( "\1\uffff\10\13\3\uffff\1\13" ) DFA10_min = DFA.unpack( "\1\32\10\6\1\32\2\uffff\1\6" ) DFA10_max = DFA.unpack( "\1\41\1\72\7\44\1\32\2\uffff\1\72" ) DFA10_accept = DFA.unpack( "\12\uffff\1\2\1\1\1\uffff" ) DFA10_special = DFA.unpack( "\15\uffff" ) DFA10_transition = [ DFA.unpack("\1\1\1\2\1\3\1\4\1\5\1\6\1\7\1\10"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13" "\25\uffff\1\11"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13"), DFA.unpack("\1\14"), DFA.unpack(""), DFA.unpack(""), DFA.unpack("\4\13\1\uffff\12\13\1\uffff\1\13\1\12\14\uffff\1\13" "\25\uffff\1\11") ] DFA10 = DFA FOLLOW_conjunction_in_expression90 = frozenset([]) FOLLOW_EOF_in_expression92 = frozenset([1]) FOLLOW_conjunction_in_condExpr105 = frozenset([1, 6]) FOLLOW_COND_in_condExpr108 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_addExpr_in_condExpr111 = frozenset([1]) FOLLOW_disjunction_in_conjunction126 = frozenset([1, 7]) FOLLOW_AND_in_conjunction129 = frozenset([10, 18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_disjunction_in_conjunction132 = frozenset([1, 7]) FOLLOW_negation_in_disjunction147 = frozenset([1, 8, 9]) FOLLOW_set_in_disjunction150 = frozenset([10, 18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_negation_in_disjunction159 = frozenset([1, 8, 9]) FOLLOW_cmpExpr_in_negation174 = frozenset([1]) FOLLOW_NOT_in_negation180 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_cmpExpr_in_negation183 = frozenset([1]) FOLLOW_addExpr_in_cmpExpr196 = frozenset([1, 11, 12, 13, 14, 15, 16]) FOLLOW_cmpOp_in_cmpExpr199 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_addExpr_in_cmpExpr202 = frozenset([1]) FOLLOW_set_in_cmpOp0 = frozenset([1]) FOLLOW_multExpr_in_addExpr260 = frozenset([1, 17, 18]) FOLLOW_addOp_in_addExpr263 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_multExpr_in_addExpr266 = frozenset([1, 17, 18]) FOLLOW_set_in_addOp0 = frozenset([1]) FOLLOW_unary_in_multExpr300 = frozenset([1, 19, 20]) FOLLOW_multOp_in_multExpr303 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_unary_in_multExpr306 = frozenset([1, 19, 20]) FOLLOW_set_in_multOp0 = frozenset([1]) FOLLOW_MINUS_in_unary340 = frozenset([18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_atom_in_unary342 = frozenset([1]) 
FOLLOW_atom_in_unary357 = frozenset([1]) FOLLOW_var_in_atom370 = frozenset([1]) FOLLOW_num_in_atom376 = frozenset([1]) FOLLOW_str_in_atom382 = frozenset([1]) FOLLOW_fn_in_atom388 = frozenset([1]) FOLLOW_LPAREN_in_atom394 = frozenset([10, 18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_conjunction_in_atom396 = frozenset([22]) FOLLOW_RPAREN_in_atom398 = frozenset([1]) FOLLOW_name_in_var415 = frozenset([1]) FOLLOW_name_in_var421 = frozenset([23]) FOLLOW_index_in_var423 = frozenset([1]) FOLLOW_LSQUARE_in_index445 = frozenset([24]) FOLLOW_INT_in_index449 = frozenset([25]) FOLLOW_RSQUARE_in_index451 = frozenset([1]) FOLLOW_NAME_in_name469 = frozenset([1, 58]) FOLLOW_58_in_name472 = frozenset([26]) FOLLOW_NAME_in_name475 = frozenset([1, 58]) FOLLOW_TEXT_in_name491 = frozenset([1]) FOLLOW_HTML_in_name504 = frozenset([1]) FOLLOW_ATOM_in_name517 = frozenset([1]) FOLLOW_DATE_in_name530 = frozenset([1]) FOLLOW_NUMBER_in_name543 = frozenset([1]) FOLLOW_GEO_in_name556 = frozenset([1]) FOLLOW_GEOPOINT_in_name569 = frozenset([1]) FOLLOW_set_in_num0 = frozenset([1]) FOLLOW_PHRASE_in_str606 = frozenset([1]) FOLLOW_fnName_in_fn619 = frozenset([21]) FOLLOW_LPAREN_in_fn621 = frozenset([10, 18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_condExpr_in_fn623 = frozenset([22, 36]) FOLLOW_COMMA_in_fn626 = frozenset([10, 18, 21, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45]) FOLLOW_condExpr_in_fn628 = frozenset([22, 36]) FOLLOW_RPAREN_in_fn632 = frozenset([1]) FOLLOW_set_in_fnName0 = frozenset([1]) def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): from antlr3.main import ParserMain main = ParserMain("ExpressionLexer", ExpressionParser) main.stdin = stdin main.stdout = stdout main.stderr = stderr main.execute(argv) if __name__ == '__main__': main(sys.argv)
{ "content_hash": "1cbeda49dfb8048eb78de871ab6f5eda", "timestamp": "", "source": "github", "line_count": 2287, "max_line_length": 182, "avg_line_length": 23.838216003498033, "alnum_prop": 0.5048240947943798, "repo_name": "Suwmlee/XX-Net", "id": "96f67fe8693040f31d70b743da64f9eb81cb3197", "size": "55121", "binary": false, "copies": "1", "ref": "refs/heads/python3", "path": "gae_proxy/server/lib/google/appengine/api/search/ExpressionParser.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "200" }, { "name": "C", "bytes": "33097" }, { "name": "CSS", "bytes": "86345" }, { "name": "HTML", "bytes": "141382" }, { "name": "JavaScript", "bytes": "345991" }, { "name": "PHP", "bytes": "10671" }, { "name": "Python", "bytes": "17312939" }, { "name": "Shell", "bytes": "4647" }, { "name": "Visual Basic", "bytes": "382" } ], "symlink_target": "" }
from clif_aux.python import lib_hello

import unittest


class LibHello(unittest.TestCase):

  def testHello(self):
    res = lib_hello.hello('world')
    self.assertEqual(res, 'Hello, world!')

  def testHelloAndBye(self):
    res = lib_hello.hello_and_bye('world')
    self.assertEqual(res, 'Hello, world! Bye, world!')


if __name__ == '__main__':
  unittest.main()
{ "content_hash": "4bf2f3014e3aaf142e10135727b477ea", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 54, "avg_line_length": 22.9375, "alnum_prop": 0.670299727520436, "repo_name": "google/clif", "id": "fae4d3213e0fdebfde3fdabf68330669edada864", "size": "943", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "examples/clif_aux/python/lib_hello_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4035" }, { "name": "C++", "bytes": "685973" }, { "name": "CMake", "bytes": "29813" }, { "name": "Dockerfile", "bytes": "4053" }, { "name": "Python", "bytes": "742833" }, { "name": "Starlark", "bytes": "28337" } ], "symlink_target": "" }
import sys import logging import numpy as np from sklearn.cross_validation import ShuffleSplit # from sklearn.cross_validation import StratifiedShuffleSplit from sklearn.cross_validation import StratifiedKFold class TrainSplit(object): def __init__(self, eval_size, stratify=True, random_state=None): self.eval_size = eval_size self.stratify = stratify self.random_state = random_state def __call__(self, X, y, net): if self.eval_size is not None: if net.regression or not self.stratify: # test_size = self.eval_size # kf = ShuffleSplit( # y.shape[0], test_size=test_size, # random_state=self.random_state # ) # train_indices, valid_indices = next(iter(kf)) # valid_indices = shuffle(valid_indices) test_size = 1 - self.eval_size kf = ShuffleSplit( y.shape[0], test_size=test_size, random_state=self.random_state ) valid_indices, train_indices = next(iter(kf)) else: n_folds = int(round(1 / self.eval_size)) kf = StratifiedKFold(y, n_folds=n_folds, random_state=self.random_state) train_indices, valid_indices = next(iter(kf)) X_train, y_train = X[train_indices], y[train_indices] X_valid, y_valid = X[valid_indices], y[valid_indices] else: X_train, y_train = X, y X_valid, y_valid = X[len(X):], y[len(y):] return X_train, X_valid, y_train, y_valid def add_padding_to_bbox(x, y, w, h, pad, max_x, max_y, format='ltwh'): l, t = x, y r, b = l + w, t + h pad_x = int(round(w * pad / 2)) pad_y = int(round(h * pad / 2)) new_l = min(max(l - pad_x, 0), max_x) new_r = min(max(r + pad_x, 0), max_x) new_t = min(max(t - pad_y, 0), max_y) new_b = min(max(b + pad_y, 0), max_y) new_w = new_r - new_l new_h = new_b - new_t if format == 'ltwh': return int(new_l), int(new_t), int(new_w), int(new_h) elif format == 'ltrb': return int(new_l), int(new_t), int(new_r), int(new_b) else: raise ValueError('Format is not recongized: %s' % format) class LogFile(object): """ File-like object to log text using the `logging` module. http://stackoverflow.com/questions/616645/how-do-i-duplicate-sys-stdout-to-a-log-file-in-python/2216517#2216517 """ def __init__(self, name=None, stdout=None): self.logger = logging.getLogger(name) self.stdout = stdout def write(self, msg, level=logging.DEBUG): self.logger.log(level, msg) if self.stdout is not None: self.stdout.write(msg) def flush(self): # for handler in self.logger.handlers: # handler.flush() if self.stdout is not None: self.stdout.flush() def mirror_to_log(fname): logging.basicConfig(level=logging.DEBUG, filename=fname) # Redirect stdout sys.stdout = LogFile(fname, sys.stdout) def float32(k): return np.cast['float32'](k)
{ "content_hash": "a2e54a481bc105b7a14e5e97a47a6b0f", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 115, "avg_line_length": 32.51020408163265, "alnum_prop": 0.5662272441933459, "repo_name": "felixlaumon/kaggle-right-whale", "id": "ff64de4bd8572647e8067dd213ad37719839c54b", "size": "3186", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "utils/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "115699" }, { "name": "Makefile", "bytes": "630" }, { "name": "Python", "bytes": "139871" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Switch',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=40)),
                ('ip', models.GenericIPAddressField()),
                ('type', models.CharField(default=b'hp', max_length=20, choices=[(b'hp', b'HP'), (b'cisco', b'Cisco')])),
            ],
            options={
                'verbose_name_plural': 'Switches',
            },
            bases=(models.Model,),
        ),
    ]
{ "content_hash": "9d1881713ea5f5e8230550598d3301e4", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 121, "avg_line_length": 30.32, "alnum_prop": 0.5303430079155673, "repo_name": "BCGamer/CheckIn-Server", "id": "4cb9969a0944fe079b9f8682f95945015a846afd", "size": "782", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "network/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "184" }, { "name": "JavaScript", "bytes": "4199" }, { "name": "PowerShell", "bytes": "45" }, { "name": "Python", "bytes": "58877" } ], "symlink_target": "" }
import os

from m2x.client import M2XClient

# Instantiate a client
client = M2XClient(key=os.environ['API_KEY'])

# Delete the device
try:
    device = client.device(os.environ['DEVICE_ID'])
    print "Deleting device with ID {id}".format(id=device.id)
    device.remove()
except Exception:
    pass

if client.last_response.status == 204:
    print "Device removed successfully!"
else:
    print "Error occurred, see printout of API response below for details:"
    print client.last_response.raw
{ "content_hash": "e7386d5fc07ae3aaca8fe5f8dc58a65d", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 73, "avg_line_length": 24.6, "alnum_prop": 0.7439024390243902, "repo_name": "attm2x/m2x-python", "id": "fb4bec675b14e6b7133a8a465b6f3889ad1cf2fb", "size": "715", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/delete_device.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "232" }, { "name": "Python", "bytes": "74513" }, { "name": "Shell", "bytes": "63" } ], "symlink_target": "" }
"""'Abstract' Tab Handler class that groups several Handlers together. """ from itertools import izip from taba.handlers.tab_handler import TabHandler class CounterGroup(TabHandler): """'Abstract' Tab Handler class that groups several Handlers together. Sub-classes should at least implement wrap NewState(), and add class member variables COUNTERS and LABELS, each of which should be an array of instantiated TabHandlers and string labels, respectively. """ CURRENT_VERSION = 0 def NewState(self, client_id, name, group_state): """See base class definition.""" from taba.server.model.state_manager import StateStruct return [ StateStruct(state, handler.CURRENT_VERSION, None) for handler, state in izip(self.COUNTERS, group_state)] def FoldEvents(self, group_state, events): """See base class definition.""" group_state = self._Unpack(group_state) for handler, state in izip(self.COUNTERS, group_state): state.payload = handler.FoldEvents(state.payload, events) return group_state def Reduce(self, group_states): """See base class definition.""" from taba.server.model.state_manager import StateStruct group_states = map(self._Unpack, group_states) reduced_group = [None] * len(self.COUNTERS) for i, handler in enumerate(self.COUNTERS): payload = handler.Reduce([gs[i].payload for gs in group_states]) reduced_group[i] = StateStruct(payload, handler.CURRENT_VERSION, None) return reduced_group def Render(self, group_state, accept): """See base class definition.""" group_state = self._Unpack(group_state) datas = [ (lbl, hdlr.Render(state.payload, accept)) for hdlr, lbl, state in izip(self.COUNTERS, self.LABELS, group_state)] return '{%s}' % ', '.join([ '"%s": %s' % (label, data) for label, data in datas]) def Upgrade(self, group_state, version): """See base class definition.""" if version == 0: return group_state else: raise ValueError('Unknown version %s' % version) def ShouldPrune(self, group_state): """See base class definition.""" group_state = self._Unpack(group_state) return all( handler.ShouldPrune(state.payload) for handler, state in izip(self.COUNTERS, group_state)) def _Unpack(self, group_state): """For an incoming Group State, upgrade it is necessary. Args: group_state - Group State object. Returns: Group State, possible upgraded to the current version. """ for handler, state in izip(self.COUNTERS, group_state): if state.version != handler.CURRENT_VERSION: state.payload = handler.Upgrade(state.payload, state.version) state.version = handler.CURRENT_VERSION return group_state
{ "content_hash": "e6267dd7753cf692b2467bc6b49a90c2", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 78, "avg_line_length": 33.214285714285715, "alnum_prop": 0.6831541218637993, "repo_name": "tellapart/taba", "id": "c5c1c6ea4c27ab0186d41159c9699c5fc9959727", "size": "3370", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/taba/handlers/counter_group.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "540390" } ], "symlink_target": "" }
class Affliction():
    """
    Defines an affliction that can affect a member of the travelling party.
    """
    def __init__(self, affliciton_def=None):
        assert affliciton_def is not None, 'ERROR - you need to provide an affliction definition dictionary.'

        try:
            self.name = affliciton_def['name']
            self.severity = affliciton_def['severity']
        except:
            print 'There was an error reading the affliction definition dictionary:'
            print affliciton_def
            raise
{ "content_hash": "e287b9b0d2728fe12ebe2440097bfd4b", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 109, "avg_line_length": 36.07142857142857, "alnum_prop": 0.6099009900990099, "repo_name": "rwsharp/TORT", "id": "07316020f2ab1e53574de3328e247516d55a3644", "size": "505", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/afflictions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "45193" } ], "symlink_target": "" }
from peer import *


class Stack(Peer):
    ShowLayer = Pillow.In
    HideLayer = Pillow.In
    EnableLayer = Pillow.In
    DisableLayer = Pillow.In

    def __init__(self, room, controller):
        Peer.__init__(self, room)
        self._controller = controller

        self._catch(Stack.In.ShowLayer, self._showLayer)
        self._catch(Stack.In.HideLayer, self._hideLayer)
        self._catch(Stack.In.EnableLayer, self._enableLayer)
        self._catch(Stack.In.DisableLayer, self._disableLayer)

    def _showLayer(self, pillow, name):
        self._controller.showLayer(name)

    def _hideLayer(self, pillow, name):
        self._controller.hideLayer(name)

    def _enableLayer(self, pillow, name):
        self._controller.enableLayer(name)

    def _disableLayer(self, pillow, name):
        self._controller.disableLayer(name)
{ "content_hash": "0f56971ae94638c98b12b442ed68eabf", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 62, "avg_line_length": 31.074074074074073, "alnum_prop": 0.6555423122765197, "repo_name": "FreshXOpenSource/wallaby-base", "id": "c4a3d797c1a8f0069841addc5954c4d2c93902af", "size": "920", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wallaby/pf/peer/stack.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "248711" } ], "symlink_target": "" }
"""Wordcount exercise Google's Python class The main() below is already defined and complete. It calls print_words() and print_top() functions which you write. 1. For the --count flag, implement a print_words(filename) function that counts how often each word appears in the text and prints: word1 count1 word2 count2 ... Print the above list in order sorted by word (python will sort punctuation to come before letters -- that's fine). Store all the words as lowercase, so 'The' and 'the' count as the same word. 2. For the --topcount flag, implement a print_top(filename) which is similar to print_words() but which prints just the top 20 most common words sorted so the most common word is first, then the next most common, and so on. Use str.split() (no arguments) to split on all whitespace. Workflow: don't build the whole program at once. Get it to an intermediate milestone and print your data structure and sys.exit(0). When that's working, try for the next milestone. Optional: define a helper function to avoid code duplication inside print_words() and print_top(). """ import sys # +++your code here+++ # Define print_words(filename) and print_top(filename) functions. # You could write a helper utility function that reads a file # and builds and returns a word/count dict for it. # Then print_words() and print_top() can just call the utility function. ### # This basic command line argument parsing code is provided and # calls the print_words() and print_top() functions which you must define. def main(): if len(sys.argv) != 3: print('usage: ./wordcount.py {--count | --topcount} file') sys.exit(1) option = sys.argv[1] filename = sys.argv[2] if option == '--count': print_words(filename) elif option == '--topcount': print_top(filename) else: print('unknown option: ' + option) sys.exit(1) if __name__ == '__main__': main()
{ "content_hash": "52538086702f7fadf45e912ff36e04d2", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 79, "avg_line_length": 31.193548387096776, "alnum_prop": 0.704756980351603, "repo_name": "wltrimbl/google-python-exercises3", "id": "8108bfbd36d095e9333fcfea02746c33a3a3f27b", "size": "2204", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "basic/wordcount.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "DIGITAL Command Language", "bytes": "191608" }, { "name": "HTML", "bytes": "647778" }, { "name": "Python", "bytes": "53345" } ], "symlink_target": "" }
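Aside: the wordcount exercise above intentionally leaves print_words() and print_top() unimplemented. A minimal sketch of one possible completion follows; the helper name word_count_dict() is this sketch's own choice (the exercise only suggests "a helper utility function"), so treat it as illustrative rather than the official solution.

# Illustrative sketch only -- one way to complete the wordcount exercise above.
# The helper name word_count_dict() is an assumption of this sketch.
def word_count_dict(filename):
  """Read the file and return a dict mapping each lowercased word to its count."""
  counts = {}
  with open(filename) as f:
    for line in f:
      for word in line.split():
        word = word.lower()
        counts[word] = counts.get(word, 0) + 1
  return counts


def print_words(filename):
  """Print each word and its count, sorted alphabetically by word."""
  counts = word_count_dict(filename)
  for word in sorted(counts):
    print(word, counts[word])


def print_top(filename):
  """Print the 20 most common words, most frequent first."""
  counts = word_count_dict(filename)
  top = sorted(counts.items(), key=lambda item: item[1], reverse=True)
  for word, count in top[:20]:
    print(word, count)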
import os import time from StringIO import StringIO from PIL import Image from django.conf import settings from sorl.thumbnail.base import Thumbnail from sorl.thumbnail.main import DjangoThumbnail, get_thumbnail_setting from sorl.thumbnail.processors import dynamic_import, get_valid_options from sorl.thumbnail.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME,\ THUMB_NAME, PIC_SIZE class ThumbnailTest(BaseTest): def testThumbnails(self): # Thumbnail thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb) # Cropped thumbnail thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2, requested_size=(240, 240), opts=['crop']) self.verify_thumbnail((240, 240), thumb) # Thumbnail with altered JPEG quality thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3, requested_size=(240, 240), quality=95) self.verify_thumbnail((240, 180), thumb) def testRegeneration(self): # Create thumbnail thumb_name = THUMB_NAME % 4 thumb_size = (240, 240) Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.images_to_delete.add(thumb_name) thumb_mtime = os.path.getmtime(thumb_name) time.sleep(1) # Create another instance, shouldn't generate a new thumb Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime) # Recreate the source image, then see if a new thumb is generated Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG') Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime) def testFilelikeDest(self): # Thumbnail filelike_dest = StringIO() thumb = Thumbnail(source=PIC_NAME, dest=filelike_dest, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb) def testRGBA(self): # RGBA image rgba_pic_name = os.path.join(settings.MEDIA_ROOT, 'sorl-thumbnail-test_rgba_source.png') Image.new('RGBA', PIC_SIZE).save(rgba_pic_name) self.images_to_delete.add(rgba_pic_name) # Create thumb and verify it's still RGBA rgba_thumb_name = os.path.join(settings.MEDIA_ROOT, 'sorl-thumbnail-test_rgba_dest.png') thumb = Thumbnail(source=rgba_pic_name, dest=rgba_thumb_name, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb, expected_mode='RGBA') class DjangoThumbnailTest(BaseTest): def setUp(self): super(DjangoThumbnailTest, self).setUp() # Add another source image in a sub-directory for testing subdir and # basedir. 
self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail') try: os.mkdir(self.sub_dir) except OSError: pass self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME) Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG') self.images_to_delete.add(self.pic_subdir) def testFilenameGeneration(self): basename = RELATIVE_PIC_NAME.replace('.', '_') # Basic filename thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Changed quality and cropped thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), opts=['crop'], quality=95) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_crop_q95.jpg' self.verify_thumbnail((240, 120), thumb, expected_filename=expected) # All options on processors = dynamic_import(get_thumbnail_setting('PROCESSORS')) valid_options = get_valid_options(processors) thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), opts=valid_options) expected = (os.path.join(settings.MEDIA_ROOT, basename) + '_240x120_' 'autocrop_bw_crop_detail_max_sharpen_upscale_q85.jpg') self.verify_thumbnail((240, 120), thumb, expected_filename=expected) # Different basedir basedir = 'sorl-thumbnail-test-basedir' self.change_settings.change({'BASEDIR': basedir}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(basedir, self.sub_dir, basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Different subdir self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, os.path.basename(self.sub_dir), 'subdir', basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Different prefix self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(self.sub_dir, 'prefix-' + basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) def testAlternateExtension(self): basename = RELATIVE_PIC_NAME.replace('.', '_') # Control JPG thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.jpg' expected_jpg = expected self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Test PNG thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), extension='png') expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.png' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Compare the file size to make sure it's not just saving as a JPG with # a different extension. 
self.assertNotEqual(os.path.getsize(expected_jpg), os.path.getsize(expected)) def testUnicodeName(self): unicode_name = 'sorl-thumbnail-ążśź_source.jpg' unicode_path = os.path.join(settings.MEDIA_ROOT, unicode_name) Image.new('RGB', PIC_SIZE).save(unicode_path) self.images_to_delete.add(unicode_path) thumb = DjangoThumbnail(relative_source=unicode_name, requested_size=(240, 120)) base_name = unicode_name.replace('.', '_') expected = os.path.join(settings.MEDIA_ROOT, base_name + '_240x120_q85.jpg') self.verify_thumbnail((160, 120), thumb, expected_filename=expected) def tearDown(self): super(DjangoThumbnailTest, self).tearDown() subdir = os.path.join(self.sub_dir, 'subdir') if os.path.exists(subdir): os.rmdir(subdir) os.rmdir(self.sub_dir)
{ "content_hash": "4331ae3f57fe8f5be90bba3d2623f997", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 79, "avg_line_length": 45.81609195402299, "alnum_prop": 0.6078775715002509, "repo_name": "dantium/sorl-thumbnail", "id": "d15dd1937a90fe47031b7e4ffd8553d352454764", "size": "8000", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sorl/thumbnail/tests/classes.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "87527" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import csv import os import tempfile from django.core.management import call_command from django.core.management.base import CommandError from django.test import TestCase from ..csv_utils import infer_facility from ..management.commands.importusers import create_user from ..management.commands.importusers import infer_and_create_class from ..management.commands.importusers import validate_username from ..models import Classroom from ..models import FacilityUser from .helpers import setup_device from kolibri.core.auth.constants.demographics import DEFERRED from kolibri.core.auth.constants.demographics import FEMALE from kolibri.core.auth.constants.demographics import MALE from kolibri.core.auth.constants.demographics import NOT_SPECIFIED from kolibri.core.auth.csv_utils import labels class UserImportTestCase(TestCase): """ Tests for functions used in userimport command. """ def setUp(self): self.facility, self.superuser = setup_device() def test_validate_username_no_username(self): with self.assertRaises(CommandError): validate_username({}) def test_validate_username_none_username(self): with self.assertRaises(CommandError): validate_username({"username": None}) def test_infer_facility_none(self): default = {} self.assertEqual(infer_facility(None, default), default) def test_infer_facility_empty_string(self): default = {} self.assertEqual(infer_facility("", default), default) def test_infer_facility_by_id(self): default = {} self.assertEqual(infer_facility(self.facility.id, default), self.facility) def test_infer_facility_by_name(self): default = {} self.assertEqual(infer_facility(self.facility.name, default), self.facility) def test_infer_facility_fail(self): default = {} with self.assertRaises(ValueError): infer_facility("garbage", default) def test_infer_class_no_class_no_effect(self): infer_and_create_class(None, self.facility) self.assertEqual(Classroom.objects.count(), 0) def test_infer_class_falsy_class_no_effect(self): infer_and_create_class("", self.facility) self.assertEqual(Classroom.objects.count(), 0) def test_infer_class_by_id(self): classroom = Classroom.objects.create(name="testclass", parent=self.facility) self.assertEqual(infer_and_create_class(classroom.id, self.facility), classroom) def test_infer_class_by_name(self): classroom = Classroom.objects.create(name="testclass", parent=self.facility) self.assertEqual( infer_and_create_class(classroom.name, self.facility), classroom ) def test_infer_class_create(self): self.assertEqual( infer_and_create_class("testclass", self.facility), Classroom.objects.get(name="testclass"), ) def test_create_user_exists(self): user = {"username": self.superuser.username} self.assertFalse(create_user(user, default_facility=self.facility)) def test_create_user_exists_add_classroom(self): user = {"username": self.superuser.username, "class": "testclass"} create_user(user, default_facility=self.facility) self.assertTrue( self.superuser.is_member_of(Classroom.objects.get(name="testclass")) ) def test_create_user_not_exist(self): user = {"username": "testuser"} self.assertTrue(create_user(user, default_facility=self.facility)) def test_create_user_not_exist_add_classroom(self): user = {"username": "testuser", "class": "testclass"} create_user(user, default_facility=self.facility) self.assertTrue( FacilityUser.objects.get(username="testuser").is_member_of( Classroom.objects.get(name="testclass") ) ) def 
test_create_user_not_exist_bad_username(self): user = {"username": "test$user"} self.assertFalse(create_user(user, default_facility=self.facility)) users = [ { "username": "alice", "birth_year": "1990", "gender": FEMALE, "password": "password", }, {"username": "bob", "birth_year": "1914", "gender": MALE, "password": "password"}, { "username": "clara", "birth_year": "1900", "gender": NOT_SPECIFIED, "password": "password", }, { "username": "devone", "birth_year": "2100", "gender": DEFERRED, "password": "password", }, ] class DeviceNotSetup(TestCase): def test_device_not_setup(self): csvfile, csvpath = tempfile.mkstemp(suffix="csv") with self.assertRaisesRegexp(CommandError, "No default facility exists"): call_command("importusers", csvpath) os.remove(csvpath) class UserImportCommandTestCase(TestCase): """ Tests for 'kolibri manage importusers' command. """ @classmethod def setUpClass(self): super(UserImportCommandTestCase, self).setUpClass() self.facility, self.superuser = setup_device() def setUp(self): self.csvfile, self.csvpath = tempfile.mkstemp(suffix="csv") def tearDown(self): FacilityUser.objects.exclude(username=self.superuser.username).delete() os.remove(self.csvpath) def importFromRows(self, *args): with open(self.csvpath, "w") as f: writer = csv.writer(f) writer.writerows([a for a in args]) call_command("importusers", self.csvpath) def test_setup_headers_no_username(self): with self.assertRaisesRegexp(CommandError, "No usernames specified"): self.importFromRows(["class", "facility"]) call_command("importusers", self.csvpath) def test_setup_headers_invalid_header(self): with self.assertRaisesRegexp(CommandError, "Mix of valid and invalid header"): self.importFromRows(["class", "facility", "dogfood"]) call_command("importusers", self.csvpath) def test_setup_headers_make_user(self): self.importFromRows(["username"], ["testuser"]) call_command("importusers", self.csvpath) self.assertTrue(FacilityUser.objects.filter(username="testuser").exists()) def test_setup_no_headers_make_user(self): self.importFromRows(["Test user", "testuser"]) self.assertTrue(FacilityUser.objects.filter(username="testuser").exists()) def test_setup_no_headers_bad_user_good_user(self): self.importFromRows(["Test user", "testuser"], ["Other user", "te$tuser"]) self.assertTrue(FacilityUser.objects.filter(username="testuser").exists()) self.assertFalse(FacilityUser.objects.filter(username="te$tuser").exists()) def test_empty_fullname_defaults_to_username(self): self.importFromRows(["full_name", "username"], ["", "bob123"]) self.assertEqual( FacilityUser.objects.get(username="bob123").full_name, "bob123" ) def test_missing_fullname_defaults_to_username(self): self.importFromRows(["username"], ["bob123"]) self.assertEqual( FacilityUser.objects.get(username="bob123").full_name, "bob123" ) def test_update_valid_demographic_info_succeeds(self): FacilityUser.objects.create( username="alice", birth_year="1990", gender="FEMALE", password="password", facility=self.facility, ) self.importFromRows( ["username", "birth_year", "gender"], ["alice", "", "NOT_SPECIFIED"], ["bob", "1970", "MALE"], ) alice = FacilityUser.objects.get(username="alice") bob = FacilityUser.objects.get(username="bob") self.assertEqual(alice.birth_year, "") self.assertEqual(alice.gender, "NOT_SPECIFIED") self.assertEqual(bob.birth_year, "1970") self.assertEqual(bob.gender, "MALE") def test_update_with_invalid_demographic_info_fails(self): FacilityUser.objects.create( username="alice", birth_year="NOT_SPECIFIED", password="password", 
facility=self.facility, ) self.importFromRows( ["username", "birth_year", "gender"], ["alice", "BLAH", "FEMALE"], ["bob", "1970", "man"], ) # Alice and Bob's demographic data aren't updated alice = FacilityUser.objects.get(username="alice") bob = FacilityUser.objects.get(username="bob") self.assertEqual(alice.birth_year, "NOT_SPECIFIED") self.assertEqual(alice.gender, "") self.assertEqual(bob.birth_year, "") self.assertEqual(bob.gender, "") def test_update_with_missing_columns(self): FacilityUser.objects.create( username="alice", birth_year="1990", gender="FEMALE", id_number="ALICE", password="password", facility=self.facility, ) # CSV is missing column for gender, so it should not be updated self.importFromRows( ["username", "birth_year", "id_number"], ["alice", "2000", ""] ) alice = FacilityUser.objects.get(username="alice") self.assertEqual(alice.gender, "FEMALE") self.assertEqual(alice.birth_year, "2000") self.assertEqual(alice.id_number, "") def test_import_from_export_csv(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) call_command( "exportusers", output_file=self.csvpath, overwrite=True, demographic=True ) FacilityUser.objects.all().delete() call_command("importusers", self.csvpath) for user in users: user_model = FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.gender, user["gender"]) self.assertEqual(user_model.birth_year, user["birth_year"]) self.assertEqual(user_model.id_number, "") def test_import_from_export_missing_headers(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) call_command( "exportusers", output_file=self.csvpath, overwrite=True, demographic=True ) cols_to_remove = ["Facility id", "Gender"] with open(self.csvpath, "r") as source: reader = csv.DictReader(source) rows = list(row for row in reader) with open(self.csvpath, "w") as result: writer = csv.DictWriter( result, tuple( label for label in labels.values() if label not in cols_to_remove ), ) writer.writeheader() for row in rows: for col in cols_to_remove: del row[col] writer.writerow(row) FacilityUser.objects.all().delete() call_command("importusers", self.csvpath) for user in users: user_model = FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.birth_year, user["birth_year"]) self.assertEqual(user_model.id_number, "") def test_import_from_export_mixed_headers(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) call_command( "exportusers", output_file=self.csvpath, overwrite=True, demographic=True ) cols_to_replace = {"Facility id": "facility", "Gender": "gender"} with open(self.csvpath, "r") as source: reader = csv.DictReader(source) rows = list(row for row in reader) with open(self.csvpath, "w") as result: writer = csv.DictWriter( result, tuple( cols_to_replace[label] if label in cols_to_replace else label for label in labels.values() ), ) writer.writeheader() for row in rows: for col in cols_to_replace: row[cols_to_replace[col]] = row[col] del row[col] writer.writerow(row) FacilityUser.objects.all().delete() call_command("importusers", self.csvpath) for user in users: user_model = FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.birth_year, user["birth_year"]) self.assertEqual(user_model.id_number, "")
{ "content_hash": "73ebc429f43beded5776e7de0fc985f2", "timestamp": "", "source": "github", "line_count": 339, "max_line_length": 88, "avg_line_length": 37.74631268436578, "alnum_prop": 0.6203501094091903, "repo_name": "mrpau/kolibri", "id": "8f9b27c83ef0a32d45e2a262a87dab1735182384", "size": "12796", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "kolibri/core/auth/test/test_user_import.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "601" }, { "name": "CSS", "bytes": "1716299" }, { "name": "Dockerfile", "bytes": "7303" }, { "name": "Gherkin", "bytes": "278074" }, { "name": "HTML", "bytes": "26440" }, { "name": "JavaScript", "bytes": "1537923" }, { "name": "Makefile", "bytes": "13308" }, { "name": "Python", "bytes": "2298911" }, { "name": "Shell", "bytes": "11777" }, { "name": "Vue", "bytes": "1558714" } ], "symlink_target": "" }
import binascii # Searching short title from readability implementation import re def normalize_entities(cur_title): entities = { '—': '-', '–': '-', '&mdash;': '-', '&ndash;': '-', ' ': ' ', '«': '"', '»': '"', '&quot;': '"', '\xa0': ' ', } for c in entities: if c in cur_title: cur_title = cur_title.replace(c, entities[c]) return cur_title def normalize_spaces(s): """ Replace any sequence of whitespace characters with a single space. """ if not s: return '' return ' '.join(s.split()) def remove_punctuation(s): if not s: return '' return ''.join([l for l in s if l not in '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~']) def norm_title(title): return normalize_spaces(remove_punctuation(normalize_entities(title))) def shinglify(clean_text): """ Generates list of 'shingles': crc sums of word subsequences of default length :param clean_text: cleaned text to calculate shingles sequence. :return: shingles sequence """ shingle_length = 3 result = [] for idx in range(len(clean_text) - shingle_length + 1): result.append( binascii.crc32( bytes( u' '.join( [word for word in clean_text[idx:idx+shingle_length]] ), 'utf-8' ) ) ) return result def compare(initial, candidate): """ Compares two shingles sequence and returns similarity value. :param initial: initial sentence shingles sequence :param candidate: compared sentence shingles sequence :return: similarity value """ matches = 0 for shingle in initial: if shingle in candidate: matches += 1 return matches * 2 / float(len(initial) + len(candidate)) * 100 def shorten_title(doc, article_node): """ Finding title :param doc: full initial document :param article_node: node containing article :return: found title """ title = doc.find('.//title') if title is None or title.text is None or len(title.text) == 0: return '' title = title.text.strip() title_shingles = shinglify(norm_title(title)) candidates = [] search_tree = None body = doc.xpath('//body') if len(body) > 0: search_tree = body[0].iter() if search_tree is not None: for elem in search_tree: # titles are usually set somewhere before articles if article_node is not None and elem == article_node: break if elem.text is not None: candidate = elem.text.strip() if 0 < len(candidate) <= len(title): similarity = compare(title_shingles, shinglify(norm_title(candidate))) if similarity >= 50: candidates.append({ 'text': candidate, 'similarity': similarity, 'element': elem, }) else: return title, None cleaned_candidates = [] for c in reversed(candidates): if all([cc.get('similarity') != c.get('similarity') for cc in cleaned_candidates]) is True: cleaned_candidates.insert(0, c) best_title_entry = sorted(cleaned_candidates, key=lambda x: x['similarity'], reverse=True)[0] \ if len(candidates) > 0 else {'text': title, 'similarity': 100, 'element': None} # normalizing title and stripping starting/ending sequences of non-letter/non-digit symbols, dates best_title = normalize_spaces(normalize_entities(best_title_entry.get('text'))) # improve leading dates/time stripping best_title = re.sub(r'^\d{1,2}[/.]\d{1,2}[/.]\d{2,4}\s+', '', best_title) best_title = re.sub(r'^\d{1,2}[-:]\d{1,2}\d{0,2}\s+', '', best_title) best_title = re.sub(r'^(\W+\s+)', '', best_title) best_title = re.sub(r'(\s+\W+)$', '', best_title) return best_title, best_title_entry.get('element')
{ "content_hash": "0be5f630d11deda53af6fbd3455d8471", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 102, "avg_line_length": 28.937062937062937, "alnum_prop": 0.5495408409859835, "repo_name": "reefeed/wanish", "id": "7b270bd3055b98562d45defad3bc9c70d5613011", "size": "4144", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wanish/title.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "653" }, { "name": "Python", "bytes": "2570625" }, { "name": "Shell", "bytes": "267" } ], "symlink_target": "" }
from __future__ import print_function import sys import os import argparse from px4params import srcscanner, srcparser, xmlout, dokuwikiout, dokuwikirpc def main(): # Parse command line arguments parser = argparse.ArgumentParser(description="Process parameter documentation.") parser.add_argument("-s", "--src-path", default="../src", metavar="PATH", help="path to source files to scan for parameters") parser.add_argument("-x", "--xml", nargs='?', const="parameters.xml", metavar="FILENAME", help="Create XML file" " (default FILENAME: parameters.xml)") parser.add_argument("-i", "--inject-xml", nargs='?', const="../Tools/parameters_injected.xml", metavar="FILENAME", help="Inject additional param XML file" " (default FILENAME: ../Tools/parameters_injected.xml)") parser.add_argument("-b", "--board", nargs='?', const="", metavar="BOARD", help="Board to create xml parameter xml for") parser.add_argument("-w", "--wiki", nargs='?', const="parameters.wiki", metavar="FILENAME", help="Create DokuWiki file" " (default FILENAME: parameters.wiki)") parser.add_argument("-u", "--wiki-update", nargs='?', const="firmware:parameters", metavar="PAGENAME", help="Update DokuWiki page" " (default PAGENAME: firmware:parameters)") parser.add_argument("--wiki-url", default="https://pixhawk.org", metavar="URL", help="DokuWiki URL" " (default: https://pixhawk.org)") parser.add_argument("--wiki-user", default=os.environ.get('XMLRPCUSER', None), metavar="USERNAME", help="DokuWiki XML-RPC user name" " (default: $XMLRPCUSER environment variable)") parser.add_argument("--wiki-pass", default=os.environ.get('XMLRPCPASS', None), metavar="PASSWORD", help="DokuWiki XML-RPC user password" " (default: $XMLRPCUSER environment variable)") parser.add_argument("--wiki-summary", metavar="SUMMARY", default="Automagically updated parameter documentation from code.", help="DokuWiki page edit summary") parser.add_argument('-v', '--verbose', action='store_true', help="verbose output") args = parser.parse_args() # Check for valid command if not (args.xml or args.wiki or args.wiki_update): print("Error: You need to specify at least one output method!\n") parser.print_usage() sys.exit(1) # Initialize source scanner and parser scanner = srcscanner.SourceScanner() parser = srcparser.SourceParser() # Scan directories, and parse the files if (args.verbose): print("Scanning source path " + args.src_path) if not scanner.ScanDir(args.src_path, parser): sys.exit(1) if not parser.Validate(): sys.exit(1) param_groups = parser.GetParamGroups() # Output to XML file if args.xml: if args.verbose: print("Creating XML file " + args.xml) out = xmlout.XMLOutput(param_groups, args.board, os.path.join(args.src_path, args.inject_xml)) out.Save(args.xml) # Output to DokuWiki tables if args.wiki or args.wiki_update: out = dokuwikiout.DokuWikiTablesOutput(param_groups) if args.wiki: print("Creating wiki file " + args.wiki) out.Save(args.wiki) if args.wiki_update: if args.wiki_user and args.wiki_pass: print("Updating wiki page " + args.wiki_update) xmlrpc = dokuwikirpc.get_xmlrpc(args.wiki_url, args.wiki_user, args.wiki_pass) xmlrpc.wiki.putPage(args.wiki_update, out.output, {'sum': args.wiki_summary}) else: print("Error: You need to specify DokuWiki XML-RPC username and password!") #print("All done!") if __name__ == "__main__": main()
{ "content_hash": "f9b4e1174a5c03cfb7264355c8463d6c", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 102, "avg_line_length": 43.85981308411215, "alnum_prop": 0.5197102066908161, "repo_name": "darknight-007/Firmware", "id": "7bac8834d3cfec311b4535dde94c9510b9ecdc01", "size": "6852", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Tools/px_process_params.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "2610222" }, { "name": "C++", "bytes": "5712694" }, { "name": "CMake", "bytes": "548423" }, { "name": "GDB", "bytes": "785" }, { "name": "Io", "bytes": "241" }, { "name": "Makefile", "bytes": "45018" }, { "name": "Matlab", "bytes": "43628" }, { "name": "Python", "bytes": "646848" }, { "name": "Scilab", "bytes": "1502" }, { "name": "Shell", "bytes": "70591" } ], "symlink_target": "" }
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from importlib import import_module

from waffle.models import Switch


class TestAccessibilitySwitcher(TestCase):
    def setUp(self):
        self.client = Client()

        # http://code.djangoproject.com/ticket/10899
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key

    def test_a11y_testing_waffle_switch_off(self):
        response = self.client.get("/set-a11y-testing/")

        self.assertEqual(response.status_code, 404)

    def test_a11y_testing_mode_tota11y(self):
        Switch.objects.create(name="enable_a11y_testing", active=True)

        response = self.client.get("/set-a11y-testing/?mode=tota11y")
        response = self.client.get("/")

        self.assertContains(response, "/static/javascripts/vendor/tota11y.min.js")

    def test_a11y_testing_mode_google(self):
        Switch.objects.create(name="enable_a11y_testing", active=True)

        response = self.client.get("/set-a11y-testing/?mode=google")
        response = self.client.get("/")

        self.assertContains(response, "/static/javascripts/vendor/axs_testing.js")

    def test_a11y_testing_mode_off(self):
        Switch.objects.create(name="enable_a11y_testing", active=True)

        response = self.client.get("/set-a11y-testing/?mode=off")
        response = self.client.get("/")

        self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
        self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")

    def test_a11y_testing_mode_wrong(self):
        Switch.objects.create(name="enable_a11y_testing", active=True)

        response = self.client.get("/set-a11y-testing/?mode=gfhdjaks")
        response = self.client.get("/")

        self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js")
        self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
{ "content_hash": "4e639b2a7329028e53f55ea5a7b65d07", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 85, "avg_line_length": 37.96551724137931, "alnum_prop": 0.6907356948228883, "repo_name": "ministryofjustice/manchester_traffic_offences_pleas", "id": "b4ac9cb6ff29370e8ad75973efa64d74b2321c46", "size": "2202", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/plea/tests/test_accessibility_switcher.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "867" }, { "name": "Gherkin", "bytes": "10122" }, { "name": "HTML", "bytes": "184454" }, { "name": "JavaScript", "bytes": "52955" }, { "name": "Python", "bytes": "792658" }, { "name": "SCSS", "bytes": "43568" }, { "name": "Shell", "bytes": "1766" } ], "symlink_target": "" }
from six import ensure_text from .node import NodeVisitor, ValueNode, ListNode, BinaryExpressionNode from .parser import atoms, precedence atom_names = {v: "@%s" % k for (k,v) in atoms.items()} named_escapes = {"\a", "\b", "\f", "\n", "\r", "\t", "\v"} def escape(string, extras=""): # Assumes input bytes are either UTF8 bytes or unicode. rv = "" for c in string: if c in named_escapes: rv += c.encode("unicode_escape").decode() elif c == "\\": rv += "\\\\" elif c < '\x20': rv += "\\x%02x" % ord(c) elif c in extras: rv += "\\" + c else: rv += c return ensure_text(rv) class ManifestSerializer(NodeVisitor): def __init__(self, skip_empty_data=False): self.skip_empty_data = skip_empty_data def serialize(self, root): self.indent = 2 rv = "\n".join(self.visit(root)) if not rv: return rv rv = rv.strip() if rv[-1] != "\n": rv = rv + "\n" return rv def visit_DataNode(self, node): rv = [] if not self.skip_empty_data or node.children: if node.data: rv.append("[%s]" % escape(node.data, extras="]")) indent = self.indent * " " else: indent = "" for child in node.children: rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child)) if node.parent: rv.append("") return rv def visit_KeyValueNode(self, node): rv = [escape(node.data, ":") + ":"] indent = " " * self.indent if len(node.children) == 1 and isinstance(node.children[0], (ValueNode, ListNode)): rv[0] += " %s" % self.visit(node.children[0])[0] else: for child in node.children: rv.append(indent + self.visit(child)[0]) return rv def visit_ListNode(self, node): rv = ["["] rv.extend(", ".join(self.visit(child)[0] for child in node.children)) rv.append("]") return ["".join(rv)] def visit_ValueNode(self, node): data = ensure_text(node.data) if ("#" in data or data.startswith("if ") or (isinstance(node.parent, ListNode) and ("," in data or "]" in data))): if "\"" in data: quote = "'" else: quote = "\"" else: quote = "" return [quote + escape(data, extras=quote) + quote] def visit_AtomNode(self, node): return [atom_names[node.data]] def visit_ConditionalNode(self, node): return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)] def visit_StringNode(self, node): rv = ["\"%s\"" % escape(node.data, extras="\"")] for child in node.children: rv[0] += self.visit(child)[0] return rv def visit_NumberNode(self, node): return [ensure_text(node.data)] def visit_VariableNode(self, node): rv = escape(node.data) for child in node.children: rv += self.visit(child) return [rv] def visit_IndexNode(self, node): assert len(node.children) == 1 return ["[%s]" % self.visit(node.children[0])[0]] def visit_UnaryExpressionNode(self, node): children = [] for child in node.children: child_str = self.visit(child)[0] if isinstance(child, BinaryExpressionNode): child_str = "(%s)" % child_str children.append(child_str) return [" ".join(children)] def visit_BinaryExpressionNode(self, node): assert len(node.children) == 3 children = [] for child_index in [1, 0, 2]: child = node.children[child_index] child_str = self.visit(child)[0] if (isinstance(child, BinaryExpressionNode) and precedence(node.children[0]) < precedence(child.children[0])): child_str = "(%s)" % child_str children.append(child_str) return [" ".join(children)] def visit_UnaryOperatorNode(self, node): return [ensure_text(node.data)] def visit_BinaryOperatorNode(self, node): return [ensure_text(node.data)] def serialize(tree, *args, **kwargs): s = ManifestSerializer(*args, **kwargs) return s.serialize(tree)
{ "content_hash": "2b31d7e279a5bc74eec22f1643cc0c7e", "timestamp": "", "source": "github", "line_count": 145, "max_line_length": 96, "avg_line_length": 30.910344827586208, "alnum_prop": 0.5258813029897367, "repo_name": "scheib/chromium", "id": "e31a60791ea78f362f1ef27c621b6f4f2eaa230e", "size": "4482", "binary": false, "copies": "9", "ref": "refs/heads/main", "path": "third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/wptmanifest/serializer.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from twisted.words.protocols import irc
from txircd.modbase import Mode


class NoExternalMessagesMode(Mode):
    def checkPermission(self, user, cmd, data):
        if cmd not in ["PRIVMSG", "NOTICE"]:
            return data
        targetChannels = data["targetchan"]
        chanModList = data["chanmod"]
        removeTargets = []
        for channel in targetChannels:
            if "n" in channel.mode and user not in channel.users:
                removeTargets.append(channel)
                user.sendMessage(irc.ERR_CANNOTSENDTOCHAN, channel.name, ":Cannot send to channel (no external messages)")
        for channel in removeTargets:
            index = targetChannels.index(channel)
            targetChannels.pop(index)
            chanModList.pop(index)
        data["targetchan"] = targetChannels
        data["chanmod"] = chanModList
        return data


class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd

    def spawn(self):
        return {
            "modes": {
                "cnn": NoExternalMessagesMode()
            },
            "common": True
        }
{ "content_hash": "4cbe78769795f9af61bddb41c582bb01", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 122, "avg_line_length": 31.135135135135137, "alnum_prop": 0.5711805555555556, "repo_name": "DesertBus/txircd", "id": "998490647a5eaf61c6db2a204475381423316455", "size": "1152", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "txircd/modules/cmode_n.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "454329" } ], "symlink_target": "" }
def merge_sort(m):
    """Sort list m, using merge sort"""
    return m.sort()  # TODO: Replace with actual implementation
{ "content_hash": "70bb10af3d3520f4b928e0a78b1186d3", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 59, "avg_line_length": 38.666666666666664, "alnum_prop": 0.7068965517241379, "repo_name": "praqma-training/gitkatas", "id": "e4faf9ddaa3bd5fc261e3d3d147f477e634edb32", "size": "118", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "merge-mergesort/base.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2187" }, { "name": "Shell", "bytes": "9142" } ], "symlink_target": "" }
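Aside: the kata stub above only notes "TODO: Replace with actual implementation". For reference, a straightforward top-down merge sort that the stub could be replaced with might look like the following; this is an illustrative sketch, not code from the kata repository.

# Illustrative sketch only -- a plain top-down merge sort, not part of the kata repo.
def merge_sort(m):
    """Sort list m, using merge sort; returns a new sorted list."""
    if len(m) <= 1:
        return list(m)
    mid = len(m) // 2
    return _merge(merge_sort(m[:mid]), merge_sort(m[mid:]))


def _merge(left, right):
    """Merge two already-sorted lists into one sorted list."""
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result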
""" Provides numerous filters, and a command (outfilter) to set them as filters on the output of the bot. """ import supybot import supybot.world as world # Use this for the version of this plugin. You may wish to put a CVS keyword # in here if you\'re keeping the plugin in CVS or some similar system. __version__ = "%%VERSION%%" __author__ = supybot.authors.jemfinch # This is a dictionary mapping supybot.Author instances to lists of # contributions. __contributors__ = {} from . import config from . import plugin from imp import reload reload(plugin) # In case we're being reloaded. if world.testing: from . import test Class = plugin.Class configure = config.configure # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
{ "content_hash": "97576a5d4354b919151bb647468c4eb4", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 78, "avg_line_length": 24.322580645161292, "alnum_prop": 0.7347480106100795, "repo_name": "Ban3/Limnoria", "id": "cf46f1d713947ace40796228444ce7f116fdc32f", "size": "2338", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "plugins/Filter/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "864" }, { "name": "Python", "bytes": "2513657" }, { "name": "Shell", "bytes": "217" } ], "symlink_target": "" }
import warnings from tencentcloud.common.abstract_model import AbstractModel class Acl(AbstractModel): """ACL对象实体 """ def __init__(self): r""" :param ResourceType: Acl资源类型,(0:UNKNOWN,1:ANY,2:TOPIC,3:GROUP,4:CLUSTER,5:TRANSACTIONAL_ID)当前只有TOPIC, :type ResourceType: int :param ResourceName: 资源名称,和resourceType相关如当resourceType为TOPIC时,则该字段表示topic名称,当resourceType为GROUP时,该字段表示group名称 :type ResourceName: str :param Principal: 用户列表,默认为User:*,表示任何user都可以访问,当前用户只能是用户列表中包含的用户 注意:此字段可能返回 null,表示取不到有效值。 :type Principal: str :param Host: 默认为*,表示任何host都可以访问,当前ckafka不支持host为*,但是后面开源kafka的产品化会直接支持 注意:此字段可能返回 null,表示取不到有效值。 :type Host: str :param Operation: Acl操作方式(0:UNKNOWN,1:ANY,2:ALL,3:READ,4:WRITE,5:CREATE,6:DELETE,7:ALTER,8:DESCRIBE,9:CLUSTER_ACTION,10:DESCRIBE_CONFIGS,11:ALTER_CONFIGS,12:IDEMPOTEN_WRITE) :type Operation: int :param PermissionType: 权限类型(0:UNKNOWN,1:ANY,2:DENY,3:ALLOW) :type PermissionType: int """ self.ResourceType = None self.ResourceName = None self.Principal = None self.Host = None self.Operation = None self.PermissionType = None def _deserialize(self, params): self.ResourceType = params.get("ResourceType") self.ResourceName = params.get("ResourceName") self.Principal = params.get("Principal") self.Host = params.get("Host") self.Operation = params.get("Operation") self.PermissionType = params.get("PermissionType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AclResponse(AbstractModel): """ACL返回结果集 """ def __init__(self): r""" :param TotalCount: 符合条件的总数据条数 :type TotalCount: int :param AclList: ACL列表 注意:此字段可能返回 null,表示取不到有效值。 :type AclList: list of Acl """ self.TotalCount = None self.AclList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("AclList") is not None: self.AclList = [] for item in params.get("AclList"): obj = Acl() obj._deserialize(item) self.AclList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class AclRule(AbstractModel): """AclRule列表接口出参 """ def __init__(self): r""" :param RuleName: Acl规则名称 注意:此字段可能返回 null,表示取不到有效值。 :type RuleName: str :param InstanceId: 实例ID 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceId: str :param PatternType: 匹配类型,目前只支持前缀匹配,枚举值列表:PREFIXED 注意:此字段可能返回 null,表示取不到有效值。 :type PatternType: str :param Pattern: 表示前缀匹配的前缀的值 注意:此字段可能返回 null,表示取不到有效值。 :type Pattern: str :param ResourceType: Acl资源类型,目前只支持Topic,枚举值列表:Topic 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceType: str :param AclList: 该规则所包含的ACL信息 注意:此字段可能返回 null,表示取不到有效值。 :type AclList: str :param CreateTimeStamp: 规则所创建的时间 注意:此字段可能返回 null,表示取不到有效值。 :type CreateTimeStamp: str :param IsApplied: 预设ACL规则是否应用到新增的topic中 注意:此字段可能返回 null,表示取不到有效值。 :type IsApplied: int :param UpdateTimeStamp: 规则更新时间 注意:此字段可能返回 null,表示取不到有效值。 :type UpdateTimeStamp: str :param Comment: 规则的备注 注意:此字段可能返回 null,表示取不到有效值。 :type Comment: str :param TopicName: 其中一个显示的对应的TopicName 注意:此字段可能返回 null,表示取不到有效值。 :type TopicName: str :param TopicCount: 应用该ACL规则的Topic数 注意:此字段可能返回 null,表示取不到有效值。 :type TopicCount: int :param PatternTypeTitle: patternType的中文显示 注意:此字段可能返回 null,表示取不到有效值。 :type PatternTypeTitle: str """ self.RuleName = None self.InstanceId = None self.PatternType = None self.Pattern = None self.ResourceType = None self.AclList = None self.CreateTimeStamp = None self.IsApplied = None self.UpdateTimeStamp = None self.Comment = None self.TopicName = None self.TopicCount = None self.PatternTypeTitle = None def _deserialize(self, params): self.RuleName = params.get("RuleName") self.InstanceId = params.get("InstanceId") self.PatternType = params.get("PatternType") self.Pattern = params.get("Pattern") self.ResourceType = params.get("ResourceType") self.AclList = params.get("AclList") self.CreateTimeStamp = params.get("CreateTimeStamp") self.IsApplied = params.get("IsApplied") self.UpdateTimeStamp = params.get("UpdateTimeStamp") self.Comment = params.get("Comment") self.TopicName = params.get("TopicName") self.TopicCount = params.get("TopicCount") self.PatternTypeTitle = params.get("PatternTypeTitle") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AclRuleInfo(AbstractModel): """表示ACL 规则的四元组信息 """ def __init__(self): r""" :param Operation: Acl操作方式,枚举值(所有操作: All, 读:Read,写:Write) :type Operation: str :param PermissionType: 权限类型,(Deny,Allow) :type PermissionType: str :param Host: 默认为*,表示任何host都可以访问,当前ckafka不支持host为*和ip网段 :type Host: str :param Principal: 用户列表,默认为User:*,表示任何user都可以访问,当前用户只能是用户列表中包含的用户。传入格式需要带【User:】前缀。例如用户A,传入为User:A。 :type Principal: str """ self.Operation = None self.PermissionType = None self.Host = None self.Principal = None def _deserialize(self, params): self.Operation = params.get("Operation") self.PermissionType = params.get("PermissionType") self.Host = params.get("Host") self.Principal = params.get("Principal") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class AnalyseParam(AbstractModel): """数据处理-解析参数 """ def __init__(self): r""" :param Format: 解析格式,JSON,DELIMITER分隔符,REGULAR正则提取,SOURCE处理上层所有结果 :type Format: str :param Regex: 分隔符、正则表达式 注意:此字段可能返回 null,表示取不到有效值。 :type Regex: str :param InputValueType: 需再次处理的KEY——模式 注意:此字段可能返回 null,表示取不到有效值。 :type InputValueType: str :param InputValue: 需再次处理的KEY——KEY表达式 注意:此字段可能返回 null,表示取不到有效值。 :type InputValue: str """ self.Format = None self.Regex = None self.InputValueType = None self.InputValue = None def _deserialize(self, params): self.Format = params.get("Format") self.Regex = params.get("Regex") self.InputValueType = params.get("InputValueType") self.InputValue = params.get("InputValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AppIdResponse(AbstractModel): """AppId的查询结果 """ def __init__(self): r""" :param TotalCount: 符合要求的所有AppId数量 :type TotalCount: int :param AppIdList: 符合要求的App Id列表 注意:此字段可能返回 null,表示取不到有效值。 :type AppIdList: list of int """ self.TotalCount = None self.AppIdList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") self.AppIdList = params.get("AppIdList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Assignment(AbstractModel): """存储着分配给该消费者的 partition 信息 """ def __init__(self): r""" :param Version: assingment版本信息 :type Version: int :param Topics: topic信息列表 注意:此字段可能返回 null,表示取不到有效值。 :type Topics: list of GroupInfoTopics """ self.Version = None self.Topics = None def _deserialize(self, params): self.Version = params.get("Version") if params.get("Topics") is not None: self.Topics = [] for item in params.get("Topics"): obj = GroupInfoTopics() obj._deserialize(item) self.Topics.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class AuthorizeTokenRequest(AbstractModel): """AuthorizeToken请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param User: 用户 :type User: str :param Tokens: token串 :type Tokens: str """ self.InstanceId = None self.User = None self.Tokens = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.User = params.get("User") self.Tokens = params.get("Tokens") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class AuthorizeTokenResponse(AbstractModel): """AuthorizeToken返回参数结构体 """ def __init__(self): r""" :param Result: 0 成功 注意:此字段可能返回 null,表示取不到有效值。 :type Result: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class BatchContent(AbstractModel): """批量发送消息内容 """ def __init__(self): r""" :param Body: 发送的消息体 :type Body: str :param Key: 发送消息的键名 :type Key: str """ self.Body = None self.Key = None def _deserialize(self, params): self.Body = params.get("Body") self.Key = params.get("Key") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class BatchCreateAclRequest(AbstractModel): """BatchCreateAcl请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param ResourceType: Acl资源类型,(2:TOPIC) :type ResourceType: int :param ResourceNames: 资源列表数组 :type ResourceNames: list of str :param RuleList: 设置的ACL规则列表 :type RuleList: list of AclRuleInfo """ self.InstanceId = None self.ResourceType = None self.ResourceNames = None self.RuleList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.ResourceType = params.get("ResourceType") self.ResourceNames = params.get("ResourceNames") if params.get("RuleList") is not None: self.RuleList = [] for item in params.get("RuleList"): obj = AclRuleInfo() obj._deserialize(item) self.RuleList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class BatchCreateAclResponse(AbstractModel): """BatchCreateAcl返回参数结构体 """ def __init__(self): r""" :param Result: 状态码 :type Result: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class BatchModifyGroupOffsetsRequest(AbstractModel): """BatchModifyGroupOffsets请求参数结构体 """ def __init__(self): r""" :param GroupName: 消费分组名称 :type GroupName: str :param InstanceId: 实例名称 :type InstanceId: str :param Partitions: partition信息 :type Partitions: list of Partitions :param TopicName: 指定topic,默认所有topic :type TopicName: list of str """ self.GroupName = None self.InstanceId = None self.Partitions = None self.TopicName = None def _deserialize(self, params): self.GroupName = params.get("GroupName") self.InstanceId = params.get("InstanceId") if params.get("Partitions") is not None: self.Partitions = [] for item in params.get("Partitions"): obj = Partitions() obj._deserialize(item) self.Partitions.append(obj) self.TopicName = params.get("TopicName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class BatchModifyGroupOffsetsResponse(AbstractModel): """BatchModifyGroupOffsets返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class BatchModifyTopicAttributesRequest(AbstractModel): """BatchModifyTopicAttributes请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param Topic: 主题属性列表 :type Topic: list of BatchModifyTopicInfo """ self.InstanceId = None self.Topic = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") if params.get("Topic") is not None: self.Topic = [] for item in params.get("Topic"): obj = BatchModifyTopicInfo() obj._deserialize(item) self.Topic.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class BatchModifyTopicAttributesResponse(AbstractModel): """BatchModifyTopicAttributes返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: list of BatchModifyTopicResultDTO :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = [] for item in params.get("Result"): obj = BatchModifyTopicResultDTO() obj._deserialize(item) self.Result.append(obj) self.RequestId = params.get("RequestId") class BatchModifyTopicInfo(AbstractModel): """批量修改topic参数 """ def __init__(self): r""" :param TopicName: topic名称 :type TopicName: str :param PartitionNum: 分区数 :type PartitionNum: int :param Note: 备注 :type Note: str :param ReplicaNum: 副本数 :type ReplicaNum: int :param CleanUpPolicy: 消息删除策略,可以选择delete 或者compact :type CleanUpPolicy: str :param MinInsyncReplicas: 当producer设置request.required.acks为-1时,min.insync.replicas指定replicas的最小数目 :type MinInsyncReplicas: int :param UncleanLeaderElectionEnable: 是否允许非ISR的副本成为Leader :type UncleanLeaderElectionEnable: bool :param RetentionMs: topic维度的消息保留时间(毫秒)范围1 分钟到90 天 :type RetentionMs: int :param RetentionBytes: topic维度的消息保留大小,范围1 MB到1024 GB :type RetentionBytes: int :param SegmentMs: Segment分片滚动的时长(毫秒),范围1 到90 天 :type SegmentMs: int :param MaxMessageBytes: 批次的消息大小,范围1 KB到12 MB :type MaxMessageBytes: int """ self.TopicName = None self.PartitionNum = None self.Note = None self.ReplicaNum = None self.CleanUpPolicy = None self.MinInsyncReplicas = None self.UncleanLeaderElectionEnable = None self.RetentionMs = None self.RetentionBytes = None self.SegmentMs = None self.MaxMessageBytes = None def _deserialize(self, params): self.TopicName = params.get("TopicName") self.PartitionNum = params.get("PartitionNum") self.Note = params.get("Note") self.ReplicaNum = params.get("ReplicaNum") self.CleanUpPolicy = params.get("CleanUpPolicy") self.MinInsyncReplicas = params.get("MinInsyncReplicas") self.UncleanLeaderElectionEnable = params.get("UncleanLeaderElectionEnable") self.RetentionMs = params.get("RetentionMs") self.RetentionBytes = params.get("RetentionBytes") self.SegmentMs = params.get("SegmentMs") self.MaxMessageBytes = params.get("MaxMessageBytes") 
memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class BatchModifyTopicResultDTO(AbstractModel): """批量修改topic属性结果 """ def __init__(self): r""" :param InstanceId: 实例id 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceId: str :param TopicName: topic名称 注意:此字段可能返回 null,表示取不到有效值。 :type TopicName: str :param ReturnCode: 状态码 注意:此字段可能返回 null,表示取不到有效值。 :type ReturnCode: str :param Message: 状态消息 :type Message: str """ self.InstanceId = None self.TopicName = None self.ReturnCode = None self.Message = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.ReturnCode = params.get("ReturnCode") self.Message = params.get("Message") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CancelAuthorizationTokenRequest(AbstractModel): """CancelAuthorizationToken请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param User: 用户 :type User: str :param Tokens: token串 :type Tokens: str """ self.InstanceId = None self.User = None self.Tokens = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.User = params.get("User") self.Tokens = params.get("Tokens") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CancelAuthorizationTokenResponse(AbstractModel): """CancelAuthorizationToken返回参数结构体 """ def __init__(self): r""" :param Result: 0 成功 注意:此字段可能返回 null,表示取不到有效值。 :type Result: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class CdcClusterResponse(AbstractModel): """创建CDC 标准版共享集群出参 """ def __init__(self): r""" :param TaskId: 任务ID 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: int """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CheckCdcClusterRequest(AbstractModel): """CheckCdcCluster请求参数结构体 """ def __init__(self): r""" :param TaskId: 任务ID :type TaskId: int """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CheckCdcClusterResponse(AbstractModel): """CheckCdcCluster返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果状态Success 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class ClickHouseConnectParam(AbstractModel): """ClickHouse连接源参数 """ def __init__(self): r""" :param Port: ClickHouse的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: ClickHouse连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: ClickHouse连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: ClickHouse连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param SelfBuilt: ClickHouse连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param ServiceVip: ClickHouse连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: ClickHouse连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.SelfBuilt = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.SelfBuilt = params.get("SelfBuilt") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ClickHouseModifyConnectParam(AbstractModel): """ClickHouse修改连接源参数 """ def __init__(self): r""" :param Resource: ClickHouse连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: ClickHouse的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: ClickHouse连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: ClickHouse连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: ClickHouse连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: ClickHouse连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param SelfBuilt: ClickHouse连接源是否为自建集群【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param IsUpdate: 是否更新到关联的Datahub任务,默认为true 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.SelfBuilt = None self.IsUpdate = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.SelfBuilt = params.get("SelfBuilt") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ClickHouseParam(AbstractModel): """ClickHouse类型入参 """ def __init__(self): r""" :param Cluster: ClickHouse的集群 :type Cluster: str :param Database: ClickHouse的数据库名 :type Database: str :param Table: ClickHouse的数据表名 :type Table: str :param Schema: ClickHouse的schema :type Schema: list of ClickHouseSchema :param Resource: 实例资源 :type Resource: str :param Ip: ClickHouse的连接ip :type Ip: str :param Port: ClickHouse的连接port :type Port: int :param UserName: ClickHouse的用户名 :type UserName: str :param Password: ClickHouse的密码 :type Password: str :param ServiceVip: 实例vip :type ServiceVip: str :param UniqVpcId: 实例的vpcId :type UniqVpcId: str :param SelfBuilt: 是否为自建集群 :type SelfBuilt: bool :param DropInvalidMessage: ClickHouse是否抛弃解析失败的消息,默认为true :type DropInvalidMessage: bool :param Type: ClickHouse 类型,emr-clickhouse : "emr";cdw-clickhouse : "cdwch";自建 : "" :type Type: str :param DropCls: 当设置成员参数DropInvalidMessageToCls设置为true时,DropInvalidMessage参数失效 :type DropCls: :class:`tencentcloud.ckafka.v20190819.models.DropCls` """ self.Cluster = None self.Database = None self.Table = None self.Schema = None self.Resource = None self.Ip = None self.Port = None self.UserName = None self.Password = None self.ServiceVip = None self.UniqVpcId = None self.SelfBuilt = None self.DropInvalidMessage = None self.Type = None self.DropCls = None def _deserialize(self, params): self.Cluster = params.get("Cluster") self.Database = params.get("Database") self.Table = params.get("Table") if params.get("Schema") is not None: self.Schema = [] for item in params.get("Schema"): obj = ClickHouseSchema() obj._deserialize(item) self.Schema.append(obj) self.Resource = params.get("Resource") self.Ip = params.get("Ip") self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.SelfBuilt = params.get("SelfBuilt") self.DropInvalidMessage = params.get("DropInvalidMessage") self.Type = params.get("Type") if params.get("DropCls") is not None: self.DropCls = DropCls() self.DropCls._deserialize(params.get("DropCls")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ClickHouseSchema(AbstractModel): """ClickHouse的Schema """ def __init__(self): r""" :param ColumnName: 表的列名 :type ColumnName: str :param JsonKey: 该列对应的jsonKey名 :type JsonKey: str :param Type: 表列项的类型 :type Type: str :param AllowNull: 列项是否允许为空 :type AllowNull: bool """ self.ColumnName = None self.JsonKey = None self.Type = None self.AllowNull = None def _deserialize(self, params): self.ColumnName = params.get("ColumnName") self.JsonKey = params.get("JsonKey") self.Type = params.get("Type") self.AllowNull = params.get("AllowNull") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ClsParam(AbstractModel): """Cls类型入参 """ def __init__(self): r""" :param DecodeJson: 生产的信息是否为json格式 注意:此字段可能返回 null,表示取不到有效值。 :type DecodeJson: bool :param Resource: cls日志主题id :type Resource: str :param LogSet: cls日志集id 注意:此字段可能返回 null,表示取不到有效值。 :type LogSet: str :param ContentKey: 当DecodeJson为false时必填 注意:此字段可能返回 null,表示取不到有效值。 :type ContentKey: str :param TimeField: 指定消息中的某字段内容作为cls日志的时间。 字段内容格式需要是秒级时间戳 :type TimeField: str """ self.DecodeJson = None self.Resource = None self.LogSet = None self.ContentKey = None self.TimeField = None def _deserialize(self, params): self.DecodeJson = params.get("DecodeJson") self.Resource = params.get("Resource") self.LogSet = params.get("LogSet") self.ContentKey = params.get("ContentKey") self.TimeField = params.get("TimeField") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ClusterInfo(AbstractModel): """集群信息实体 """ def __init__(self): r""" :param ClusterId: 集群Id :type ClusterId: int :param ClusterName: 集群名称 :type ClusterName: str :param MaxDiskSize: 集群最大磁盘 单位GB 注意:此字段可能返回 null,表示取不到有效值。 :type MaxDiskSize: int :param MaxBandWidth: 集群最大带宽 单位MB/s 注意:此字段可能返回 null,表示取不到有效值。 :type MaxBandWidth: int :param AvailableDiskSize: 集群当前可用磁盘 单位GB 注意:此字段可能返回 null,表示取不到有效值。 :type AvailableDiskSize: int :param AvailableBandWidth: 集群当前可用带宽 单位MB/s 注意:此字段可能返回 null,表示取不到有效值。 :type AvailableBandWidth: int :param ZoneId: 集群所属可用区,表明集群归属的可用区 注意:此字段可能返回 null,表示取不到有效值。 :type ZoneId: int :param ZoneIds: 集群节点所在的可用区,若该集群为跨可用区集群,则包含该集群节点所在的多个可用区。 注意:此字段可能返回 null,表示取不到有效值。 :type ZoneIds: list of int """ self.ClusterId = None self.ClusterName = None self.MaxDiskSize = None self.MaxBandWidth = None self.AvailableDiskSize = None self.AvailableBandWidth = None self.ZoneId = None self.ZoneIds = None def _deserialize(self, params): self.ClusterId = params.get("ClusterId") self.ClusterName = params.get("ClusterName") self.MaxDiskSize = params.get("MaxDiskSize") self.MaxBandWidth = params.get("MaxBandWidth") self.AvailableDiskSize = params.get("AvailableDiskSize") self.AvailableBandWidth = params.get("AvailableBandWidth") self.ZoneId = params.get("ZoneId") self.ZoneIds = params.get("ZoneIds") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class Config(AbstractModel): """高级配置对象 """ def __init__(self): r""" :param Retention: 消息保留时间 注意:此字段可能返回 null,表示取不到有效值。 :type Retention: int :param MinInsyncReplicas: 最小同步复制数 注意:此字段可能返回 null,表示取不到有效值。 :type MinInsyncReplicas: int :param CleanUpPolicy: 日志清理模式,默认 delete。 delete:日志按保存时间删除;compact:日志按 key 压缩;compact, delete:日志按 key 压缩且会保存时间删除。 注意:此字段可能返回 null,表示取不到有效值。 :type CleanUpPolicy: str :param SegmentMs: Segment 分片滚动的时长 注意:此字段可能返回 null,表示取不到有效值。 :type SegmentMs: int :param UncleanLeaderElectionEnable: 0表示 false。 1表示 true。 注意:此字段可能返回 null,表示取不到有效值。 :type UncleanLeaderElectionEnable: int :param SegmentBytes: Segment 分片滚动的字节数 注意:此字段可能返回 null,表示取不到有效值。 :type SegmentBytes: int :param MaxMessageBytes: 最大消息字节数 注意:此字段可能返回 null,表示取不到有效值。 :type MaxMessageBytes: int :param RetentionBytes: 消息保留文件大小 注意:此字段可能返回 null,表示取不到有效值。 :type RetentionBytes: int """ self.Retention = None self.MinInsyncReplicas = None self.CleanUpPolicy = None self.SegmentMs = None self.UncleanLeaderElectionEnable = None self.SegmentBytes = None self.MaxMessageBytes = None self.RetentionBytes = None def _deserialize(self, params): self.Retention = params.get("Retention") self.MinInsyncReplicas = params.get("MinInsyncReplicas") self.CleanUpPolicy = params.get("CleanUpPolicy") self.SegmentMs = params.get("SegmentMs") self.UncleanLeaderElectionEnable = params.get("UncleanLeaderElectionEnable") self.SegmentBytes = params.get("SegmentBytes") self.MaxMessageBytes = params.get("MaxMessageBytes") self.RetentionBytes = params.get("RetentionBytes") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ConnectResourceResourceIdResp(AbstractModel): """返回连接源的Id """ def __init__(self): r""" :param ResourceId: 连接源的Id 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceId: str """ self.ResourceId = None def _deserialize(self, params): self.ResourceId = params.get("ResourceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Connection(AbstractModel): """Connection信息 """ def __init__(self): r""" :param TopicName: Topic名称 :type TopicName: str :param GroupId: 消费组ID :type GroupId: str :param TopicId: Topic的Id :type TopicId: str """ self.TopicName = None self.GroupId = None self.TopicId = None def _deserialize(self, params): self.TopicName = params.get("TopicName") self.GroupId = params.get("GroupId") self.TopicId = params.get("TopicId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ConsumerGroup(AbstractModel): """用户组实体 """ def __init__(self): r""" :param ConsumerGroupName: 用户组名称 :type ConsumerGroupName: str :param SubscribedInfo: 订阅信息实体 :type SubscribedInfo: list of SubscribedInfo """ self.ConsumerGroupName = None self.SubscribedInfo = None def _deserialize(self, params): self.ConsumerGroupName = params.get("ConsumerGroupName") if params.get("SubscribedInfo") is not None: self.SubscribedInfo = [] for item in params.get("SubscribedInfo"): obj = SubscribedInfo() obj._deserialize(item) self.SubscribedInfo.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ConsumerGroupResponse(AbstractModel): """消费组返回结果实体 """ def __init__(self): r""" :param TotalCount: 符合条件的消费组数量 :type TotalCount: int :param TopicList: 主题列表 注意:此字段可能返回 null,表示取不到有效值。 :type TopicList: list of ConsumerGroupTopic :param GroupList: 消费分组List 注意:此字段可能返回 null,表示取不到有效值。 :type GroupList: list of ConsumerGroup :param TotalPartition: 所有分区数量 注意:此字段可能返回 null,表示取不到有效值。 :type TotalPartition: int :param PartitionListForMonitor: 监控的分区列表 注意:此字段可能返回 null,表示取不到有效值。 :type PartitionListForMonitor: list of Partition :param TotalTopic: 主题总数 注意:此字段可能返回 null,表示取不到有效值。 :type TotalTopic: int :param TopicListForMonitor: 监控的主题列表 注意:此字段可能返回 null,表示取不到有效值。 :type TopicListForMonitor: list of ConsumerGroupTopic :param GroupListForMonitor: 监控的组列表 注意:此字段可能返回 null,表示取不到有效值。 :type GroupListForMonitor: list of Group """ self.TotalCount = None self.TopicList = None self.GroupList = None self.TotalPartition = None self.PartitionListForMonitor = None self.TotalTopic = None self.TopicListForMonitor = None self.GroupListForMonitor = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("TopicList") is not None: self.TopicList = [] for item in params.get("TopicList"): obj = ConsumerGroupTopic() obj._deserialize(item) self.TopicList.append(obj) if params.get("GroupList") is not None: self.GroupList = [] for item in params.get("GroupList"): obj = ConsumerGroup() obj._deserialize(item) self.GroupList.append(obj) self.TotalPartition = params.get("TotalPartition") if params.get("PartitionListForMonitor") is not None: self.PartitionListForMonitor = [] for item in params.get("PartitionListForMonitor"): obj = Partition() obj._deserialize(item) self.PartitionListForMonitor.append(obj) self.TotalTopic = params.get("TotalTopic") if params.get("TopicListForMonitor") is not None: self.TopicListForMonitor = [] for item in params.get("TopicListForMonitor"): obj = ConsumerGroupTopic() obj._deserialize(item) self.TopicListForMonitor.append(obj) if params.get("GroupListForMonitor") is not None: self.GroupListForMonitor = [] for item in params.get("GroupListForMonitor"): obj = Group() obj._deserialize(item) self.GroupListForMonitor.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ConsumerGroupTopic(AbstractModel): """消费组主题对象 """ def __init__(self): r""" :param TopicId: 主题ID :type TopicId: str :param TopicName: 主题名称 :type TopicName: str """ self.TopicId = None self.TopicName = None def _deserialize(self, params): self.TopicId = params.get("TopicId") self.TopicName = params.get("TopicName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ConsumerRecord(AbstractModel): """消息记录 """ def __init__(self): r""" :param Topic: 主题名 :type Topic: str :param Partition: 分区id :type Partition: int :param Offset: 位点 :type Offset: int :param Key: 消息key 注意:此字段可能返回 null,表示取不到有效值。 :type Key: str :param Value: 消息value 注意:此字段可能返回 null,表示取不到有效值。 :type Value: str :param Timestamp: 消息时间戳 注意:此字段可能返回 null,表示取不到有效值。 :type Timestamp: int """ self.Topic = None self.Partition = None self.Offset = None self.Key = None self.Value = None self.Timestamp = None def _deserialize(self, params): self.Topic = params.get("Topic") self.Partition = params.get("Partition") self.Offset = params.get("Offset") self.Key = params.get("Key") self.Value = params.get("Value") self.Timestamp = params.get("Timestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CosParam(AbstractModel): """Cos Datahub 任务接入参数 """ def __init__(self): r""" :param BucketName: cos 存储桶名称 :type BucketName: str :param Region: 地域代码 :type Region: str :param ObjectKey: 对象名称 :type ObjectKey: str :param AggregateBatchSize: 汇聚消息量的大小(单位:MB) :type AggregateBatchSize: int :param AggregateInterval: 汇聚的时间间隔(单位:小时) :type AggregateInterval: int :param FormatOutputType: 消息汇聚后的文件格式(支持csv, json) :type FormatOutputType: str :param ObjectKeyPrefix: 转储的对象目录前缀 :type ObjectKeyPrefix: str :param DirectoryTimeFormat: 根据strptime 时间格式化的分区格式 :type DirectoryTimeFormat: str """ self.BucketName = None self.Region = None self.ObjectKey = None self.AggregateBatchSize = None self.AggregateInterval = None self.FormatOutputType = None self.ObjectKeyPrefix = None self.DirectoryTimeFormat = None def _deserialize(self, params): self.BucketName = params.get("BucketName") self.Region = params.get("Region") self.ObjectKey = params.get("ObjectKey") self.AggregateBatchSize = params.get("AggregateBatchSize") self.AggregateInterval = params.get("AggregateInterval") self.FormatOutputType = params.get("FormatOutputType") self.ObjectKeyPrefix = params.get("ObjectKeyPrefix") self.DirectoryTimeFormat = params.get("DirectoryTimeFormat") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateAclRequest(AbstractModel): """CreateAcl请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id信息 :type InstanceId: str :param ResourceType: Acl资源类型,(2:TOPIC,3:GROUP,4:CLUSTER) :type ResourceType: int :param Operation: Acl操作方式,(2:ALL,3:READ,4:WRITE,5:CREATE,6:DELETE,7:ALTER,8:DESCRIBE,9:CLUSTER_ACTION,10:DESCRIBE_CONFIGS,11:ALTER_CONFIGS,12:IDEMPOTENT_WRITE) :type Operation: int :param PermissionType: 权限类型,(2:DENY,3:ALLOW),当前ckakfa支持ALLOW(相当于白名单),其它用于后续兼容开源kafka的acl时使用 :type PermissionType: int :param ResourceName: 资源名称,和resourceType相关,如当resourceType为TOPIC时,则该字段表示topic名称,当resourceType为GROUP时,该字段表示group名称,当resourceType为CLUSTER时,该字段可为空。 :type ResourceName: str :param Host: 默认为\*,表示任何host都可以访问,当前ckafka不支持host为\*,但是后面开源kafka的产品化会直接支持 :type Host: str :param Principal: 用户列表,默认为User:*,表示任何user都可以访问,当前用户只能是用户列表中包含的用户。传入时需要加 User: 前缀,如用户A则传入User:A。 :type Principal: str :param ResourceNameList: 资源名称列表,Json字符串格式。ResourceName和resourceNameList只能指定其中一个。 :type ResourceNameList: str """ self.InstanceId = None self.ResourceType = None self.Operation = None self.PermissionType = None self.ResourceName = None self.Host = None self.Principal = None self.ResourceNameList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.ResourceType = params.get("ResourceType") self.Operation = params.get("Operation") self.PermissionType = params.get("PermissionType") self.ResourceName = params.get("ResourceName") self.Host = params.get("Host") self.Principal = params.get("Principal") self.ResourceNameList = params.get("ResourceNameList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateAclResponse(AbstractModel): """CreateAcl返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateCdcClusterRequest(AbstractModel): """CreateCdcCluster请求参数结构体 """ def __init__(self): r""" :param CdcId: cdc的id :type CdcId: str :param CdcVpcId: vpcId,一个地域只有唯一一个vpcid用于CDC :type CdcVpcId: str :param CdcSubnetId: 每个CDC集群有唯一一个子网ID :type CdcSubnetId: str :param ZoneId: 所在可用区ID :type ZoneId: int :param Bandwidth: cdc集群的总带宽 :type Bandwidth: int :param DiskSize: cdc集群的总磁盘 :type DiskSize: int :param DiskType: 数据盘类型 :type DiskType: str :param SystemDiskType: 系统盘类型 :type SystemDiskType: str """ self.CdcId = None self.CdcVpcId = None self.CdcSubnetId = None self.ZoneId = None self.Bandwidth = None self.DiskSize = None self.DiskType = None self.SystemDiskType = None def _deserialize(self, params): self.CdcId = params.get("CdcId") self.CdcVpcId = params.get("CdcVpcId") self.CdcSubnetId = params.get("CdcSubnetId") self.ZoneId = params.get("ZoneId") self.Bandwidth = params.get("Bandwidth") self.DiskSize = params.get("DiskSize") self.DiskType = params.get("DiskType") self.SystemDiskType = params.get("SystemDiskType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateCdcClusterResponse(AbstractModel): """CreateCdcCluster返回参数结构体 """ def __init__(self): r""" :param Result: 无 :type Result: :class:`tencentcloud.ckafka.v20190819.models.CdcClusterResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CdcClusterResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateConnectResourceRequest(AbstractModel): """CreateConnectResource请求参数结构体 """ def __init__(self): r""" :param ResourceName: 连接源名称 :type ResourceName: str :param Type: 连接源类型 :type Type: str :param Description: 连接源描述 :type Description: str :param DtsConnectParam: Dts配置,Type为DTS时必填 :type DtsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DtsConnectParam` :param MongoDBConnectParam: MongoDB配置,Type为MONGODB时必填 :type MongoDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MongoDBConnectParam` :param EsConnectParam: Es配置,Type为ES时必填 :type EsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.EsConnectParam` :param ClickHouseConnectParam: ClickHouse配置,Type为CLICKHOUSE时必填 :type ClickHouseConnectParam: :class:`tencentcloud.ckafka.v20190819.models.ClickHouseConnectParam` :param MySQLConnectParam: MySQL配置,Type为MYSQL或TDSQL_C_MYSQL时必填 :type MySQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MySQLConnectParam` :param PostgreSQLConnectParam: PostgreSQL配置,Type为POSTGRESQL或TDSQL_C_POSTGRESQL时必填 :type PostgreSQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.PostgreSQLConnectParam` :param MariaDBConnectParam: MariaDB配置,Type为MARIADB时必填 :type MariaDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MariaDBConnectParam` :param SQLServerConnectParam: SQLServer配置,Type为SQLSERVER时必填 :type SQLServerConnectParam: :class:`tencentcloud.ckafka.v20190819.models.SQLServerConnectParam` :param DorisConnectParam: Doris 配置,Type为 DORIS 时必填 :type DorisConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DorisConnectParam` """ self.ResourceName = None self.Type = None self.Description = None self.DtsConnectParam = None self.MongoDBConnectParam = None self.EsConnectParam = None self.ClickHouseConnectParam = None self.MySQLConnectParam = None self.PostgreSQLConnectParam = None self.MariaDBConnectParam = None self.SQLServerConnectParam = None self.DorisConnectParam = None def _deserialize(self, params): self.ResourceName = params.get("ResourceName") self.Type = params.get("Type") self.Description = params.get("Description") if params.get("DtsConnectParam") is not None: self.DtsConnectParam = DtsConnectParam() self.DtsConnectParam._deserialize(params.get("DtsConnectParam")) if params.get("MongoDBConnectParam") is not None: self.MongoDBConnectParam = MongoDBConnectParam() self.MongoDBConnectParam._deserialize(params.get("MongoDBConnectParam")) if params.get("EsConnectParam") is not None: self.EsConnectParam = EsConnectParam() self.EsConnectParam._deserialize(params.get("EsConnectParam")) if params.get("ClickHouseConnectParam") is not None: self.ClickHouseConnectParam = ClickHouseConnectParam() self.ClickHouseConnectParam._deserialize(params.get("ClickHouseConnectParam")) if params.get("MySQLConnectParam") is not None: self.MySQLConnectParam = MySQLConnectParam() self.MySQLConnectParam._deserialize(params.get("MySQLConnectParam")) if params.get("PostgreSQLConnectParam") is not None: self.PostgreSQLConnectParam = 
PostgreSQLConnectParam() self.PostgreSQLConnectParam._deserialize(params.get("PostgreSQLConnectParam")) if params.get("MariaDBConnectParam") is not None: self.MariaDBConnectParam = MariaDBConnectParam() self.MariaDBConnectParam._deserialize(params.get("MariaDBConnectParam")) if params.get("SQLServerConnectParam") is not None: self.SQLServerConnectParam = SQLServerConnectParam() self.SQLServerConnectParam._deserialize(params.get("SQLServerConnectParam")) if params.get("DorisConnectParam") is not None: self.DorisConnectParam = DorisConnectParam() self.DorisConnectParam._deserialize(params.get("DorisConnectParam")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateConnectResourceResponse(AbstractModel): """CreateConnectResource返回参数结构体 """ def __init__(self): r""" :param Result: 连接源的Id :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConnectResourceResourceIdResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConnectResourceResourceIdResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateConsumerRequest(AbstractModel): """CreateConsumer请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param GroupName: group名称 :type GroupName: str :param TopicName: topic名称,TopicName、TopicNameList 需要显示指定一个存在的topic名称 :type TopicName: str :param TopicNameList: topic名称数组 :type TopicNameList: list of str """ self.InstanceId = None self.GroupName = None self.TopicName = None self.TopicNameList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.GroupName = params.get("GroupName") self.TopicName = params.get("TopicName") self.TopicNameList = params.get("TopicNameList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateConsumerResponse(AbstractModel): """CreateConsumer返回参数结构体 """ def __init__(self): r""" :param Result: 创建group描述 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateDatahubTaskRequest(AbstractModel): """CreateDatahubTask请求参数结构体 """ def __init__(self): r""" :param TaskName: 任务名称 :type TaskName: str :param TaskType: 任务类型,SOURCE数据接入,SINK数据流出 :type TaskType: str :param SourceResource: 数据源 :type SourceResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param TargetResource: 数据目标 :type TargetResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param TransformParam: 数据处理规则 :type TransformParam: :class:`tencentcloud.ckafka.v20190819.models.TransformParam` :param PrivateLinkParam: 实例连接参数【已废弃】 :type PrivateLinkParam: :class:`tencentcloud.ckafka.v20190819.models.PrivateLinkParam` :param SchemaId: 选择所要绑定的SchemaId :type SchemaId: str :param TransformsParam: 数据处理规则 :type TransformsParam: :class:`tencentcloud.ckafka.v20190819.models.TransformsParam` :param TaskId: 任务ID :type TaskId: str :param Tags: 标签列表 :type Tags: list of Tag """ self.TaskName = None self.TaskType = None self.SourceResource = None self.TargetResource = None self.TransformParam = None self.PrivateLinkParam = None self.SchemaId = None self.TransformsParam = None self.TaskId = None self.Tags = None def _deserialize(self, params): self.TaskName = params.get("TaskName") self.TaskType = params.get("TaskType") if params.get("SourceResource") is not None: self.SourceResource = DatahubResource() self.SourceResource._deserialize(params.get("SourceResource")) if params.get("TargetResource") is not None: self.TargetResource = DatahubResource() self.TargetResource._deserialize(params.get("TargetResource")) if params.get("TransformParam") is not None: self.TransformParam = TransformParam() self.TransformParam._deserialize(params.get("TransformParam")) if params.get("PrivateLinkParam") is not None: self.PrivateLinkParam = PrivateLinkParam() self.PrivateLinkParam._deserialize(params.get("PrivateLinkParam")) self.SchemaId = params.get("SchemaId") if params.get("TransformsParam") is not None: self.TransformsParam = TransformsParam() self.TransformsParam._deserialize(params.get("TransformsParam")) self.TaskId = params.get("TaskId") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateDatahubTaskRes(AbstractModel): """创建数据转储返回值 """ def __init__(self): r""" :param TaskId: 转储任务id :type TaskId: str :param DatahubId: 数据转储Id 注意:此字段可能返回 null,表示取不到有效值。 :type DatahubId: str """ self.TaskId = None self.DatahubId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.DatahubId = params.get("DatahubId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateDatahubTaskResponse(AbstractModel): """CreateDatahubTask返回参数结构体 """ def __init__(self): r""" :param Result: 任务id :type Result: :class:`tencentcloud.ckafka.v20190819.models.CreateDatahubTaskRes` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CreateDatahubTaskRes() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateInstancePreData(AbstractModel): """创建预付费接口返回的Data """ def __init__(self): r""" :param FlowId: CreateInstancePre返回固定为0,不能作为CheckTaskStatus的查询条件。只是为了保证和后台数据结构对齐。 注意:此字段可能返回 null,表示取不到有效值。 :type FlowId: int :param DealNames: 订单号列表 注意:此字段可能返回 null,表示取不到有效值。 :type DealNames: list of str :param InstanceId: 实例Id 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceId: str """ self.FlowId = None self.DealNames = None self.InstanceId = None def _deserialize(self, params): self.FlowId = params.get("FlowId") self.DealNames = params.get("DealNames") self.InstanceId = params.get("InstanceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateInstancePreRequest(AbstractModel): """CreateInstancePre请求参数结构体 """ def __init__(self): r""" :param InstanceName: 实例名称,是一个不超过 64 个字符的字符串,必须以字母为首字符,剩余部分可以包含字母、数字和横划线(-) :type InstanceName: str :param ZoneId: 可用区,购买多可用区实例时,填写ZoneIds.N字段中的任意一个值 :type ZoneId: int :param Period: 预付费购买时长,例如 "1m",就是一个月 :type Period: str :param InstanceType: 实例规格说明 专业版实例[所有规格]填写1. 标准版实例 ([入门型]填写1,[标准型]填写2,[进阶型]填写3,[容量型]填写4,[高阶型1]填写5,[高阶性2]填写6,[高阶型3]填写7,[高阶型4]填写8,[独占型]填写9。 :type InstanceType: int :param VpcId: vpcId,不填默认基础网络 :type VpcId: str :param SubnetId: 子网id,vpc网络需要传该参数,基础网络可以不传 :type SubnetId: str :param MsgRetentionTime: 可选。实例日志的最长保留时间,单位分钟,默认为10080(7天),最大30天,不填默认0,代表不开启日志保留时间回收策略 :type MsgRetentionTime: int :param ClusterId: 创建实例时可以选择集群Id, 该入参表示集群Id :type ClusterId: int :param RenewFlag: 预付费自动续费标记,0表示默认状态(用户未设置,即初始状态), 1表示自动续费,2表示明确不自动续费(用户设置) :type RenewFlag: int :param KafkaVersion: CKafka版本号[0.10.2、1.1.1、2.4.1], 默认是1.1.1 :type KafkaVersion: str :param SpecificationsType: 实例类型: [标准版实例]填写 standard(默认), [专业版实例]填写 profession :type SpecificationsType: str :param DiskSize: 磁盘大小,专业版不填写默认最小磁盘,填写后根据磁盘带宽分区数弹性计算 :type DiskSize: int :param BandWidth: 带宽,专业版不填写默认最小带宽,填写后根据磁盘带宽分区数弹性计算 :type BandWidth: int :param Partition: 分区大小,专业版不填写默认最小分区数,填写后根据磁盘带宽分区数弹性计算 :type Partition: int :param Tags: 标签 :type Tags: list of Tag :param DiskType: 磁盘类型(ssd填写CLOUD_SSD,sata填写CLOUD_BASIC) :type DiskType: str :param MultiZoneFlag: 跨可用区,zoneIds必填 :type MultiZoneFlag: bool :param ZoneIds: 可用区列表,购买多可用区实例时为必填项 :type ZoneIds: list of int """ self.InstanceName = None self.ZoneId = None self.Period = None self.InstanceType = None self.VpcId = None self.SubnetId = None self.MsgRetentionTime = None self.ClusterId = None self.RenewFlag = None self.KafkaVersion = None self.SpecificationsType = None self.DiskSize = None self.BandWidth = None self.Partition = None self.Tags = None self.DiskType = None self.MultiZoneFlag = None self.ZoneIds = None def _deserialize(self, params): self.InstanceName = params.get("InstanceName") self.ZoneId = params.get("ZoneId") self.Period = params.get("Period") self.InstanceType = params.get("InstanceType") self.VpcId = params.get("VpcId") self.SubnetId = params.get("SubnetId") 
self.MsgRetentionTime = params.get("MsgRetentionTime") self.ClusterId = params.get("ClusterId") self.RenewFlag = params.get("RenewFlag") self.KafkaVersion = params.get("KafkaVersion") self.SpecificationsType = params.get("SpecificationsType") self.DiskSize = params.get("DiskSize") self.BandWidth = params.get("BandWidth") self.Partition = params.get("Partition") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) self.DiskType = params.get("DiskType") self.MultiZoneFlag = params.get("MultiZoneFlag") self.ZoneIds = params.get("ZoneIds") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateInstancePreResp(AbstractModel): """创建预付费实例返回结构 """ def __init__(self): r""" :param ReturnCode: 返回的code,0为正常,非0为错误 :type ReturnCode: str :param ReturnMessage: 成功消息 :type ReturnMessage: str :param Data: 操作型返回的Data数据 注意:此字段可能返回 null,表示取不到有效值。 :type Data: :class:`tencentcloud.ckafka.v20190819.models.CreateInstancePreData` :param DeleteRouteTimestamp: 删除是时间 注意:此字段可能返回 null,表示取不到有效值。 :type DeleteRouteTimestamp: str """ self.ReturnCode = None self.ReturnMessage = None self.Data = None self.DeleteRouteTimestamp = None def _deserialize(self, params): self.ReturnCode = params.get("ReturnCode") self.ReturnMessage = params.get("ReturnMessage") if params.get("Data") is not None: self.Data = CreateInstancePreData() self.Data._deserialize(params.get("Data")) self.DeleteRouteTimestamp = params.get("DeleteRouteTimestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateInstancePreResponse(AbstractModel): """CreateInstancePre返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.CreateInstancePreResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CreateInstancePreResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreatePartitionRequest(AbstractModel): """CreatePartition请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str :param PartitionNum: 主题分区个数 :type PartitionNum: int """ self.InstanceId = None self.TopicName = None self.PartitionNum = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.PartitionNum = params.get("PartitionNum") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreatePartitionResponse(AbstractModel): """CreatePartition返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果集 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateRouteRequest(AbstractModel): """CreateRoute请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例唯一id :type InstanceId: str :param VipType: 路由网络类型(3:vpc路由;4:标准版支撑路由;7:专业版支撑路由) :type VipType: int :param VpcId: vpc网络Id :type VpcId: str :param SubnetId: vpc子网id :type SubnetId: str :param AccessType: 访问类型 :type AccessType: int :param AuthFlag: 是否需要权限管理 :type AuthFlag: int :param CallerAppid: 调用方appId :type CallerAppid: int :param PublicNetwork: 公网带宽 :type PublicNetwork: int :param Ip: vip地址 :type Ip: str """ self.InstanceId = None self.VipType = None self.VpcId = None self.SubnetId = None self.AccessType = None self.AuthFlag = None self.CallerAppid = None self.PublicNetwork = None self.Ip = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.VipType = params.get("VipType") self.VpcId = params.get("VpcId") self.SubnetId = params.get("SubnetId") self.AccessType = params.get("AccessType") self.AuthFlag = params.get("AuthFlag") self.CallerAppid = params.get("CallerAppid") self.PublicNetwork = params.get("PublicNetwork") self.Ip = params.get("Ip") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateRouteResponse(AbstractModel): """CreateRoute返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateTokenRequest(AbstractModel): """CreateToken请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param User: 用户名 :type User: str """ self.InstanceId = None self.User = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.User = params.get("User") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class CreateTokenResponse(AbstractModel): """CreateToken返回参数结构体 """ def __init__(self): r""" :param Result: token串 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class CreateTopicIpWhiteListRequest(AbstractModel): """CreateTopicIpWhiteList请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str :param IpWhiteList: ip白名单列表 :type IpWhiteList: list of str """ self.InstanceId = None self.TopicName = None self.IpWhiteList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.IpWhiteList = params.get("IpWhiteList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateTopicIpWhiteListResponse(AbstractModel): """CreateTopicIpWhiteList返回参数结构体 """ def __init__(self): r""" :param Result: 删除主题IP白名单结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateTopicRequest(AbstractModel): """CreateTopic请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param TopicName: 主题名称,是一个不超过 128 个字符的字符串,必须以字母为首字符,剩余部分可以包含字母、数字和横划线(-) :type TopicName: str :param PartitionNum: Partition个数,大于0 :type PartitionNum: int :param ReplicaNum: 副本个数,不能多于 broker 数,最大为3 :type ReplicaNum: int :param EnableWhiteList: ip白名单开关, 1:打开 0:关闭,默认不打开 :type EnableWhiteList: int :param IpWhiteList: Ip白名单列表,配额限制,enableWhileList=1时必选 :type IpWhiteList: list of str :param CleanUpPolicy: 清理日志策略,日志清理模式,默认为"delete"。"delete":日志按保存时间删除,"compact":日志按 key 压缩,"compact, delete":日志按 key 压缩且会按保存时间删除。 :type CleanUpPolicy: str :param Note: 主题备注,是一个不超过 64 个字符的字符串,必须以字母为首字符,剩余部分可以包含字母、数字和横划线(-) :type Note: str :param MinInsyncReplicas: 默认为1 :type MinInsyncReplicas: int :param UncleanLeaderElectionEnable: 是否允许未同步的副本选为leader,false:不允许,true:允许,默认不允许 :type UncleanLeaderElectionEnable: int :param RetentionMs: 可选参数。消息保留时间,单位ms,当前最小值为60000ms :type RetentionMs: int :param SegmentMs: Segment分片滚动的时长,单位ms,当前最小为3600000ms :type SegmentMs: int :param MaxMessageBytes: 主题消息最大值,单位为 Byte,最小值1024Byte(即1KB),最大值为8388608Byte(即8MB)。 :type MaxMessageBytes: int :param EnableAclRule: 预设ACL规则, 1:打开 0:关闭,默认不打开 :type EnableAclRule: int :param AclRuleName: 预设ACL规则的名称 :type AclRuleName: str :param RetentionBytes: 可选, 保留文件大小. 
默认为-1,单位bytes, 当前最小值为1048576B :type RetentionBytes: int :param Tags: 标签列表 :type Tags: list of Tag """ self.InstanceId = None self.TopicName = None self.PartitionNum = None self.ReplicaNum = None self.EnableWhiteList = None self.IpWhiteList = None self.CleanUpPolicy = None self.Note = None self.MinInsyncReplicas = None self.UncleanLeaderElectionEnable = None self.RetentionMs = None self.SegmentMs = None self.MaxMessageBytes = None self.EnableAclRule = None self.AclRuleName = None self.RetentionBytes = None self.Tags = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.PartitionNum = params.get("PartitionNum") self.ReplicaNum = params.get("ReplicaNum") self.EnableWhiteList = params.get("EnableWhiteList") self.IpWhiteList = params.get("IpWhiteList") self.CleanUpPolicy = params.get("CleanUpPolicy") self.Note = params.get("Note") self.MinInsyncReplicas = params.get("MinInsyncReplicas") self.UncleanLeaderElectionEnable = params.get("UncleanLeaderElectionEnable") self.RetentionMs = params.get("RetentionMs") self.SegmentMs = params.get("SegmentMs") self.MaxMessageBytes = params.get("MaxMessageBytes") self.EnableAclRule = params.get("EnableAclRule") self.AclRuleName = params.get("AclRuleName") self.RetentionBytes = params.get("RetentionBytes") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateTopicResp(AbstractModel): """创建主题返回 """ def __init__(self): r""" :param TopicId: 主题Id :type TopicId: str """ self.TopicId = None def _deserialize(self, params): self.TopicId = params.get("TopicId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CreateTopicResponse(AbstractModel): """CreateTopic返回参数结构体 """ def __init__(self): r""" :param Result: 返回创建结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.CreateTopicResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CreateTopicResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CreateUserRequest(AbstractModel): """CreateUser请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Name: 用户名称 :type Name: str :param Password: 用户密码 :type Password: str """ self.InstanceId = None self.Name = None self.Password = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Name = params.get("Name") self.Password = params.get("Password") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
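        # Illustrative usage sketch (hedged; not part of the generated model code).
        # CreateTopicRequest can be filled attribute-by-attribute or via
        # _deserialize(); the instance id and topic name are placeholders, and the
        # Tag keys ("TagKey"/"TagValue") are assumed from the Tag model defined
        # elsewhere in this module:
        #
        #     req = CreateTopicRequest()
        #     req._deserialize({
        #         "InstanceId": "ckafka-xxxxxxxx",
        #         "TopicName": "demo-topic",
        #         "PartitionNum": 3,
        #         "ReplicaNum": 2,
        #         "Tags": [{"TagKey": "env", "TagValue": "test"}],
        #     })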
% ",".join(memeber_set)) class CreateUserResponse(AbstractModel): """CreateUser返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class CtsdbConnectParam(AbstractModel): """Ctsdb连接源参数 """ def __init__(self): r""" :param Port: Ctsdb的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: Ctsdb连接源的实例vip 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Ctsdb连接源的vpcId 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: Ctsdb连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Ctsdb连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: Ctsdb连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str """ self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.Resource = None def _deserialize(self, params): self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CtsdbModifyConnectParam(AbstractModel): """Ctsdb连接源参数(更新) """ def __init__(self): r""" :param Port: Ctsdb的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: Ctsdb连接源的实例vip 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Ctsdb连接源的vpcId 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: Ctsdb连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Ctsdb连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: Ctsdb连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str """ self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.Resource = None def _deserialize(self, params): self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class CtsdbParam(AbstractModel): """Ctsdb类型入参 """ def __init__(self): r""" :param Resource: 连接管理实例资源 :type Resource: str :param CtsdbMetric: Ctsdb的metric :type CtsdbMetric: str """ self.Resource = None self.CtsdbMetric = None def _deserialize(self, params): self.Resource = params.get("Resource") self.CtsdbMetric = params.get("CtsdbMetric") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DatahubResource(AbstractModel): """Datahub资源配置 """ def __init__(self): r""" :param Type: 资源类型 :type Type: str :param KafkaParam: ckafka配置,Type为KAFKA时必填 注意:此字段可能返回 null,表示取不到有效值。 :type KafkaParam: :class:`tencentcloud.ckafka.v20190819.models.KafkaParam` :param EventBusParam: EB配置,Type为EB时必填 注意:此字段可能返回 null,表示取不到有效值。 :type EventBusParam: :class:`tencentcloud.ckafka.v20190819.models.EventBusParam` :param MongoDBParam: MongoDB配置,Type为MONGODB时必填 注意:此字段可能返回 null,表示取不到有效值。 :type MongoDBParam: :class:`tencentcloud.ckafka.v20190819.models.MongoDBParam` :param EsParam: Es配置,Type为ES时必填 注意:此字段可能返回 null,表示取不到有效值。 :type EsParam: :class:`tencentcloud.ckafka.v20190819.models.EsParam` :param TdwParam: Tdw配置,Type为TDW时必填 注意:此字段可能返回 null,表示取不到有效值。 :type TdwParam: :class:`tencentcloud.ckafka.v20190819.models.TdwParam` :param DtsParam: Dts配置,Type为DTS时必填 注意:此字段可能返回 null,表示取不到有效值。 :type DtsParam: :class:`tencentcloud.ckafka.v20190819.models.DtsParam` :param ClickHouseParam: ClickHouse配置,Type为CLICKHOUSE时必填 注意:此字段可能返回 null,表示取不到有效值。 :type ClickHouseParam: :class:`tencentcloud.ckafka.v20190819.models.ClickHouseParam` :param ClsParam: Cls配置,Type为CLS时必填 注意:此字段可能返回 null,表示取不到有效值。 :type ClsParam: :class:`tencentcloud.ckafka.v20190819.models.ClsParam` :param CosParam: Cos配置,Type为COS时必填 注意:此字段可能返回 null,表示取不到有效值。 :type CosParam: :class:`tencentcloud.ckafka.v20190819.models.CosParam` :param MySQLParam: MySQL配置,Type为MYSQL时必填 注意:此字段可能返回 null,表示取不到有效值。 :type MySQLParam: :class:`tencentcloud.ckafka.v20190819.models.MySQLParam` :param PostgreSQLParam: PostgreSQL配置,Type为POSTGRESQL或TDSQL_C_POSTGRESQL时必填 注意:此字段可能返回 null,表示取不到有效值。 :type PostgreSQLParam: :class:`tencentcloud.ckafka.v20190819.models.PostgreSQLParam` :param TopicParam: Topic配置,Type为Topic时必填 注意:此字段可能返回 null,表示取不到有效值。 :type TopicParam: :class:`tencentcloud.ckafka.v20190819.models.TopicParam` :param MariaDBParam: MariaDB配置,Type为MARIADB时必填 注意:此字段可能返回 null,表示取不到有效值。 :type MariaDBParam: :class:`tencentcloud.ckafka.v20190819.models.MariaDBParam` :param SQLServerParam: SQLServer配置,Type为SQLSERVER时必填 注意:此字段可能返回 null,表示取不到有效值。 :type SQLServerParam: :class:`tencentcloud.ckafka.v20190819.models.SQLServerParam` :param CtsdbParam: Ctsdb配置,Type为CTSDB时必填 注意:此字段可能返回 null,表示取不到有效值。 :type CtsdbParam: :class:`tencentcloud.ckafka.v20190819.models.CtsdbParam` """ self.Type = None self.KafkaParam = None self.EventBusParam = None self.MongoDBParam = None self.EsParam = None self.TdwParam = None self.DtsParam = None self.ClickHouseParam = None self.ClsParam = None self.CosParam = None self.MySQLParam = None self.PostgreSQLParam = None self.TopicParam = None self.MariaDBParam = None self.SQLServerParam = None self.CtsdbParam = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("KafkaParam") is not None: self.KafkaParam = KafkaParam() self.KafkaParam._deserialize(params.get("KafkaParam")) if params.get("EventBusParam") is not None: self.EventBusParam = EventBusParam() self.EventBusParam._deserialize(params.get("EventBusParam")) if params.get("MongoDBParam") is not None: self.MongoDBParam = MongoDBParam() self.MongoDBParam._deserialize(params.get("MongoDBParam")) if params.get("EsParam") is not None: self.EsParam = EsParam() self.EsParam._deserialize(params.get("EsParam")) if params.get("TdwParam") is not None: self.TdwParam = TdwParam() self.TdwParam._deserialize(params.get("TdwParam")) if params.get("DtsParam") is not None: self.DtsParam = DtsParam() self.DtsParam._deserialize(params.get("DtsParam")) if params.get("ClickHouseParam") 
is not None: self.ClickHouseParam = ClickHouseParam() self.ClickHouseParam._deserialize(params.get("ClickHouseParam")) if params.get("ClsParam") is not None: self.ClsParam = ClsParam() self.ClsParam._deserialize(params.get("ClsParam")) if params.get("CosParam") is not None: self.CosParam = CosParam() self.CosParam._deserialize(params.get("CosParam")) if params.get("MySQLParam") is not None: self.MySQLParam = MySQLParam() self.MySQLParam._deserialize(params.get("MySQLParam")) if params.get("PostgreSQLParam") is not None: self.PostgreSQLParam = PostgreSQLParam() self.PostgreSQLParam._deserialize(params.get("PostgreSQLParam")) if params.get("TopicParam") is not None: self.TopicParam = TopicParam() self.TopicParam._deserialize(params.get("TopicParam")) if params.get("MariaDBParam") is not None: self.MariaDBParam = MariaDBParam() self.MariaDBParam._deserialize(params.get("MariaDBParam")) if params.get("SQLServerParam") is not None: self.SQLServerParam = SQLServerParam() self.SQLServerParam._deserialize(params.get("SQLServerParam")) if params.get("CtsdbParam") is not None: self.CtsdbParam = CtsdbParam() self.CtsdbParam._deserialize(params.get("CtsdbParam")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DatahubTaskIdRes(AbstractModel): """Datahub请求的taskid """ def __init__(self): r""" :param TaskId: 任务id 注意:此字段可能返回 null,表示取不到有效值。 :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
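        # Illustrative usage sketch (hedged; not part of the generated model code).
        # DatahubResource is a tagged union: by convention only the sub-parameter
        # matching Type is supplied, so every other *Param attribute stays None.
        # Placeholder values, reusing the CtsdbParam fields defined above:
        #
        #     res = DatahubResource()
        #     res._deserialize({
        #         "Type": "CTSDB",
        #         "CtsdbParam": {"Resource": "ctsdb-xxxxxxxx", "CtsdbMetric": "cpu"},
        #     })
        #     assert res.MySQLParam is None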
% ",".join(memeber_set)) class DatahubTaskInfo(AbstractModel): """Datahub任务信息 """ def __init__(self): r""" :param TaskId: 任务ID :type TaskId: str :param TaskName: 任务名称 :type TaskName: str :param TaskType: 任务类型,SOURCE数据接入,SINK数据流出 :type TaskType: str :param Status: 状态,-1创建失败,0创建中,1运行中,2删除中,3已删除,4删除失败,5暂停中,6已暂停,7暂停失败,8恢复中,9恢复失败 :type Status: int :param SourceResource: 数据源 注意:此字段可能返回 null,表示取不到有效值。 :type SourceResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param TargetResource: 数据目标 注意:此字段可能返回 null,表示取不到有效值。 :type TargetResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param CreateTime: 任务创建时间 注意:此字段可能返回 null,表示取不到有效值。 :type CreateTime: str :param ErrorMessage: 异常信息 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorMessage: str :param TaskProgress: 创建进度百分比 注意:此字段可能返回 null,表示取不到有效值。 :type TaskProgress: float :param TaskCurrentStep: 任务当前处于的步骤 注意:此字段可能返回 null,表示取不到有效值。 :type TaskCurrentStep: str :param DatahubId: Datahub转储Id 注意:此字段可能返回 null,表示取不到有效值。 :type DatahubId: str :param StepList: 步骤列表 注意:此字段可能返回 null,表示取不到有效值。 :type StepList: list of str """ self.TaskId = None self.TaskName = None self.TaskType = None self.Status = None self.SourceResource = None self.TargetResource = None self.CreateTime = None self.ErrorMessage = None self.TaskProgress = None self.TaskCurrentStep = None self.DatahubId = None self.StepList = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.TaskName = params.get("TaskName") self.TaskType = params.get("TaskType") self.Status = params.get("Status") if params.get("SourceResource") is not None: self.SourceResource = DatahubResource() self.SourceResource._deserialize(params.get("SourceResource")) if params.get("TargetResource") is not None: self.TargetResource = DatahubResource() self.TargetResource._deserialize(params.get("TargetResource")) self.CreateTime = params.get("CreateTime") self.ErrorMessage = params.get("ErrorMessage") self.TaskProgress = params.get("TaskProgress") self.TaskCurrentStep = params.get("TaskCurrentStep") self.DatahubId = params.get("DatahubId") self.StepList = params.get("StepList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DateParam(AbstractModel): """数据处理——Value处理参数——转换时间格式参数 """ def __init__(self): r""" :param Format: 时间格式 :type Format: str :param TargetType: 输入类型,string,unix时间戳,默认string 注意:此字段可能返回 null,表示取不到有效值。 :type TargetType: str :param TimeZone: 时区,默认GMT+8 注意:此字段可能返回 null,表示取不到有效值。 :type TimeZone: str """ self.Format = None self.TargetType = None self.TimeZone = None def _deserialize(self, params): self.Format = params.get("Format") self.TargetType = params.get("TargetType") self.TimeZone = params.get("TimeZone") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DeleteAclRequest(AbstractModel): """DeleteAcl请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id信息 :type InstanceId: str :param ResourceType: Acl资源类型,(2:TOPIC,3:GROUP,4:CLUSTER) :type ResourceType: int :param ResourceName: 资源名称,和resourceType相关,如当resourceType为TOPIC时,则该字段表示topic名称,当resourceType为GROUP时,该字段表示group名称,当resourceType为CLUSTER时,该字段可为空。 :type ResourceName: str :param Operation: Acl操作方式,(2:ALL,3:READ,4:WRITE,5:CREATE,6:DELETE,7:ALTER,8:DESCRIBE,9:CLUSTER_ACTION,10:DESCRIBE_CONFIGS,11:ALTER_CONFIGS,12:IDEMPOTENT_WRITE) :type Operation: int :param PermissionType: 权限类型,(2:DENY,3:ALLOW),当前ckakfa支持ALLOW(相当于白名单),其它用于后续兼容开源kafka的acl时使用 :type PermissionType: int :param Host: 默认为\*,表示任何host都可以访问,当前ckafka不支持host为\*,但是后面开源kafka的产品化会直接支持 :type Host: str :param Principal: 用户列表,默认为*,表示任何user都可以访问,当前用户只能是用户列表中包含的用户 :type Principal: str """ self.InstanceId = None self.ResourceType = None self.ResourceName = None self.Operation = None self.PermissionType = None self.Host = None self.Principal = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.ResourceType = params.get("ResourceType") self.ResourceName = params.get("ResourceName") self.Operation = params.get("Operation") self.PermissionType = params.get("PermissionType") self.Host = params.get("Host") self.Principal = params.get("Principal") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteAclResponse(AbstractModel): """DeleteAcl返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteAclRuleRequest(AbstractModel): """DeleteAclRule请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id信息 :type InstanceId: str :param RuleName: acl规则名称 :type RuleName: str """ self.InstanceId = None self.RuleName = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.RuleName = params.get("RuleName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteAclRuleResponse(AbstractModel): """DeleteAclRule返回参数结构体 """ def __init__(self): r""" :param Result: 返回被删除的规则的ID :type Result: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): self.Result = params.get("Result") self.RequestId = params.get("RequestId") class DeleteConnectResourceRequest(AbstractModel): """DeleteConnectResource请求参数结构体 """ def __init__(self): r""" :param ResourceId: 连接源的Id :type ResourceId: str """ self.ResourceId = None def _deserialize(self, params): self.ResourceId = params.get("ResourceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DeleteConnectResourceResponse(AbstractModel): """DeleteConnectResource返回参数结构体 """ def __init__(self): r""" :param Result: 连接源的Id :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConnectResourceResourceIdResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConnectResourceResourceIdResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteDatahubTaskRequest(AbstractModel): """DeleteDatahubTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 任务id :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteDatahubTaskResponse(AbstractModel): """DeleteDatahubTask返回参数结构体 """ def __init__(self): r""" :param Result: 任务id 注意:此字段可能返回 null,表示取不到有效值。 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DatahubTaskIdRes` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DatahubTaskIdRes() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteDatahubTopicRequest(AbstractModel): """DeleteDatahubTopic请求参数结构体 """ def __init__(self): r""" :param Name: Topic名称 :type Name: str """ self.Name = None def _deserialize(self, params): self.Name = params.get("Name") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteDatahubTopicResponse(AbstractModel): """DeleteDatahubTopic返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果集 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteGroupRequest(AbstractModel): """DeleteGroup请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Group: 消费分组 :type Group: str """ self.InstanceId = None self.Group = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Group = params.get("Group") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DeleteGroupResponse(AbstractModel): """DeleteGroup返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteInstancePreRequest(AbstractModel): """DeleteInstancePre请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str """ self.InstanceId = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteInstancePreResponse(AbstractModel): """DeleteInstancePre返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.CreateInstancePreResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CreateInstancePreResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteRouteRequest(AbstractModel): """DeleteRoute请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例唯一id :type InstanceId: str :param RouteId: 路由id :type RouteId: int :param CallerAppid: 调用方appId :type CallerAppid: int :param DeleteRouteTime: 删除路由时间 :type DeleteRouteTime: str """ self.InstanceId = None self.RouteId = None self.CallerAppid = None self.DeleteRouteTime = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.RouteId = params.get("RouteId") self.CallerAppid = params.get("CallerAppid") self.DeleteRouteTime = params.get("DeleteRouteTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteRouteResponse(AbstractModel): """DeleteRoute返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteRouteTriggerTimeRequest(AbstractModel): """DeleteRouteTriggerTime请求参数结构体 """ def __init__(self): r""" :param DelayTime: 修改时间 :type DelayTime: str """ self.DelayTime = None def _deserialize(self, params): self.DelayTime = params.get("DelayTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DeleteRouteTriggerTimeResponse(AbstractModel): """DeleteRouteTriggerTime返回参数结构体 """ def __init__(self): r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.RequestId = None def _deserialize(self, params): self.RequestId = params.get("RequestId") class DeleteTopicIpWhiteListRequest(AbstractModel): """DeleteTopicIpWhiteList请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str :param IpWhiteList: ip白名单列表 :type IpWhiteList: list of str """ self.InstanceId = None self.TopicName = None self.IpWhiteList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.IpWhiteList = params.get("IpWhiteList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteTopicIpWhiteListResponse(AbstractModel): """DeleteTopicIpWhiteList返回参数结构体 """ def __init__(self): r""" :param Result: 删除主题IP白名单结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteTopicRequest(AbstractModel): """DeleteTopic请求参数结构体 """ def __init__(self): r""" :param InstanceId: ckafka 实例Id :type InstanceId: str :param TopicName: ckafka 主题名称 :type TopicName: str """ self.InstanceId = None self.TopicName = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DeleteTopicResponse(AbstractModel): """DeleteTopic返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果集 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DeleteUserRequest(AbstractModel): """DeleteUser请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Name: 用户名称 :type Name: str """ self.InstanceId = None self.Name = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Name = params.get("Name") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DeleteUserResponse(AbstractModel): """DeleteUser返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeACLRequest(AbstractModel): """DescribeACL请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param ResourceType: Acl资源类型,(2:TOPIC,3:GROUP,4:CLUSTER) :type ResourceType: int :param ResourceName: 资源名称,和resourceType相关,如当resourceType为TOPIC时,则该字段表示topic名称,当resourceType为GROUP时,该字段表示group名称,当resourceType为CLUSTER时,该字段可为空。 :type ResourceName: str :param Offset: 偏移位置 :type Offset: int :param Limit: 个数限制 :type Limit: int :param SearchWord: 关键字匹配 :type SearchWord: str """ self.InstanceId = None self.ResourceType = None self.ResourceName = None self.Offset = None self.Limit = None self.SearchWord = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.ResourceType = params.get("ResourceType") self.ResourceName = params.get("ResourceName") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.SearchWord = params.get("SearchWord") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeACLResponse(AbstractModel): """DescribeACL返回参数结构体 """ def __init__(self): r""" :param Result: 返回的ACL结果集对象 :type Result: :class:`tencentcloud.ckafka.v20190819.models.AclResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = AclResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeAppInfoRequest(AbstractModel): """DescribeAppInfo请求参数结构体 """ def __init__(self): r""" :param Offset: 偏移位置 :type Offset: int :param Limit: 本次查询用户数目最大数量限制,最大值为50,默认50 :type Limit: int """ self.Offset = None self.Limit = None def _deserialize(self, params): self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeAppInfoResponse(AbstractModel): """DescribeAppInfo返回参数结构体 """ def __init__(self): r""" :param Result: 返回的符合要求的App Id列表 :type Result: :class:`tencentcloud.ckafka.v20190819.models.AppIdResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = AppIdResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeCkafkaZoneRequest(AbstractModel): """DescribeCkafkaZone请求参数结构体 """ def __init__(self): r""" :param CdcId: cdc专业集群业务参数 :type CdcId: str """ self.CdcId = None def _deserialize(self, params): self.CdcId = params.get("CdcId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeCkafkaZoneResponse(AbstractModel): """DescribeCkafkaZone返回参数结构体 """ def __init__(self): r""" :param Result: 查询结果复杂对象实体 :type Result: :class:`tencentcloud.ckafka.v20190819.models.ZoneResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ZoneResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeConnectResource(AbstractModel): """查询连接源具体数据的返参 """ def __init__(self): r""" :param ResourceId: 连接源的Id 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceId: str :param ResourceName: 连接源名称 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceName: str :param Description: 连接源描述 注意:此字段可能返回 null,表示取不到有效值。 :type Description: str :param Type: 连接源类型 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Status: 连接源的状态 注意:此字段可能返回 null,表示取不到有效值。 :type Status: int :param CreateTime: 连接源的创建时间 注意:此字段可能返回 null,表示取不到有效值。 :type CreateTime: str :param ErrorMessage: 连接源的异常信息 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorMessage: str :param CurrentStep: 连接源的当前所处步骤 注意:此字段可能返回 null,表示取不到有效值。 :type CurrentStep: str :param DatahubTaskCount: 该连接源关联的Datahub任务数 注意:此字段可能返回 null,表示取不到有效值。 :type DatahubTaskCount: int :param DtsConnectParam: Dts配置,Type为DTS时返回 注意:此字段可能返回 null,表示取不到有效值。 :type DtsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DtsConnectParam` :param MongoDBConnectParam: MongoDB配置,Type为MONGODB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MongoDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MongoDBConnectParam` :param EsConnectParam: Es配置,Type为ES时返回 注意:此字段可能返回 null,表示取不到有效值。 :type EsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.EsConnectParam` :param ClickHouseConnectParam: ClickHouse配置,Type为CLICKHOUSE时返回 注意:此字段可能返回 null,表示取不到有效值。 :type ClickHouseConnectParam: :class:`tencentcloud.ckafka.v20190819.models.ClickHouseConnectParam` :param MySQLConnectParam: MySQL配置,Type为MYSQL或TDSQL_C_MYSQL时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MySQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MySQLConnectParam` :param PostgreSQLConnectParam: PostgreSQL配置,Type为POSTGRESQL或TDSQL_C_POSTGRESQL时返回 注意:此字段可能返回 null,表示取不到有效值。 :type PostgreSQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.PostgreSQLConnectParam` :param MariaDBConnectParam: MariaDB配置,Type为MARIADB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MariaDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MariaDBConnectParam` :param SQLServerConnectParam: 
SQLServer配置,Type为SQLSERVER时返回 注意:此字段可能返回 null,表示取不到有效值。 :type SQLServerConnectParam: :class:`tencentcloud.ckafka.v20190819.models.SQLServerConnectParam` :param CtsdbConnectParam: Ctsdb配置,Type为CTSDB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type CtsdbConnectParam: :class:`tencentcloud.ckafka.v20190819.models.CtsdbConnectParam` :param DorisConnectParam: Doris 配置,Type 为 DORIS 时返回 注意:此字段可能返回 null,表示取不到有效值。 :type DorisConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DorisConnectParam` """ self.ResourceId = None self.ResourceName = None self.Description = None self.Type = None self.Status = None self.CreateTime = None self.ErrorMessage = None self.CurrentStep = None self.DatahubTaskCount = None self.DtsConnectParam = None self.MongoDBConnectParam = None self.EsConnectParam = None self.ClickHouseConnectParam = None self.MySQLConnectParam = None self.PostgreSQLConnectParam = None self.MariaDBConnectParam = None self.SQLServerConnectParam = None self.CtsdbConnectParam = None self.DorisConnectParam = None def _deserialize(self, params): self.ResourceId = params.get("ResourceId") self.ResourceName = params.get("ResourceName") self.Description = params.get("Description") self.Type = params.get("Type") self.Status = params.get("Status") self.CreateTime = params.get("CreateTime") self.ErrorMessage = params.get("ErrorMessage") self.CurrentStep = params.get("CurrentStep") self.DatahubTaskCount = params.get("DatahubTaskCount") if params.get("DtsConnectParam") is not None: self.DtsConnectParam = DtsConnectParam() self.DtsConnectParam._deserialize(params.get("DtsConnectParam")) if params.get("MongoDBConnectParam") is not None: self.MongoDBConnectParam = MongoDBConnectParam() self.MongoDBConnectParam._deserialize(params.get("MongoDBConnectParam")) if params.get("EsConnectParam") is not None: self.EsConnectParam = EsConnectParam() self.EsConnectParam._deserialize(params.get("EsConnectParam")) if params.get("ClickHouseConnectParam") is not None: self.ClickHouseConnectParam = ClickHouseConnectParam() self.ClickHouseConnectParam._deserialize(params.get("ClickHouseConnectParam")) if params.get("MySQLConnectParam") is not None: self.MySQLConnectParam = MySQLConnectParam() self.MySQLConnectParam._deserialize(params.get("MySQLConnectParam")) if params.get("PostgreSQLConnectParam") is not None: self.PostgreSQLConnectParam = PostgreSQLConnectParam() self.PostgreSQLConnectParam._deserialize(params.get("PostgreSQLConnectParam")) if params.get("MariaDBConnectParam") is not None: self.MariaDBConnectParam = MariaDBConnectParam() self.MariaDBConnectParam._deserialize(params.get("MariaDBConnectParam")) if params.get("SQLServerConnectParam") is not None: self.SQLServerConnectParam = SQLServerConnectParam() self.SQLServerConnectParam._deserialize(params.get("SQLServerConnectParam")) if params.get("CtsdbConnectParam") is not None: self.CtsdbConnectParam = CtsdbConnectParam() self.CtsdbConnectParam._deserialize(params.get("CtsdbConnectParam")) if params.get("DorisConnectParam") is not None: self.DorisConnectParam = DorisConnectParam() self.DorisConnectParam._deserialize(params.get("DorisConnectParam")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
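        # Illustrative usage sketch (hedged; not part of the generated model code).
        # In a deserialized DescribeConnectResource only the *ConnectParam that
        # matches Type is populated, so callers usually dispatch on Type before
        # touching the nested object:
        #
        #     def endpoint_of(res):
        #         if res.Type == "CTSDB" and res.CtsdbConnectParam is not None:
        #             return (res.CtsdbConnectParam.ServiceVip,
        #                     res.CtsdbConnectParam.Port)
        #         return None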
% ",".join(memeber_set)) class DescribeConnectResourceRequest(AbstractModel): """DescribeConnectResource请求参数结构体 """ def __init__(self): r""" :param ResourceId: 连接源的Id :type ResourceId: str """ self.ResourceId = None def _deserialize(self, params): self.ResourceId = params.get("ResourceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeConnectResourceResp(AbstractModel): """查询连接源具体数据的返参 """ def __init__(self): r""" :param ResourceId: 连接源的Id 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceId: str :param ResourceName: 连接源名称 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceName: str :param Description: 连接源描述 注意:此字段可能返回 null,表示取不到有效值。 :type Description: str :param Type: 连接源类型 注意:此字段可能返回 null,表示取不到有效值。 :type Type: str :param Status: 连接源的状态 注意:此字段可能返回 null,表示取不到有效值。 :type Status: int :param CreateTime: 连接源的创建时间 注意:此字段可能返回 null,表示取不到有效值。 :type CreateTime: str :param ErrorMessage: 连接源的异常信息 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorMessage: str :param CurrentStep: 连接源的当前所处步骤 注意:此字段可能返回 null,表示取不到有效值。 :type CurrentStep: str :param StepList: 步骤列表 注意:此字段可能返回 null,表示取不到有效值。 :type StepList: list of str :param MySQLConnectParam: MySQL配置,Type为MYSQL或TDSQL_C_MYSQL时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MySQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MySQLConnectParam` :param PostgreSQLConnectParam: PostgreSQL配置,Type为POSTGRESQL或TDSQL_C_POSTGRESQL时返回 注意:此字段可能返回 null,表示取不到有效值。 :type PostgreSQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.PostgreSQLConnectParam` :param DtsConnectParam: Dts配置,Type为DTS时返回 注意:此字段可能返回 null,表示取不到有效值。 :type DtsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DtsConnectParam` :param MongoDBConnectParam: MongoDB配置,Type为MONGODB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MongoDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MongoDBConnectParam` :param EsConnectParam: Es配置,Type为ES时返回 注意:此字段可能返回 null,表示取不到有效值。 :type EsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.EsConnectParam` :param ClickHouseConnectParam: ClickHouse配置,Type为CLICKHOUSE时返回 注意:此字段可能返回 null,表示取不到有效值。 :type ClickHouseConnectParam: :class:`tencentcloud.ckafka.v20190819.models.ClickHouseConnectParam` :param MariaDBConnectParam: MariaDB配置,Type为MARIADB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type MariaDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MariaDBConnectParam` :param SQLServerConnectParam: SQLServer配置,Type为SQLSERVER时返回 注意:此字段可能返回 null,表示取不到有效值。 :type SQLServerConnectParam: :class:`tencentcloud.ckafka.v20190819.models.SQLServerConnectParam` :param CtsdbConnectParam: Ctsdb配置,Type为CTSDB时返回 注意:此字段可能返回 null,表示取不到有效值。 :type CtsdbConnectParam: :class:`tencentcloud.ckafka.v20190819.models.CtsdbConnectParam` :param DorisConnectParam: Doris 配置,Type 为 DORIS 时返回 注意:此字段可能返回 null,表示取不到有效值。 :type DorisConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DorisConnectParam` """ self.ResourceId = None self.ResourceName = None self.Description = None self.Type = None self.Status = None self.CreateTime = None self.ErrorMessage = None self.CurrentStep = None self.StepList = None self.MySQLConnectParam = None self.PostgreSQLConnectParam = None self.DtsConnectParam = None self.MongoDBConnectParam = None self.EsConnectParam = None self.ClickHouseConnectParam = None self.MariaDBConnectParam = None self.SQLServerConnectParam = None self.CtsdbConnectParam = None self.DorisConnectParam = None def _deserialize(self, params): 
self.ResourceId = params.get("ResourceId") self.ResourceName = params.get("ResourceName") self.Description = params.get("Description") self.Type = params.get("Type") self.Status = params.get("Status") self.CreateTime = params.get("CreateTime") self.ErrorMessage = params.get("ErrorMessage") self.CurrentStep = params.get("CurrentStep") self.StepList = params.get("StepList") if params.get("MySQLConnectParam") is not None: self.MySQLConnectParam = MySQLConnectParam() self.MySQLConnectParam._deserialize(params.get("MySQLConnectParam")) if params.get("PostgreSQLConnectParam") is not None: self.PostgreSQLConnectParam = PostgreSQLConnectParam() self.PostgreSQLConnectParam._deserialize(params.get("PostgreSQLConnectParam")) if params.get("DtsConnectParam") is not None: self.DtsConnectParam = DtsConnectParam() self.DtsConnectParam._deserialize(params.get("DtsConnectParam")) if params.get("MongoDBConnectParam") is not None: self.MongoDBConnectParam = MongoDBConnectParam() self.MongoDBConnectParam._deserialize(params.get("MongoDBConnectParam")) if params.get("EsConnectParam") is not None: self.EsConnectParam = EsConnectParam() self.EsConnectParam._deserialize(params.get("EsConnectParam")) if params.get("ClickHouseConnectParam") is not None: self.ClickHouseConnectParam = ClickHouseConnectParam() self.ClickHouseConnectParam._deserialize(params.get("ClickHouseConnectParam")) if params.get("MariaDBConnectParam") is not None: self.MariaDBConnectParam = MariaDBConnectParam() self.MariaDBConnectParam._deserialize(params.get("MariaDBConnectParam")) if params.get("SQLServerConnectParam") is not None: self.SQLServerConnectParam = SQLServerConnectParam() self.SQLServerConnectParam._deserialize(params.get("SQLServerConnectParam")) if params.get("CtsdbConnectParam") is not None: self.CtsdbConnectParam = CtsdbConnectParam() self.CtsdbConnectParam._deserialize(params.get("CtsdbConnectParam")) if params.get("DorisConnectParam") is not None: self.DorisConnectParam = DorisConnectParam() self.DorisConnectParam._deserialize(params.get("DorisConnectParam")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeConnectResourceResponse(AbstractModel): """DescribeConnectResource返回参数结构体 """ def __init__(self): r""" :param Result: 连接源的Id 注意:此字段可能返回 null,表示取不到有效值。 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DescribeConnectResourceResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DescribeConnectResourceResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeConnectResourcesRequest(AbstractModel): """DescribeConnectResources请求参数结构体 """ def __init__(self): r""" :param Type: 连接源类型 :type Type: str :param SearchWord: 连接源名称的关键字查询 :type SearchWord: str :param Offset: 分页偏移量,默认为0 :type Offset: int :param Limit: 返回数量,默认为20,最大值为100 :type Limit: int """ self.Type = None self.SearchWord = None self.Offset = None self.Limit = None def _deserialize(self, params): self.Type = params.get("Type") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeConnectResourcesResp(AbstractModel): """查询连接源列表的返参 """ def __init__(self): r""" :param TotalCount: 连接源个数 :type TotalCount: int :param ConnectResourceList: 连接源数据 注意:此字段可能返回 null,表示取不到有效值。 :type ConnectResourceList: list of DescribeConnectResource """ self.TotalCount = None self.ConnectResourceList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("ConnectResourceList") is not None: self.ConnectResourceList = [] for item in params.get("ConnectResourceList"): obj = DescribeConnectResource() obj._deserialize(item) self.ConnectResourceList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeConnectResourcesResponse(AbstractModel): """DescribeConnectResources返回参数结构体 """ def __init__(self): r""" :param Result: 连接源列表 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DescribeConnectResourcesResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DescribeConnectResourcesResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeConsumerGroupRequest(AbstractModel): """DescribeConsumerGroup请求参数结构体 """ def __init__(self): r""" :param InstanceId: ckafka实例id。 :type InstanceId: str :param GroupName: 可选,用户需要查询的group名称。 :type GroupName: str :param TopicName: 可选,用户需要查询的group中的对应的topic名称,如果指定了该参数,而group又未指定则忽略该参数。 :type TopicName: str :param Limit: 本次返回个数限制 :type Limit: int :param Offset: 偏移位置 :type Offset: int """ self.InstanceId = None self.GroupName = None self.TopicName = None self.Limit = None self.Offset = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.GroupName = params.get("GroupName") self.TopicName = params.get("TopicName") self.Limit = params.get("Limit") self.Offset = params.get("Offset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeConsumerGroupResponse(AbstractModel): """DescribeConsumerGroup返回参数结构体 """ def __init__(self): r""" :param Result: 返回的消费分组信息 :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConsumerGroupResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConsumerGroupResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeDatahubGroupOffsetsRequest(AbstractModel): """DescribeDatahubGroupOffsets请求参数结构体 """ def __init__(self): r""" :param Name: (过滤条件)按照实例 ID 过滤 :type Name: str :param Group: Kafka 消费分组 :type Group: str :param SearchWord: 模糊匹配 topicName :type SearchWord: str :param Offset: 本次查询的偏移位置,默认为0 :type Offset: int :param Limit: 本次返回结果的最大个数,默认为50,最大值为50 :type Limit: int """ self.Name = None self.Group = None self.SearchWord = None self.Offset = None self.Limit = None def _deserialize(self, params): self.Name = params.get("Name") self.Group = params.get("Group") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeDatahubGroupOffsetsResponse(AbstractModel): """DescribeDatahubGroupOffsets返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果对象 :type Result: :class:`tencentcloud.ckafka.v20190819.models.GroupOffsetResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = GroupOffsetResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeDatahubTaskRequest(AbstractModel): """DescribeDatahubTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 任务id :type TaskId: str """ self.TaskId = None def _deserialize(self, params): self.TaskId = params.get("TaskId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDatahubTaskRes(AbstractModel): """查询Datahub任务信息 """ def __init__(self): r""" :param TaskId: 任务ID :type TaskId: str :param TaskName: 任务名称 :type TaskName: str :param TaskType: 任务类型,SOURCE数据接入,SINK数据流出 :type TaskType: str :param Status: 状态,-1创建失败,0创建中,1运行中,2删除中,3已删除,4删除失败,5暂停中,6已暂停,7暂停失败,8恢复中,9恢复失败 :type Status: int :param SourceResource: 数据源 注意:此字段可能返回 null,表示取不到有效值。 :type SourceResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param TargetResource: 数据目标 注意:此字段可能返回 null,表示取不到有效值。 :type TargetResource: :class:`tencentcloud.ckafka.v20190819.models.DatahubResource` :param Connections: Connection列表 注意:此字段可能返回 null,表示取不到有效值。 :type Connections: list of Connection :param CreateTime: 任务创建时间 注意:此字段可能返回 null,表示取不到有效值。 :type CreateTime: str :param TransformParam: 消息处理规则 注意:此字段可能返回 null,表示取不到有效值。 :type TransformParam: :class:`tencentcloud.ckafka.v20190819.models.TransformParam` :param DatahubId: 数据接入ID 注意:此字段可能返回 null,表示取不到有效值。 :type DatahubId: str :param SchemaId: 绑定的SchemaId 注意:此字段可能返回 null,表示取不到有效值。 :type SchemaId: str :param SchemaName: 绑定的Schema名称 注意:此字段可能返回 null,表示取不到有效值。 :type SchemaName: str :param TransformsParam: 数据处理规则 注意:此字段可能返回 null,表示取不到有效值。 :type TransformsParam: :class:`tencentcloud.ckafka.v20190819.models.TransformsParam` :param ErrorMessage: 异常信息 注意:此字段可能返回 null,表示取不到有效值。 :type ErrorMessage: str :param Tags: 任务标签列表 注意:此字段可能返回 null,表示取不到有效值。 :type Tags: list of Tag """ self.TaskId = None self.TaskName = None self.TaskType = None self.Status = None self.SourceResource = None self.TargetResource = None self.Connections = None self.CreateTime = None self.TransformParam = None self.DatahubId = None self.SchemaId = None self.SchemaName = None self.TransformsParam = None self.ErrorMessage = None self.Tags = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.TaskName = params.get("TaskName") self.TaskType = params.get("TaskType") self.Status = params.get("Status") if params.get("SourceResource") is not None: self.SourceResource = DatahubResource() self.SourceResource._deserialize(params.get("SourceResource")) if params.get("TargetResource") is not None: self.TargetResource = DatahubResource() self.TargetResource._deserialize(params.get("TargetResource")) if params.get("Connections") is not None: self.Connections = [] for item in params.get("Connections"): obj = Connection() obj._deserialize(item) self.Connections.append(obj) self.CreateTime = params.get("CreateTime") if params.get("TransformParam") is not None: self.TransformParam = 
TransformParam() self.TransformParam._deserialize(params.get("TransformParam")) self.DatahubId = params.get("DatahubId") self.SchemaId = params.get("SchemaId") self.SchemaName = params.get("SchemaName") if params.get("TransformsParam") is not None: self.TransformsParam = TransformsParam() self.TransformsParam._deserialize(params.get("TransformsParam")) self.ErrorMessage = params.get("ErrorMessage") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDatahubTaskResponse(AbstractModel): """DescribeDatahubTask返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DescribeDatahubTaskRes` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DescribeDatahubTaskRes() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeDatahubTasksRequest(AbstractModel): """DescribeDatahubTasks请求参数结构体 """ def __init__(self): r""" :param Limit: 返回数量,默认为20,最大值为100 :type Limit: int :param Offset: 分页偏移量,默认为0 :type Offset: int :param SearchWord: 过滤条件,按照 TaskName 过滤,支持模糊查询 :type SearchWord: str :param TargetType: 转储的目标类型 :type TargetType: str :param TaskType: 任务类型,SOURCE数据接入,SINK数据流出 :type TaskType: str :param SourceType: 转储的源类型 :type SourceType: str :param Resource: 转储的资源 :type Resource: str """ self.Limit = None self.Offset = None self.SearchWord = None self.TargetType = None self.TaskType = None self.SourceType = None self.Resource = None def _deserialize(self, params): self.Limit = params.get("Limit") self.Offset = params.get("Offset") self.SearchWord = params.get("SearchWord") self.TargetType = params.get("TargetType") self.TaskType = params.get("TaskType") self.SourceType = params.get("SourceType") self.Resource = params.get("Resource") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeDatahubTasksRes(AbstractModel): """查询Datahub任务列表 """ def __init__(self): r""" :param TotalCount: 任务总数 :type TotalCount: int :param TaskList: Datahub任务信息列表 注意:此字段可能返回 null,表示取不到有效值。 :type TaskList: list of DatahubTaskInfo """ self.TotalCount = None self.TaskList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("TaskList") is not None: self.TaskList = [] for item in params.get("TaskList"): obj = DatahubTaskInfo() obj._deserialize(item) self.TaskList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
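        # Illustrative usage sketch (hedged; not part of the generated model code).
        # Task listings filter by TaskType ("SOURCE" or "SINK") and a fuzzy
        # TaskName match; the matching response wraps a DescribeDatahubTasksRes
        # whose TaskList holds DatahubTaskInfo entries. Placeholder values:
        #
        #     req = DescribeDatahubTasksRequest()
        #     req.TaskType = "SOURCE"
        #     req.SearchWord = "demo"
        #     req.Limit = 20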
% ",".join(memeber_set)) class DescribeDatahubTasksResponse(AbstractModel): """DescribeDatahubTasks返回参数结构体 """ def __init__(self): r""" :param Result: 返回任务查询结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DescribeDatahubTasksRes` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DescribeDatahubTasksRes() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeGroup(AbstractModel): """DescribeGroup返回实体 """ def __init__(self): r""" :param Group: groupId :type Group: str :param Protocol: 该 group 使用的协议。 :type Protocol: str """ self.Group = None self.Protocol = None def _deserialize(self, params): self.Group = params.get("Group") self.Protocol = params.get("Protocol") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeGroupInfoRequest(AbstractModel): """DescribeGroupInfo请求参数结构体 """ def __init__(self): r""" :param InstanceId: (过滤条件)按照实例 ID 过滤。 :type InstanceId: str :param GroupList: Kafka 消费分组,Consumer-group,这里是数组形式,格式:GroupList.0=xxx&GroupList.1=yyy。 :type GroupList: list of str """ self.InstanceId = None self.GroupList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.GroupList = params.get("GroupList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeGroupInfoResponse(AbstractModel): """DescribeGroupInfo返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: list of GroupInfoResponse :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = [] for item in params.get("Result"): obj = GroupInfoResponse() obj._deserialize(item) self.Result.append(obj) self.RequestId = params.get("RequestId") class DescribeGroupOffsetsRequest(AbstractModel): """DescribeGroupOffsets请求参数结构体 """ def __init__(self): r""" :param InstanceId: (过滤条件)按照实例 ID 过滤 :type InstanceId: str :param Group: Kafka 消费分组 :type Group: str :param Topics: group 订阅的主题名称数组,如果没有该数组,则表示指定的 group 下所有 topic 信息 :type Topics: list of str :param SearchWord: 模糊匹配 topicName :type SearchWord: str :param Offset: 本次查询的偏移位置,默认为0 :type Offset: int :param Limit: 本次返回结果的最大个数,默认为50,最大值为50 :type Limit: int """ self.InstanceId = None self.Group = None self.Topics = None self.SearchWord = None self.Offset = None self.Limit = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Group = params.get("Group") self.Topics = params.get("Topics") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeGroupOffsetsResponse(AbstractModel): """DescribeGroupOffsets返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果对象 :type Result: :class:`tencentcloud.ckafka.v20190819.models.GroupOffsetResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = GroupOffsetResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeGroupRequest(AbstractModel): """DescribeGroup请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param SearchWord: 搜索关键字 :type SearchWord: str :param Offset: 偏移量 :type Offset: int :param Limit: 最大返回数量 :type Limit: int """ self.InstanceId = None self.SearchWord = None self.Offset = None self.Limit = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeGroupResponse(AbstractModel): """DescribeGroup返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果集列表 :type Result: :class:`tencentcloud.ckafka.v20190819.models.GroupResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = GroupResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeInstanceAttributesRequest(AbstractModel): """DescribeInstanceAttributes请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str """ self.InstanceId = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeInstanceAttributesResponse(AbstractModel): """DescribeInstanceAttributes返回参数结构体 """ def __init__(self): r""" :param Result: 实例属性返回结果对象。 :type Result: :class:`tencentcloud.ckafka.v20190819.models.InstanceAttributesResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = InstanceAttributesResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeInstancesDetailRequest(AbstractModel): """DescribeInstancesDetail请求参数结构体 """ def __init__(self): r""" :param InstanceId: (过滤条件)按照实例ID过滤 :type InstanceId: str :param SearchWord: (过滤条件)按照实例名称过滤,支持模糊查询 :type SearchWord: str :param Status: (过滤条件)实例的状态。0:创建中,1:运行中,2:删除中,不填默认返回全部 :type Status: list of int :param Offset: 偏移量,不填默认为0。 :type Offset: int :param Limit: 返回数量,不填则默认10,最大值20。 :type Limit: int :param TagKey: 匹配标签key值。 :type TagKey: str :param Filters: 过滤器。filter.Name 支持('Ip', 'VpcId', 'SubNetId', 'InstanceType','InstanceId') ,filter.Values最多传递10个值. 
:type Filters: list of Filter :param InstanceIds: 已经废弃, 使用InstanceIdList :type InstanceIds: str :param InstanceIdList: 按照实例ID过滤 :type InstanceIdList: list of str """ self.InstanceId = None self.SearchWord = None self.Status = None self.Offset = None self.Limit = None self.TagKey = None self.Filters = None self.InstanceIds = None self.InstanceIdList = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Status = params.get("Status") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.TagKey = params.get("TagKey") if params.get("Filters") is not None: self.Filters = [] for item in params.get("Filters"): obj = Filter() obj._deserialize(item) self.Filters.append(obj) self.InstanceIds = params.get("InstanceIds") self.InstanceIdList = params.get("InstanceIdList") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeInstancesDetailResponse(AbstractModel): """DescribeInstancesDetail返回参数结构体 """ def __init__(self): r""" :param Result: 返回的实例详情结果对象 :type Result: :class:`tencentcloud.ckafka.v20190819.models.InstanceDetailResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = InstanceDetailResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeInstancesRequest(AbstractModel): """DescribeInstances请求参数结构体 """ def __init__(self): r""" :param InstanceId: (过滤条件)按照实例ID过滤 :type InstanceId: str :param SearchWord: (过滤条件)按照实例名称过滤,支持模糊查询 :type SearchWord: str :param Status: (过滤条件)实例的状态。0:创建中,1:运行中,2:删除中,不填默认返回全部 :type Status: list of int :param Offset: 偏移量,不填默认为0 :type Offset: int :param Limit: 返回数量,不填则默认10,最大值100 :type Limit: int :param TagKey: 已废弃。匹配标签key值。 :type TagKey: str :param VpcId: 私有网络Id :type VpcId: str """ self.InstanceId = None self.SearchWord = None self.Status = None self.Offset = None self.Limit = None self.TagKey = None self.VpcId = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Status = params.get("Status") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.TagKey = params.get("TagKey") self.VpcId = params.get("VpcId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeInstancesResponse(AbstractModel): """DescribeInstances返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.InstanceResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = InstanceResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeRegionRequest(AbstractModel): """DescribeRegion请求参数结构体 """ def __init__(self): r""" :param Offset: 偏移量 :type Offset: int :param Limit: 返回最大结果数 :type Limit: int :param Business: 业务字段,可忽略 :type Business: str :param CdcId: cdc专有集群业务字段,可忽略 :type CdcId: str """ self.Offset = None self.Limit = None self.Business = None self.CdcId = None def _deserialize(self, params): self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.Business = params.get("Business") self.CdcId = params.get("CdcId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeRegionResponse(AbstractModel): """DescribeRegion返回参数结构体 """ def __init__(self): r""" :param Result: 返回地域枚举结果列表 注意:此字段可能返回 null,表示取不到有效值。 :type Result: list of Region :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = [] for item in params.get("Result"): obj = Region() obj._deserialize(item) self.Result.append(obj) self.RequestId = params.get("RequestId") class DescribeRouteRequest(AbstractModel): """DescribeRoute请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例唯一id :type InstanceId: str """ self.InstanceId = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeRouteResponse(AbstractModel): """DescribeRoute返回参数结构体 """ def __init__(self): r""" :param Result: 返回的路由信息结果集 :type Result: :class:`tencentcloud.ckafka.v20190819.models.RouteResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = RouteResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeTopicAttributesRequest(AbstractModel): """DescribeTopicAttributes请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str """ self.InstanceId = None self.TopicName = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeTopicAttributesResponse(AbstractModel): """DescribeTopicAttributes返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果对象 :type Result: :class:`tencentcloud.ckafka.v20190819.models.TopicAttributesResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = TopicAttributesResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeTopicDetailRequest(AbstractModel): """DescribeTopicDetail请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param SearchWord: (过滤条件)按照topicName过滤,支持模糊查询 :type SearchWord: str :param Offset: 偏移量,不填默认为0 :type Offset: int :param Limit: 返回数量,不填则默认 10,最大值20,取值要大于0 :type Limit: int :param AclRuleName: Acl预设策略名称 :type AclRuleName: str """ self.InstanceId = None self.SearchWord = None self.Offset = None self.Limit = None self.AclRuleName = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.AclRuleName = params.get("AclRuleName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTopicDetailResponse(AbstractModel): """DescribeTopicDetail返回参数结构体 """ def __init__(self): r""" :param Result: 返回的主题详情实体 :type Result: :class:`tencentcloud.ckafka.v20190819.models.TopicDetailResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = TopicDetailResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeTopicRequest(AbstractModel): """DescribeTopic请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID :type InstanceId: str :param SearchWord: 过滤条件,按照 topicName 过滤,支持模糊查询 :type SearchWord: str :param Offset: 偏移量,不填默认为0 :type Offset: int :param Limit: 返回数量,不填则默认为20,最大值为50 :type Limit: int :param AclRuleName: Acl预设策略名称 :type AclRuleName: str """ self.InstanceId = None self.SearchWord = None self.Offset = None self.Limit = None self.AclRuleName = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.AclRuleName = params.get("AclRuleName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeTopicResponse(AbstractModel): """DescribeTopic返回参数结构体 """ def __init__(self): r""" :param Result: 返回的结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: :class:`tencentcloud.ckafka.v20190819.models.TopicResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = TopicResult() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeTopicSubscribeGroupRequest(AbstractModel): """DescribeTopicSubscribeGroup请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str :param Offset: 分页时的起始位置 :type Offset: int :param Limit: 分页时的个数 :type Limit: int """ self.InstanceId = None self.TopicName = None self.Offset = None self.Limit = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeTopicSubscribeGroupResponse(AbstractModel): """DescribeTopicSubscribeGroup返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.TopicSubscribeGroup` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = TopicSubscribeGroup() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeTopicSyncReplicaRequest(AbstractModel): """DescribeTopicSyncReplica请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param TopicName: 主题名称 :type TopicName: str :param Offset: 偏移量,不填默认为0 :type Offset: int :param Limit: 返回数量,不填则默认10,最大值20。 :type Limit: int :param OutOfSyncReplicaOnly: 仅筛选未同步副本 :type OutOfSyncReplicaOnly: bool """ self.InstanceId = None self.TopicName = None self.Offset = None self.Limit = None self.OutOfSyncReplicaOnly = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.Offset = params.get("Offset") self.Limit = params.get("Limit") self.OutOfSyncReplicaOnly = params.get("OutOfSyncReplicaOnly") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DescribeTopicSyncReplicaResponse(AbstractModel): """DescribeTopicSyncReplica返回参数结构体 """ def __init__(self): r""" :param Result: 返回topic 副本详情 :type Result: :class:`tencentcloud.ckafka.v20190819.models.TopicInSyncReplicaResult` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = TopicInSyncReplicaResult() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DescribeUserRequest(AbstractModel): """DescribeUser请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param SearchWord: 按照名称过滤 :type SearchWord: str :param Offset: 偏移 :type Offset: int :param Limit: 本次返回个数 :type Limit: int """ self.InstanceId = None self.SearchWord = None self.Offset = None self.Limit = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.SearchWord = params.get("SearchWord") self.Offset = params.get("Offset") self.Limit = params.get("Limit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DescribeUserResponse(AbstractModel): """DescribeUser返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果列表 :type Result: :class:`tencentcloud.ckafka.v20190819.models.UserResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = UserResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class DorisConnectParam(AbstractModel): """Doris 连接源参数 """ def __init__(self): r""" :param Port: Doris jdbc 负载均衡连接 port,通常映射到 fe 的 9030 端口 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: Doris 连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Doris 连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: Doris 连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param ServiceVip: Doris 连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Doris 连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param SelfBuilt: Doris 连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param BePort: Doris 的 http 负载均衡连接 port,通常映射到 be 的 8040 端口 注意:此字段可能返回 null,表示取不到有效值。 :type BePort: int """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None self.SelfBuilt = None self.BePort = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") self.SelfBuilt = params.get("SelfBuilt") self.BePort = params.get("BePort") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DorisModifyConnectParam(AbstractModel): """Doris 连接源修改参数 """ def __init__(self): r""" :param Resource: Doris 连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: Doris jdbc 负载均衡连接 port,通常映射到 fe 的 9030 端口 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: Doris 连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Doris 连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: Doris 连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Doris 连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param SelfBuilt: Doris 连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param BePort: Doris 的 http 负载均衡连接 port,通常映射到 be 的 8040 端口 注意:此字段可能返回 null,表示取不到有效值。 :type BePort: int """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.IsUpdate = None self.SelfBuilt = None self.BePort = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.IsUpdate = params.get("IsUpdate") self.SelfBuilt = params.get("SelfBuilt") self.BePort = params.get("BePort") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DropCls(AbstractModel): """dip失败消息写入cls的配置 """ def __init__(self): r""" :param DropInvalidMessageToCls: 是否投递到cls 注意:此字段可能返回 null,表示取不到有效值。 :type DropInvalidMessageToCls: bool :param DropClsRegion: 投递cls的地域 注意:此字段可能返回 null,表示取不到有效值。 :type DropClsRegion: str :param DropClsOwneruin: 投递cls的账号 注意:此字段可能返回 null,表示取不到有效值。 :type DropClsOwneruin: str :param DropClsTopicId: 投递cls的主题 注意:此字段可能返回 null,表示取不到有效值。 :type DropClsTopicId: str :param DropClsLogSet: 投递cls的日志集id 注意:此字段可能返回 null,表示取不到有效值。 :type DropClsLogSet: str """ self.DropInvalidMessageToCls = None self.DropClsRegion = None self.DropClsOwneruin = None self.DropClsTopicId = None self.DropClsLogSet = None def _deserialize(self, params): self.DropInvalidMessageToCls = params.get("DropInvalidMessageToCls") self.DropClsRegion = params.get("DropClsRegion") self.DropClsOwneruin = params.get("DropClsOwneruin") self.DropClsTopicId = params.get("DropClsTopicId") self.DropClsLogSet = params.get("DropClsLogSet") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DtsConnectParam(AbstractModel): """Dts连接源参数 """ def __init__(self): r""" :param Port: Dts的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param GroupId: Dts消费分组的Id 注意:此字段可能返回 null,表示取不到有效值。 :type GroupId: str :param UserName: Dts消费分组的账号 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Dts消费分组的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: Dts实例Id 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Topic: Dts订阅的topic 注意:此字段可能返回 null,表示取不到有效值。 :type Topic: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.GroupId = None self.UserName = None self.Password = None self.Resource = None self.Topic = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.GroupId = params.get("GroupId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.Topic = params.get("Topic") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DtsModifyConnectParam(AbstractModel): """Dts修改连接源参数 """ def __init__(self): r""" :param Resource: Dts实例Id【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: Dts的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param GroupId: Dts消费分组的Id 注意:此字段可能返回 null,表示取不到有效值。 :type GroupId: str :param UserName: Dts消费分组的账号 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Dts消费分组的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param IsUpdate: 是否更新到关联的Datahub任务,默认为true 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param Topic: Dts订阅的topic【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Topic: str """ self.Resource = None self.Port = None self.GroupId = None self.UserName = None self.Password = None self.IsUpdate = None self.Topic = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.GroupId = params.get("GroupId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.IsUpdate = params.get("IsUpdate") self.Topic = params.get("Topic") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class DtsParam(AbstractModel): """Dts类型入参 """ def __init__(self): r""" :param Resource: Dts实例Id :type Resource: str :param Ip: Dts的连接ip :type Ip: str :param Port: Dts的连接port :type Port: int :param Topic: Dts订阅的topic :type Topic: str :param GroupId: Dts消费分组的Id :type GroupId: str :param GroupUser: Dts消费分组的账号 :type GroupUser: str :param GroupPassword: Dts消费分组的密码 :type GroupPassword: str :param TranSql: false同步原始数据,true同步解析后的json格式数据,默认true :type TranSql: bool """ self.Resource = None self.Ip = None self.Port = None self.Topic = None self.GroupId = None self.GroupUser = None self.GroupPassword = None self.TranSql = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Ip = params.get("Ip") self.Port = params.get("Port") self.Topic = params.get("Topic") self.GroupId = params.get("GroupId") self.GroupUser = params.get("GroupUser") self.GroupPassword = params.get("GroupPassword") self.TranSql = params.get("TranSql") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DynamicDiskConfig(AbstractModel): """动态硬盘扩容配置 """ def __init__(self): r""" :param Enable: 动态硬盘扩容配置开关(0: 关闭,1: 开启) 注意:此字段可能返回 null,表示取不到有效值。 :type Enable: int :param StepForwardPercentage: 每次磁盘动态扩容大小百分比 注意:此字段可能返回 null,表示取不到有效值。 :type StepForwardPercentage: int :param DiskQuotaPercentage: 磁盘配额百分比触发条件,即消息达到此值触发硬盘自动扩容事件 注意:此字段可能返回 null,表示取不到有效值。 :type DiskQuotaPercentage: int :param MaxDiskSpace: 最大扩容硬盘大小,以 GB 为单位 注意:此字段可能返回 null,表示取不到有效值。 :type MaxDiskSpace: int """ self.Enable = None self.StepForwardPercentage = None self.DiskQuotaPercentage = None self.MaxDiskSpace = None def _deserialize(self, params): self.Enable = params.get("Enable") self.StepForwardPercentage = params.get("StepForwardPercentage") self.DiskQuotaPercentage = params.get("DiskQuotaPercentage") self.MaxDiskSpace = params.get("MaxDiskSpace") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class DynamicRetentionTime(AbstractModel): """动态消息保留时间配置 """ def __init__(self): r""" :param Enable: 动态消息保留时间配置开关(0: 关闭,1: 开启) 注意:此字段可能返回 null,表示取不到有效值。 :type Enable: int :param DiskQuotaPercentage: 磁盘配额百分比触发条件,即消息达到此值触发消息保留时间变更事件 注意:此字段可能返回 null,表示取不到有效值。 :type DiskQuotaPercentage: int :param StepForwardPercentage: 每次向前调整消息保留时间百分比 注意:此字段可能返回 null,表示取不到有效值。 :type StepForwardPercentage: int :param BottomRetention: 保底时长,单位分钟 注意:此字段可能返回 null,表示取不到有效值。 :type BottomRetention: int """ self.Enable = None self.DiskQuotaPercentage = None self.StepForwardPercentage = None self.BottomRetention = None def _deserialize(self, params): self.Enable = params.get("Enable") self.DiskQuotaPercentage = params.get("DiskQuotaPercentage") self.StepForwardPercentage = params.get("StepForwardPercentage") self.BottomRetention = params.get("BottomRetention") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class EsConnectParam(AbstractModel): """Es连接源参数 """ def __init__(self): r""" :param Port: Es的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: Es连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Es连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: Es连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param SelfBuilt: Es连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param ServiceVip: Es连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Es连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.SelfBuilt = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.SelfBuilt = params.get("SelfBuilt") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class EsModifyConnectParam(AbstractModel): """Es修改连接源参数 """ def __init__(self): r""" :param Resource: Es连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: Es的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: Es连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: Es连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: Es连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: Es连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param SelfBuilt: Es连接源是否为自建集群【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.SelfBuilt = None self.IsUpdate = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.SelfBuilt = params.get("SelfBuilt") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class EsParam(AbstractModel): """Es类型入参 """ def __init__(self): r""" :param Resource: 实例资源 :type Resource: str :param Port: Es的连接port :type Port: int :param UserName: Es用户名 :type UserName: str :param Password: Es密码 :type Password: str :param SelfBuilt: 是否为自建集群 :type SelfBuilt: bool :param ServiceVip: 实例vip :type ServiceVip: str :param UniqVpcId: 实例的vpcId :type UniqVpcId: str :param DropInvalidMessage: Es是否抛弃解析失败的消息 :type DropInvalidMessage: bool :param Index: Es自定义index名称 :type Index: str :param DateFormat: Es自定义日期后缀 :type DateFormat: str :param ContentKey: 非json格式数据的自定义key :type ContentKey: str :param DropInvalidJsonMessage: Es是否抛弃非json格式的消息 :type DropInvalidJsonMessage: bool :param DocumentIdField: 转储到Es中的文档ID取值字段名 :type DocumentIdField: str :param IndexType: Es自定义index名称的类型,STRING,JSONPATH,默认为STRING :type IndexType: str :param DropCls: 当设置成员参数DropInvalidMessageToCls设置为true时,DropInvalidMessage参数失效 :type DropCls: :class:`tencentcloud.ckafka.v20190819.models.DropCls` :param DatabasePrimaryKey: 转储到ES的消息为Database的binlog时,如果需要同步数据库操作,即增删改的操作到ES时填写数据库表主键 :type DatabasePrimaryKey: str """ self.Resource = None self.Port = None self.UserName = None self.Password = None self.SelfBuilt = None self.ServiceVip = None self.UniqVpcId = None self.DropInvalidMessage = None self.Index = None self.DateFormat = None self.ContentKey = None self.DropInvalidJsonMessage = None self.DocumentIdField = None self.IndexType = None self.DropCls = None self.DatabasePrimaryKey = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.SelfBuilt = params.get("SelfBuilt") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.DropInvalidMessage = params.get("DropInvalidMessage") self.Index = params.get("Index") self.DateFormat = params.get("DateFormat") self.ContentKey = params.get("ContentKey") self.DropInvalidJsonMessage = params.get("DropInvalidJsonMessage") self.DocumentIdField = params.get("DocumentIdField") self.IndexType = params.get("IndexType") if params.get("DropCls") is not None: self.DropCls = DropCls() self.DropCls._deserialize(params.get("DropCls")) self.DatabasePrimaryKey = params.get("DatabasePrimaryKey") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class EventBusParam(AbstractModel): """EventBus配置 """ def __init__(self): r""" :param Type: 资源类型。EB_COS/EB_ES/EB_CLS :type Type: str :param SelfBuilt: 是否为自建集群 :type SelfBuilt: bool :param Resource: 实例资源 :type Resource: str :param Namespace: SCF云函数命名空间 :type Namespace: str :param FunctionName: SCF云函数函数名 :type FunctionName: str :param Qualifier: SCF云函数版本及别名 :type Qualifier: str """ self.Type = None self.SelfBuilt = None self.Resource = None self.Namespace = None self.FunctionName = None self.Qualifier = None def _deserialize(self, params): self.Type = params.get("Type") self.SelfBuilt = params.get("SelfBuilt") self.Resource = params.get("Resource") self.Namespace = params.get("Namespace") self.FunctionName = params.get("FunctionName") self.Qualifier = params.get("Qualifier") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FailureParam(AbstractModel): """数据处理规则失败处理 """ def __init__(self): r""" :param Type: 类型,DLQ死信队列,IGNORE_ERROR保留,DROP废弃 :type Type: str :param KafkaParam: Ckafka类型死信队列 :type KafkaParam: :class:`tencentcloud.ckafka.v20190819.models.KafkaParam` :param RetryInterval: 重试间隔 :type RetryInterval: int :param MaxRetryAttempts: 重试次数 :type MaxRetryAttempts: int :param TopicParam: DIP Topic类型死信队列 注意:此字段可能返回 null,表示取不到有效值。 :type TopicParam: :class:`tencentcloud.ckafka.v20190819.models.TopicParam` :param DlqType: 死信队列类型,CKAFKA,TOPIC 注意:此字段可能返回 null,表示取不到有效值。 :type DlqType: str """ self.Type = None self.KafkaParam = None self.RetryInterval = None self.MaxRetryAttempts = None self.TopicParam = None self.DlqType = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("KafkaParam") is not None: self.KafkaParam = KafkaParam() self.KafkaParam._deserialize(params.get("KafkaParam")) self.RetryInterval = params.get("RetryInterval") self.MaxRetryAttempts = params.get("MaxRetryAttempts") if params.get("TopicParam") is not None: self.TopicParam = TopicParam() self.TopicParam._deserialize(params.get("TopicParam")) self.DlqType = params.get("DlqType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FetchDatahubMessageByOffsetRequest(AbstractModel): """FetchDatahubMessageByOffset请求参数结构体 """ def __init__(self): r""" :param Name: 主题名 :type Name: str :param Partition: 分区id :type Partition: int :param Offset: 位点信息,必填 :type Offset: int """ self.Name = None self.Partition = None self.Offset = None def _deserialize(self, params): self.Name = params.get("Name") self.Partition = params.get("Partition") self.Offset = params.get("Offset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FetchDatahubMessageByOffsetResponse(AbstractModel): """FetchDatahubMessageByOffset返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConsumerRecord` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConsumerRecord() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class FetchLatestDatahubMessageListRequest(AbstractModel): """FetchLatestDatahubMessageList请求参数结构体 """ def __init__(self): r""" :param Name: 主题名 :type Name: str :param Partition: 分区id :type Partition: int :param Offset: 位点信息 :type Offset: int :param MessageCount: 最大查询条数,最小1,最大100 :type MessageCount: int """ self.Name = None self.Partition = None self.Offset = None self.MessageCount = None def _deserialize(self, params): self.Name = params.get("Name") self.Partition = params.get("Partition") self.Offset = params.get("Offset") self.MessageCount = params.get("MessageCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FetchLatestDatahubMessageListResponse(AbstractModel): """FetchLatestDatahubMessageList返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果。 :type Result: list of ConsumerRecord :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = [] for item in params.get("Result"): obj = ConsumerRecord() obj._deserialize(item) self.Result.append(obj) self.RequestId = params.get("RequestId") class FetchMessageByOffsetRequest(AbstractModel): """FetchMessageByOffset请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Topic: 主题名 :type Topic: str :param Partition: 分区id :type Partition: int :param Offset: 位点信息,必填 :type Offset: int """ self.InstanceId = None self.Topic = None self.Partition = None self.Offset = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Topic = params.get("Topic") self.Partition = params.get("Partition") self.Offset = params.get("Offset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class FetchMessageByOffsetResponse(AbstractModel): """FetchMessageByOffset返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConsumerRecord` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConsumerRecord() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class FetchMessageListByOffsetRequest(AbstractModel): """FetchMessageListByOffset请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Topic: 主题名 :type Topic: str :param Partition: 分区id :type Partition: int :param Offset: 位点信息 :type Offset: int :param SinglePartitionRecordNumber: 最大查询条数,默认20,最大20 :type SinglePartitionRecordNumber: int """ self.InstanceId = None self.Topic = None self.Partition = None self.Offset = None self.SinglePartitionRecordNumber = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Topic = params.get("Topic") self.Partition = params.get("Partition") self.Offset = params.get("Offset") self.SinglePartitionRecordNumber = params.get("SinglePartitionRecordNumber") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FetchMessageListByOffsetResponse(AbstractModel): """FetchMessageListByOffset返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果。注意,列表中不返回具体的消息内容(key、value),如果需要查询具体消息内容,请使用FetchMessageByOffset接口 :type Result: list of ConsumerRecord :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = [] for item in params.get("Result"): obj = ConsumerRecord() obj._deserialize(item) self.Result.append(obj) self.RequestId = params.get("RequestId") class FieldParam(AbstractModel): """数据处理——处理链 """ def __init__(self): r""" :param Analyse: 解析 :type Analyse: :class:`tencentcloud.ckafka.v20190819.models.AnalyseParam` :param SecondaryAnalyse: 二次解析 注意:此字段可能返回 null,表示取不到有效值。 :type SecondaryAnalyse: :class:`tencentcloud.ckafka.v20190819.models.SecondaryAnalyseParam` :param SMT: 数据处理 注意:此字段可能返回 null,表示取不到有效值。 :type SMT: list of SMTParam :param Result: 测试结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param AnalyseResult: 解析结果 注意:此字段可能返回 null,表示取不到有效值。 :type AnalyseResult: list of SMTParam :param SecondaryAnalyseResult: 二次解析结果 注意:此字段可能返回 null,表示取不到有效值。 :type SecondaryAnalyseResult: list of SMTParam :param AnalyseJsonResult: JSON格式解析结果 注意:此字段可能返回 null,表示取不到有效值。 :type AnalyseJsonResult: str :param SecondaryAnalyseJsonResult: JSON格式二次解析结果 注意:此字段可能返回 null,表示取不到有效值。 :type SecondaryAnalyseJsonResult: str """ self.Analyse = None self.SecondaryAnalyse = None self.SMT = None self.Result = None self.AnalyseResult = None self.SecondaryAnalyseResult = None self.AnalyseJsonResult = None self.SecondaryAnalyseJsonResult = None def _deserialize(self, params): if params.get("Analyse") is not None: self.Analyse = AnalyseParam() self.Analyse._deserialize(params.get("Analyse")) if params.get("SecondaryAnalyse") is not None: self.SecondaryAnalyse = SecondaryAnalyseParam() self.SecondaryAnalyse._deserialize(params.get("SecondaryAnalyse")) if params.get("SMT") is not None: self.SMT = [] for item in params.get("SMT"): obj = SMTParam() obj._deserialize(item) self.SMT.append(obj) self.Result = params.get("Result") if params.get("AnalyseResult") is not None: self.AnalyseResult = [] for item in params.get("AnalyseResult"): obj = SMTParam() obj._deserialize(item) self.AnalyseResult.append(obj) if params.get("SecondaryAnalyseResult") is not None: self.SecondaryAnalyseResult = [] for item in params.get("SecondaryAnalyseResult"): obj = SMTParam() obj._deserialize(item) self.SecondaryAnalyseResult.append(obj) self.AnalyseJsonResult = params.get("AnalyseJsonResult") self.SecondaryAnalyseJsonResult = params.get("SecondaryAnalyseJsonResult") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Filter(AbstractModel): """查询过滤器 >描述键值对过滤器,用于条件过滤查询。例如过滤ID、名称、状态等 > * 若存在多个`Filter`时,`Filter`间的关系为逻辑与(`AND`)关系。 > * 若同一个`Filter`存在多个`Values`,同一`Filter`下`Values`间的关系为逻辑或(`OR`)关系。 > """ def __init__(self): r""" :param Name: 需要过滤的字段。 :type Name: str :param Values: 字段的过滤值。 :type Values: list of str """ self.Name = None self.Values = None def _deserialize(self, params): self.Name = params.get("Name") self.Values = params.get("Values") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class FilterMapParam(AbstractModel): """过滤器参数 """ def __init__(self): r""" :param Key: Key值 :type Key: str :param MatchMode: 匹配模式,前缀匹配PREFIX,后缀匹配SUFFIX,包含匹配CONTAINS,EXCEPT除外匹配,数值匹配NUMBER,IP匹配IP :type MatchMode: str :param Value: Value值 :type Value: str :param Type: 固定REGULAR :type Type: str """ self.Key = None self.MatchMode = None self.Value = None self.Type = None def _deserialize(self, params): self.Key = params.get("Key") self.MatchMode = params.get("MatchMode") self.Value = params.get("Value") self.Type = params.get("Type") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Group(AbstractModel): """组实体 """ def __init__(self): r""" :param GroupName: 组名称 :type GroupName: str """ self.GroupName = None def _deserialize(self, params): self.GroupName = params.get("GroupName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupInfoMember(AbstractModel): """consumer信息 """ def __init__(self): r""" :param MemberId: coordinator 为消费分组中的消费者生成的唯一 ID :type MemberId: str :param ClientId: 客户消费者 SDK 自己设置的 client.id 信息 :type ClientId: str :param ClientHost: 一般存储客户的 IP 地址 :type ClientHost: str :param Assignment: 存储着分配给该消费者的 partition 信息 :type Assignment: :class:`tencentcloud.ckafka.v20190819.models.Assignment` """ self.MemberId = None self.ClientId = None self.ClientHost = None self.Assignment = None def _deserialize(self, params): self.MemberId = params.get("MemberId") self.ClientId = params.get("ClientId") self.ClientHost = params.get("ClientHost") if params.get("Assignment") is not None: self.Assignment = Assignment() self.Assignment._deserialize(params.get("Assignment")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class GroupInfoResponse(AbstractModel): """GroupInfo返回数据的实体 """ def __init__(self): r""" :param ErrorCode: 错误码,正常为0 :type ErrorCode: str :param State: group 状态描述(常见的为 Empty、Stable、Dead 三种状态): Dead:消费分组不存在 Empty:消费分组,当前没有任何消费者订阅 PreparingRebalance:消费分组处于 rebalance 状态 CompletingRebalance:消费分组处于 rebalance 状态 Stable:消费分组中各个消费者已经加入,处于稳定状态 :type State: str :param ProtocolType: 消费分组选择的协议类型正常的消费者一般为 consumer 但有些系统采用了自己的协议如 kafka-connect 用的就是 connect。只有标准的 consumer 协议,本接口才知道具体的分配方式的格式,才能解析到具体的 partition 的分配情况 :type ProtocolType: str :param Protocol: 消费者 partition 分配算法常见的有如下几种(Kafka 消费者 SDK 默认的选择项为 range):range、 roundrobin、 sticky :type Protocol: str :param Members: 仅当 state 为 Stable 且 protocol_type 为 consumer 时, 该数组才包含信息 :type Members: list of GroupInfoMember :param Group: Kafka 消费分组 :type Group: str """ self.ErrorCode = None self.State = None self.ProtocolType = None self.Protocol = None self.Members = None self.Group = None def _deserialize(self, params): self.ErrorCode = params.get("ErrorCode") self.State = params.get("State") self.ProtocolType = params.get("ProtocolType") self.Protocol = params.get("Protocol") if params.get("Members") is not None: self.Members = [] for item in params.get("Members"): obj = GroupInfoMember() obj._deserialize(item) self.Members.append(obj) self.Group = params.get("Group") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupInfoTopics(AbstractModel): """GroupInfo内部topic对象 """ def __init__(self): r""" :param Topic: 分配的 topic 名称 :type Topic: str :param Partitions: 分配的 partition 信息 注意:此字段可能返回 null,表示取不到有效值。 :type Partitions: list of int """ self.Topic = None self.Partitions = None def _deserialize(self, params): self.Topic = params.get("Topic") self.Partitions = params.get("Partitions") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupOffsetPartition(AbstractModel): """组偏移量分区对象 """ def __init__(self): r""" :param Partition: topic 的 partitionId :type Partition: int :param Offset: consumer 提交的 offset 位置 :type Offset: int :param Metadata: 支持消费者提交消息时,传入 metadata 作为它用,当前一般为空字符串 注意:此字段可能返回 null,表示取不到有效值。 :type Metadata: str :param ErrorCode: 错误码 :type ErrorCode: int :param LogEndOffset: 当前 partition 最新的 offset :type LogEndOffset: int :param Lag: 未消费的消息个数 :type Lag: int """ self.Partition = None self.Offset = None self.Metadata = None self.ErrorCode = None self.LogEndOffset = None self.Lag = None def _deserialize(self, params): self.Partition = params.get("Partition") self.Offset = params.get("Offset") self.Metadata = params.get("Metadata") self.ErrorCode = params.get("ErrorCode") self.LogEndOffset = params.get("LogEndOffset") self.Lag = params.get("Lag") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class GroupOffsetResponse(AbstractModel): """消费组偏移量返回结果 """ def __init__(self): r""" :param TotalCount: 符合调节的总结果数 :type TotalCount: int :param TopicList: 该主题分区数组,其中每个元素为一个 json object 注意:此字段可能返回 null,表示取不到有效值。 :type TopicList: list of GroupOffsetTopic """ self.TotalCount = None self.TopicList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("TopicList") is not None: self.TopicList = [] for item in params.get("TopicList"): obj = GroupOffsetTopic() obj._deserialize(item) self.TopicList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupOffsetTopic(AbstractModel): """消费分组主题对象 """ def __init__(self): r""" :param Topic: 主题名称 :type Topic: str :param Partitions: 该主题分区数组,其中每个元素为一个 json object 注意:此字段可能返回 null,表示取不到有效值。 :type Partitions: list of GroupOffsetPartition """ self.Topic = None self.Partitions = None def _deserialize(self, params): self.Topic = params.get("Topic") if params.get("Partitions") is not None: self.Partitions = [] for item in params.get("Partitions"): obj = GroupOffsetPartition() obj._deserialize(item) self.Partitions.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class GroupResponse(AbstractModel): """DescribeGroup的返回 """ def __init__(self): r""" :param TotalCount: 计数 注意:此字段可能返回 null,表示取不到有效值。 :type TotalCount: int :param GroupList: GroupList 注意:此字段可能返回 null,表示取不到有效值。 :type GroupList: list of DescribeGroup """ self.TotalCount = None self.GroupList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("GroupList") is not None: self.GroupList = [] for item in params.get("GroupList"): obj = DescribeGroup() obj._deserialize(item) self.GroupList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Instance(AbstractModel): """实例对象 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param InstanceName: 实例名称 :type InstanceName: str :param Status: 实例的状态。0:创建中,1:运行中,2:删除中 , 5 隔离中,-1 创建失败 :type Status: int :param IfCommunity: 是否开源实例。开源:true,不开源:false 注意:此字段可能返回 null,表示取不到有效值。 :type IfCommunity: bool """ self.InstanceId = None self.InstanceName = None self.Status = None self.IfCommunity = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.InstanceName = params.get("InstanceName") self.Status = params.get("Status") self.IfCommunity = params.get("IfCommunity") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class InstanceAttributesResponse(AbstractModel): """实例属性返回结果对象 """ def __init__(self): r""" :param InstanceId: 实例ID :type InstanceId: str :param InstanceName: 实例名称 :type InstanceName: str :param VipList: 接入点 VIP 列表信息 :type VipList: list of VipEntity :param Vip: 虚拟IP :type Vip: str :param Vport: 虚拟端口 :type Vport: str :param Status: 实例的状态。0:创建中,1:运行中,2:删除中 :type Status: int :param Bandwidth: 实例带宽,单位:Mbps :type Bandwidth: int :param DiskSize: 实例的存储大小,单位:GB :type DiskSize: int :param ZoneId: 可用区 :type ZoneId: int :param VpcId: VPC 的 ID,为空表示是基础网络 :type VpcId: str :param SubnetId: 子网 ID, 为空表示基础网络 :type SubnetId: str :param Healthy: 实例健康状态, 1:健康,2:告警,3:异常 :type Healthy: int :param HealthyMessage: 实例健康信息,当前会展示磁盘利用率,最大长度为256 :type HealthyMessage: str :param CreateTime: 创建时间 :type CreateTime: int :param MsgRetentionTime: 消息保存时间,单位为分钟 :type MsgRetentionTime: int :param Config: 自动创建 Topic 配置, 若该字段为空,则表示未开启自动创建 :type Config: :class:`tencentcloud.ckafka.v20190819.models.InstanceConfigDO` :param RemainderPartitions: 剩余创建分区数 :type RemainderPartitions: int :param RemainderTopics: 剩余创建主题数 :type RemainderTopics: int :param CreatedPartitions: 当前创建分区数 :type CreatedPartitions: int :param CreatedTopics: 当前创建主题数 :type CreatedTopics: int :param Tags: 标签数组 注意:此字段可能返回 null,表示取不到有效值。 :type Tags: list of Tag :param ExpireTime: 过期时间 注意:此字段可能返回 null,表示取不到有效值。 :type ExpireTime: int :param ZoneIds: 跨可用区 注意:此字段可能返回 null,表示取不到有效值。 :type ZoneIds: list of int :param Version: kafka版本信息 注意:此字段可能返回 null,表示取不到有效值。 :type Version: str :param MaxGroupNum: 最大分组数 注意:此字段可能返回 null,表示取不到有效值。 :type MaxGroupNum: int :param Cvm: 售卖类型,0:标准版,1:专业版 注意:此字段可能返回 null,表示取不到有效值。 :type Cvm: int :param InstanceType: 类型 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceType: str :param Features: 表示该实例支持的特性。FEATURE_SUBNET_ACL:表示acl策略支持设置子网。 注意:此字段可能返回 null,表示取不到有效值。 :type Features: list of str :param RetentionTimeConfig: 动态消息保留策略 注意:此字段可能返回 null,表示取不到有效值。 :type RetentionTimeConfig: :class:`tencentcloud.ckafka.v20190819.models.DynamicRetentionTime` :param MaxConnection: 最大连接数 注意:此字段可能返回 null,表示取不到有效值。 :type MaxConnection: int :param PublicNetwork: 公网带宽 注意:此字段可能返回 null,表示取不到有效值。 :type PublicNetwork: int :param DeleteRouteTimestamp: 时间 注意:此字段可能返回 null,表示取不到有效值。 :type DeleteRouteTimestamp: str :param RemainingPartitions: 剩余创建分区数 注意:此字段可能返回 null,表示取不到有效值。 :type RemainingPartitions: int :param RemainingTopics: 剩余创建主题数 注意:此字段可能返回 null,表示取不到有效值。 :type RemainingTopics: int :param DynamicDiskConfig: 动态硬盘扩容策略 注意:此字段可能返回 null,表示取不到有效值。 :type DynamicDiskConfig: :class:`tencentcloud.ckafka.v20190819.models.DynamicDiskConfig` """ self.InstanceId = None self.InstanceName = None self.VipList = None self.Vip = None self.Vport = None self.Status = None self.Bandwidth = None self.DiskSize = None self.ZoneId = None self.VpcId = None self.SubnetId = None self.Healthy = None self.HealthyMessage = None self.CreateTime = None self.MsgRetentionTime = None self.Config = None self.RemainderPartitions = None self.RemainderTopics = None self.CreatedPartitions = None self.CreatedTopics = None self.Tags = None self.ExpireTime = None self.ZoneIds = None self.Version = None self.MaxGroupNum = None self.Cvm = None self.InstanceType = None self.Features = None self.RetentionTimeConfig = None self.MaxConnection = None self.PublicNetwork = None self.DeleteRouteTimestamp = None self.RemainingPartitions = None self.RemainingTopics = None self.DynamicDiskConfig = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.InstanceName = 
params.get("InstanceName") if params.get("VipList") is not None: self.VipList = [] for item in params.get("VipList"): obj = VipEntity() obj._deserialize(item) self.VipList.append(obj) self.Vip = params.get("Vip") self.Vport = params.get("Vport") self.Status = params.get("Status") self.Bandwidth = params.get("Bandwidth") self.DiskSize = params.get("DiskSize") self.ZoneId = params.get("ZoneId") self.VpcId = params.get("VpcId") self.SubnetId = params.get("SubnetId") self.Healthy = params.get("Healthy") self.HealthyMessage = params.get("HealthyMessage") self.CreateTime = params.get("CreateTime") self.MsgRetentionTime = params.get("MsgRetentionTime") if params.get("Config") is not None: self.Config = InstanceConfigDO() self.Config._deserialize(params.get("Config")) self.RemainderPartitions = params.get("RemainderPartitions") self.RemainderTopics = params.get("RemainderTopics") self.CreatedPartitions = params.get("CreatedPartitions") self.CreatedTopics = params.get("CreatedTopics") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) self.ExpireTime = params.get("ExpireTime") self.ZoneIds = params.get("ZoneIds") self.Version = params.get("Version") self.MaxGroupNum = params.get("MaxGroupNum") self.Cvm = params.get("Cvm") self.InstanceType = params.get("InstanceType") self.Features = params.get("Features") if params.get("RetentionTimeConfig") is not None: self.RetentionTimeConfig = DynamicRetentionTime() self.RetentionTimeConfig._deserialize(params.get("RetentionTimeConfig")) self.MaxConnection = params.get("MaxConnection") self.PublicNetwork = params.get("PublicNetwork") self.DeleteRouteTimestamp = params.get("DeleteRouteTimestamp") self.RemainingPartitions = params.get("RemainingPartitions") self.RemainingTopics = params.get("RemainingTopics") if params.get("DynamicDiskConfig") is not None: self.DynamicDiskConfig = DynamicDiskConfig() self.DynamicDiskConfig._deserialize(params.get("DynamicDiskConfig")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceConfigDO(AbstractModel): """实例配置实体 """ def __init__(self): r""" :param AutoCreateTopicsEnable: 是否自动创建主题 :type AutoCreateTopicsEnable: bool :param DefaultNumPartitions: 分区数 :type DefaultNumPartitions: int :param DefaultReplicationFactor: 默认的复制Factor :type DefaultReplicationFactor: int """ self.AutoCreateTopicsEnable = None self.DefaultNumPartitions = None self.DefaultReplicationFactor = None def _deserialize(self, params): self.AutoCreateTopicsEnable = params.get("AutoCreateTopicsEnable") self.DefaultNumPartitions = params.get("DefaultNumPartitions") self.DefaultReplicationFactor = params.get("DefaultReplicationFactor") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class InstanceDetail(AbstractModel): """实例详情 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param InstanceName: 实例名称 :type InstanceName: str :param Vip: 访问实例的vip 信息 :type Vip: str :param Vport: 访问实例的端口信息 :type Vport: str :param VipList: 虚拟IP列表 :type VipList: list of VipEntity :param Status: 实例的状态。0:创建中,1:运行中,2:删除中:5隔离中, -1 创建失败 :type Status: int :param Bandwidth: 实例带宽,单位Mbps :type Bandwidth: int :param DiskSize: 实例的存储大小,单位GB :type DiskSize: int :param ZoneId: 可用区域ID :type ZoneId: int :param VpcId: vpcId,如果为空,说明是基础网络 :type VpcId: str :param SubnetId: 子网id :type SubnetId: str :param RenewFlag: 实例是否续费,int 枚举值:1表示自动续费,2表示明确不自动续费 :type RenewFlag: int :param Healthy: 实例状态 int:1表示健康,2表示告警,3 表示实例状态异常 :type Healthy: int :param HealthyMessage: 实例状态信息 :type HealthyMessage: str :param CreateTime: 实例创建时间时间 :type CreateTime: int :param ExpireTime: 实例过期时间 :type ExpireTime: int :param IsInternal: 是否为内部客户。值为1 表示内部客户 :type IsInternal: int :param TopicNum: Topic个数 :type TopicNum: int :param Tags: 标识tag :type Tags: list of Tag :param Version: kafka版本信息 注意:此字段可能返回 null,表示取不到有效值。 :type Version: str :param ZoneIds: 跨可用区 注意:此字段可能返回 null,表示取不到有效值。 :type ZoneIds: list of int :param Cvm: ckafka售卖类型 注意:此字段可能返回 null,表示取不到有效值。 :type Cvm: int :param InstanceType: ckafka实例类型 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceType: str :param DiskType: 磁盘类型 注意:此字段可能返回 null,表示取不到有效值。 :type DiskType: str :param MaxTopicNumber: 当前规格最大Topic数 注意:此字段可能返回 null,表示取不到有效值。 :type MaxTopicNumber: int :param MaxPartitionNumber: 当前规格最大Partition数 注意:此字段可能返回 null,表示取不到有效值。 :type MaxPartitionNumber: int :param RebalanceTime: 计划升级配置时间 注意:此字段可能返回 null,表示取不到有效值。 :type RebalanceTime: str :param PartitionNumber: 实例当前partition数量 注意:此字段可能返回 null,表示取不到有效值。 :type PartitionNumber: int :param PublicNetworkChargeType: 公网带宽类型 注意:此字段可能返回 null,表示取不到有效值。 :type PublicNetworkChargeType: str :param PublicNetwork: 公网带宽值 注意:此字段可能返回 null,表示取不到有效值。 :type PublicNetwork: int :param ClusterType: 实例类型 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterType: str :param Features: 实例功能列表 注意:此字段可能返回 null,表示取不到有效值。 :type Features: list of str """ self.InstanceId = None self.InstanceName = None self.Vip = None self.Vport = None self.VipList = None self.Status = None self.Bandwidth = None self.DiskSize = None self.ZoneId = None self.VpcId = None self.SubnetId = None self.RenewFlag = None self.Healthy = None self.HealthyMessage = None self.CreateTime = None self.ExpireTime = None self.IsInternal = None self.TopicNum = None self.Tags = None self.Version = None self.ZoneIds = None self.Cvm = None self.InstanceType = None self.DiskType = None self.MaxTopicNumber = None self.MaxPartitionNumber = None self.RebalanceTime = None self.PartitionNumber = None self.PublicNetworkChargeType = None self.PublicNetwork = None self.ClusterType = None self.Features = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.InstanceName = params.get("InstanceName") self.Vip = params.get("Vip") self.Vport = params.get("Vport") if params.get("VipList") is not None: self.VipList = [] for item in params.get("VipList"): obj = VipEntity() obj._deserialize(item) self.VipList.append(obj) self.Status = params.get("Status") self.Bandwidth = params.get("Bandwidth") self.DiskSize = params.get("DiskSize") self.ZoneId = params.get("ZoneId") self.VpcId = params.get("VpcId") self.SubnetId = params.get("SubnetId") self.RenewFlag = params.get("RenewFlag") self.Healthy = params.get("Healthy") self.HealthyMessage = params.get("HealthyMessage") 
self.CreateTime = params.get("CreateTime") self.ExpireTime = params.get("ExpireTime") self.IsInternal = params.get("IsInternal") self.TopicNum = params.get("TopicNum") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) self.Version = params.get("Version") self.ZoneIds = params.get("ZoneIds") self.Cvm = params.get("Cvm") self.InstanceType = params.get("InstanceType") self.DiskType = params.get("DiskType") self.MaxTopicNumber = params.get("MaxTopicNumber") self.MaxPartitionNumber = params.get("MaxPartitionNumber") self.RebalanceTime = params.get("RebalanceTime") self.PartitionNumber = params.get("PartitionNumber") self.PublicNetworkChargeType = params.get("PublicNetworkChargeType") self.PublicNetwork = params.get("PublicNetwork") self.ClusterType = params.get("ClusterType") self.Features = params.get("Features") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceDetailResponse(AbstractModel): """实例详情返回结果 """ def __init__(self): r""" :param TotalCount: 符合条件的实例总数 :type TotalCount: int :param InstanceList: 符合条件的实例详情列表 :type InstanceList: list of InstanceDetail """ self.TotalCount = None self.InstanceList = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") if params.get("InstanceList") is not None: self.InstanceList = [] for item in params.get("InstanceList"): obj = InstanceDetail() obj._deserialize(item) self.InstanceList.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceQuotaConfigResp(AbstractModel): """实例 / topic 维度限流策略 """ def __init__(self): r""" :param QuotaProducerByteRate: 生产限流大小,单位 MB/s 注意:此字段可能返回 null,表示取不到有效值。 :type QuotaProducerByteRate: int :param QuotaConsumerByteRate: 消费限流大小,单位 MB/s 注意:此字段可能返回 null,表示取不到有效值。 :type QuotaConsumerByteRate: int """ self.QuotaProducerByteRate = None self.QuotaConsumerByteRate = None def _deserialize(self, params): self.QuotaProducerByteRate = params.get("QuotaProducerByteRate") self.QuotaConsumerByteRate = params.get("QuotaConsumerByteRate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class InstanceResponse(AbstractModel): """聚合的实例状态返回结果 """ def __init__(self): r""" :param InstanceList: 符合条件的实例列表 注意:此字段可能返回 null,表示取不到有效值。 :type InstanceList: list of Instance :param TotalCount: 符合条件的结果总数 注意:此字段可能返回 null,表示取不到有效值。 :type TotalCount: int """ self.InstanceList = None self.TotalCount = None def _deserialize(self, params): if params.get("InstanceList") is not None: self.InstanceList = [] for item in params.get("InstanceList"): obj = Instance() obj._deserialize(item) self.InstanceList.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
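        # A minimal usage sketch (not part of the generated models): the
        # InstanceDetail / InstanceDetailResponse structures above are what a
        # DescribeInstancesDetail call returns in its Result field. The
        # SecretId/SecretKey, region, request model name and search word are
        # placeholders / assumptions, not values taken from this file.
        #
        #     from tencentcloud.common import credential
        #     from tencentcloud.ckafka.v20190819 import ckafka_client, models
        #
        #     cred = credential.Credential("SecretId", "SecretKey")
        #     client = ckafka_client.CkafkaClient(cred, "ap-guangzhou")
        #     req = models.DescribeInstancesDetailRequest()
        #     req.SearchWord = "ckafka-xxxxxxxx"        # optional filter
        #     resp = client.DescribeInstancesDetail(req)
        #     for inst in resp.Result.InstanceList:     # list of InstanceDetail
        #         print(inst.InstanceId, inst.InstanceName, inst.Status)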
% ",".join(memeber_set)) class JgwOperateResponse(AbstractModel): """操作型结果返回值 """ def __init__(self): r""" :param ReturnCode: 返回的code,0为正常,非0为错误 :type ReturnCode: str :param ReturnMessage: 成功消息 :type ReturnMessage: str :param Data: 操作型返回的Data数据,可能有flowId等 注意:此字段可能返回 null,表示取不到有效值。 :type Data: :class:`tencentcloud.ckafka.v20190819.models.OperateResponseData` """ self.ReturnCode = None self.ReturnMessage = None self.Data = None def _deserialize(self, params): self.ReturnCode = params.get("ReturnCode") self.ReturnMessage = params.get("ReturnMessage") if params.get("Data") is not None: self.Data = OperateResponseData() self.Data._deserialize(params.get("Data")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class JsonPathReplaceParam(AbstractModel): """数据处理——Value处理参数——Jsonpath替换参数 """ def __init__(self): r""" :param OldValue: 被替换值,Jsonpath表达式 :type OldValue: str :param NewValue: 替换值,Jsonpath表达式或字符串 :type NewValue: str """ self.OldValue = None self.NewValue = None def _deserialize(self, params): self.OldValue = params.get("OldValue") self.NewValue = params.get("NewValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class KVParam(AbstractModel): """key-value二次解析 """ def __init__(self): r""" :param Delimiter: 分隔符 :type Delimiter: str :param Regex: key-value二次解析分隔符 :type Regex: str :param KeepOriginalKey: 保留源Key,默认为false不保留 注意:此字段可能返回 null,表示取不到有效值。 :type KeepOriginalKey: str """ self.Delimiter = None self.Regex = None self.KeepOriginalKey = None def _deserialize(self, params): self.Delimiter = params.get("Delimiter") self.Regex = params.get("Regex") self.KeepOriginalKey = params.get("KeepOriginalKey") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class KafkaParam(AbstractModel): """Ckafka配置 """ def __init__(self): r""" :param SelfBuilt: 是否为自建集群 :type SelfBuilt: bool :param Resource: 实例资源 :type Resource: str :param Topic: Topic名称,多个以“,”分隔 :type Topic: str :param OffsetType: Offset类型,最开始位置earliest,最新位置latest,时间点位置timestamp 注意:此字段可能返回 null,表示取不到有效值。 :type OffsetType: str :param StartTime: Offset类型为timestamp时必传,传时间戳,精确到秒 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: int :param ResourceName: 实例资源名称 注意:此字段可能返回 null,表示取不到有效值。 :type ResourceName: str :param ZoneId: Zone ID 注意:此字段可能返回 null,表示取不到有效值。 :type ZoneId: int :param TopicId: Topic的Id 注意:此字段可能返回 null,表示取不到有效值。 :type TopicId: str :param PartitionNum: Topic的分区数 注意:此字段可能返回 null,表示取不到有效值。 :type PartitionNum: int :param EnableToleration: 启用容错实例/开启死信队列 注意:此字段可能返回 null,表示取不到有效值。 :type EnableToleration: bool :param QpsLimit: Qps 限制 注意:此字段可能返回 null,表示取不到有效值。 :type QpsLimit: int :param TableMappings: Table到Topic的路由,「分发到多个topic」开关打开时必传 注意:此字段可能返回 null,表示取不到有效值。 :type TableMappings: list of TableMapping :param UseTableMapping: 「分发到多个topic」开关,默认为false 注意:此字段可能返回 null,表示取不到有效值。 :type UseTableMapping: bool :param UseAutoCreateTopic: 使用的Topic是否需要自动创建(目前只支持SOURCE流入任务,如果不使用分发到多个topic,需要在Topic字段填写需要自动创建的topic名) 注意:此字段可能返回 null,表示取不到有效值。 :type UseAutoCreateTopic: bool :param CompressionType: 写入Topic时是否进行压缩,不开启填"none",开启的话,填写"open"。 注意:此字段可能返回 null,表示取不到有效值。 :type CompressionType: str """ self.SelfBuilt = None self.Resource = None self.Topic = None self.OffsetType = None self.StartTime = None self.ResourceName = None self.ZoneId = None self.TopicId = None self.PartitionNum = None self.EnableToleration = None self.QpsLimit = None self.TableMappings = None self.UseTableMapping = None self.UseAutoCreateTopic = None self.CompressionType = None def _deserialize(self, params): self.SelfBuilt = params.get("SelfBuilt") self.Resource = params.get("Resource") self.Topic = params.get("Topic") self.OffsetType = params.get("OffsetType") self.StartTime = params.get("StartTime") self.ResourceName = params.get("ResourceName") self.ZoneId = params.get("ZoneId") self.TopicId = params.get("TopicId") self.PartitionNum = params.get("PartitionNum") self.EnableToleration = params.get("EnableToleration") self.QpsLimit = params.get("QpsLimit") if params.get("TableMappings") is not None: self.TableMappings = [] for item in params.get("TableMappings"): obj = TableMapping() obj._deserialize(item) self.TableMappings.append(obj) self.UseTableMapping = params.get("UseTableMapping") self.UseAutoCreateTopic = params.get("UseAutoCreateTopic") self.CompressionType = params.get("CompressionType") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MapParam(AbstractModel): """Map参数 """ def __init__(self): r""" :param Key: key值 :type Key: str :param Type: 类型,DEFAULT默认,DATE系统预设-时间戳,CUSTOMIZE自定义,MAPPING映射 :type Type: str :param Value: 值 :type Value: str """ self.Key = None self.Type = None self.Value = None def _deserialize(self, params): self.Key = params.get("Key") self.Type = params.get("Type") self.Value = params.get("Value") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
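        # Sketch of how these models are deserialized (illustrative only):
        # AbstractModel.from_json_string feeds the parsed dict into
        # _deserialize, and any key that does not map to an attribute is
        # reported through the warnings.warn call wrapping this comment.
        # The JSON payload below is made up for demonstration.
        #
        #     from tencentcloud.ckafka.v20190819 import models
        #
        #     kv = models.KVParam()
        #     kv.from_json_string('{"Delimiter": "|", "Regex": ":", "Foo": 1}')
        #     # emits UserWarning: "Foo fileds are useless."
        #     print(kv.Delimiter, kv.KeepOriginalKey)    # "|" None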
% ",".join(memeber_set)) class MariaDBConnectParam(AbstractModel): """MariaDB连接源参数 """ def __init__(self): r""" :param Port: MariaDB的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: MariaDB连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MariaDB连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: MariaDB连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param ServiceVip: MariaDB连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MariaDB连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MariaDBModifyConnectParam(AbstractModel): """MariaDB连接源参数 """ def __init__(self): r""" :param Resource: MariaDB连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: MariaDB的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: MariaDB连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MariaDB连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: MariaDB连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MariaDB连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.IsUpdate = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MariaDBParam(AbstractModel): """MariaDB类型入参 """ def __init__(self): r""" :param Database: MariaDB的数据库名称,"*"为全数据库 :type Database: str :param Table: MariaDB的数据表名称,"*"为所监听的所有数据库中的非系统表,可以","间隔,监听多个数据表,但数据表需要以"数据库名.数据表名"的格式进行填写 :type Table: str :param Resource: 该MariaDB在连接管理内的Id :type Resource: str :param SnapshotMode: 复制存量信息(schema_only不复制, initial全量),默认位initial :type SnapshotMode: str :param KeyColumns: 格式:库1.表1:字段1,字段2;库2.表2:字段2,表之间;(分号)隔开,字段之间,(逗号)隔开。不指定的表默认取表的主键 :type KeyColumns: str :param IsTablePrefix: 当Table输入的是前缀时,该项值为true,否则为false :type IsTablePrefix: bool :param OutputFormat: 输出格式,DEFAULT、CANAL_1、CANAL_2 :type OutputFormat: str :param IncludeContentChanges: 如果该值为all,则DDL数据以及DML数据也会写入到选中的topic;若该值为dml,则只有DML数据写入到选中的topic :type IncludeContentChanges: str :param IncludeQuery: 如果该值为true,且MySQL中"binlog_rows_query_log_events"配置项的值为"ON",则流入到topic的数据包含原SQL语句;若该值为false,流入到topic的数据不包含原SQL语句 :type IncludeQuery: bool :param RecordWithSchema: 如果该值为 true,则消息中会携带消息结构体对应的schema,如果该值为false则不会携带 :type RecordWithSchema: bool """ self.Database = None self.Table = None self.Resource = None self.SnapshotMode = None self.KeyColumns = None self.IsTablePrefix = None self.OutputFormat = None self.IncludeContentChanges = None self.IncludeQuery = None self.RecordWithSchema = None def _deserialize(self, params): self.Database = params.get("Database") self.Table = params.get("Table") self.Resource = params.get("Resource") self.SnapshotMode = params.get("SnapshotMode") self.KeyColumns = params.get("KeyColumns") self.IsTablePrefix = params.get("IsTablePrefix") self.OutputFormat = params.get("OutputFormat") self.IncludeContentChanges = params.get("IncludeContentChanges") self.IncludeQuery = params.get("IncludeQuery") self.RecordWithSchema = params.get("RecordWithSchema") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ModifyConnectResourceRequest(AbstractModel): """ModifyConnectResource请求参数结构体 """ def __init__(self): r""" :param ResourceId: 连接源的Id :type ResourceId: str :param ResourceName: 连接源名称,为空时不修改 :type ResourceName: str :param Description: 连接源描述,为空时不修改 :type Description: str :param Type: 连接源类型,修改数据源参数时,需要与原Type相同,否则编辑数据源无效 :type Type: str :param DtsConnectParam: Dts配置,Type为DTS时必填 :type DtsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DtsModifyConnectParam` :param MongoDBConnectParam: MongoDB配置,Type为MONGODB时必填 :type MongoDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MongoDBModifyConnectParam` :param EsConnectParam: Es配置,Type为ES时必填 :type EsConnectParam: :class:`tencentcloud.ckafka.v20190819.models.EsModifyConnectParam` :param ClickHouseConnectParam: ClickHouse配置,Type为CLICKHOUSE时必填 :type ClickHouseConnectParam: :class:`tencentcloud.ckafka.v20190819.models.ClickHouseModifyConnectParam` :param MySQLConnectParam: MySQL配置,Type为MYSQL或TDSQL_C_MYSQL时必填 :type MySQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MySQLModifyConnectParam` :param PostgreSQLConnectParam: PostgreSQL配置,Type为POSTGRESQL或TDSQL_C_POSTGRESQL时必填 :type PostgreSQLConnectParam: :class:`tencentcloud.ckafka.v20190819.models.PostgreSQLModifyConnectParam` :param MariaDBConnectParam: MariaDB配置,Type为MARIADB时必填 :type MariaDBConnectParam: :class:`tencentcloud.ckafka.v20190819.models.MariaDBModifyConnectParam` :param SQLServerConnectParam: SQLServer配置,Type为SQLSERVER时必填 :type SQLServerConnectParam: :class:`tencentcloud.ckafka.v20190819.models.SQLServerModifyConnectParam` :param CtsdbConnectParam: Ctsdb配置,Type为CTSDB :type CtsdbConnectParam: :class:`tencentcloud.ckafka.v20190819.models.CtsdbModifyConnectParam` :param DorisConnectParam: Doris配置,Type为DORIS :type DorisConnectParam: :class:`tencentcloud.ckafka.v20190819.models.DorisModifyConnectParam` """ self.ResourceId = None self.ResourceName = None self.Description = None self.Type = None self.DtsConnectParam = None self.MongoDBConnectParam = None self.EsConnectParam = None self.ClickHouseConnectParam = None self.MySQLConnectParam = None self.PostgreSQLConnectParam = None self.MariaDBConnectParam = None self.SQLServerConnectParam = None self.CtsdbConnectParam = None self.DorisConnectParam = None def _deserialize(self, params): self.ResourceId = params.get("ResourceId") self.ResourceName = params.get("ResourceName") self.Description = params.get("Description") self.Type = params.get("Type") if params.get("DtsConnectParam") is not None: self.DtsConnectParam = DtsModifyConnectParam() self.DtsConnectParam._deserialize(params.get("DtsConnectParam")) if params.get("MongoDBConnectParam") is not None: self.MongoDBConnectParam = MongoDBModifyConnectParam() self.MongoDBConnectParam._deserialize(params.get("MongoDBConnectParam")) if params.get("EsConnectParam") is not None: self.EsConnectParam = EsModifyConnectParam() self.EsConnectParam._deserialize(params.get("EsConnectParam")) if params.get("ClickHouseConnectParam") is not None: self.ClickHouseConnectParam = ClickHouseModifyConnectParam() self.ClickHouseConnectParam._deserialize(params.get("ClickHouseConnectParam")) if params.get("MySQLConnectParam") is not None: self.MySQLConnectParam = MySQLModifyConnectParam() self.MySQLConnectParam._deserialize(params.get("MySQLConnectParam")) if params.get("PostgreSQLConnectParam") is not None: self.PostgreSQLConnectParam = PostgreSQLModifyConnectParam() self.PostgreSQLConnectParam._deserialize(params.get("PostgreSQLConnectParam")) if 
params.get("MariaDBConnectParam") is not None: self.MariaDBConnectParam = MariaDBModifyConnectParam() self.MariaDBConnectParam._deserialize(params.get("MariaDBConnectParam")) if params.get("SQLServerConnectParam") is not None: self.SQLServerConnectParam = SQLServerModifyConnectParam() self.SQLServerConnectParam._deserialize(params.get("SQLServerConnectParam")) if params.get("CtsdbConnectParam") is not None: self.CtsdbConnectParam = CtsdbModifyConnectParam() self.CtsdbConnectParam._deserialize(params.get("CtsdbConnectParam")) if params.get("DorisConnectParam") is not None: self.DorisConnectParam = DorisModifyConnectParam() self.DorisConnectParam._deserialize(params.get("DorisConnectParam")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyConnectResourceResponse(AbstractModel): """ModifyConnectResource返回参数结构体 """ def __init__(self): r""" :param Result: 连接源的Id :type Result: :class:`tencentcloud.ckafka.v20190819.models.ConnectResourceResourceIdResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = ConnectResourceResourceIdResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyDatahubTaskRequest(AbstractModel): """ModifyDatahubTask请求参数结构体 """ def __init__(self): r""" :param TaskId: 任务id :type TaskId: str :param TaskName: 任务名称 :type TaskName: str """ self.TaskId = None self.TaskName = None def _deserialize(self, params): self.TaskId = params.get("TaskId") self.TaskName = params.get("TaskName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyDatahubTaskResponse(AbstractModel): """ModifyDatahubTask返回参数结构体 """ def __init__(self): r""" :param Result: 任务id 注意:此字段可能返回 null,表示取不到有效值。 :type Result: :class:`tencentcloud.ckafka.v20190819.models.DatahubTaskIdRes` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = DatahubTaskIdRes() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyGroupOffsetsRequest(AbstractModel): """ModifyGroupOffsets请求参数结构体 """ def __init__(self): r""" :param InstanceId: kafka实例id :type InstanceId: str :param Group: kafka 消费分组 :type Group: str :param Strategy: 重置offset的策略,入参含义 0. 对齐shift-by参数,代表把offset向前或向后移动shift条 1. 对齐参考(by-duration,to-datetime,to-earliest,to-latest),代表把offset移动到指定timestamp的位置 2. 
对齐参考(to-offset),代表把offset移动到指定的offset位置 :type Strategy: int :param Topics: 表示需要重置的topics, 不填表示全部 :type Topics: list of str :param Shift: 当strategy为0时,必须包含该字段,可以大于零代表会把offset向后移动shift条,小于零则将offset向前回溯shift条数。正确重置后新的offset应该是(old_offset + shift),需要注意的是如果新的offset小于partition的earliest则会设置为earliest,如果大于partition 的latest则会设置为latest :type Shift: int :param ShiftTimestamp: 单位ms。当strategy为1时,必须包含该字段,其中-2表示重置offset到最开始的位置,-1表示重置到最新的位置(相当于清空),其它值则代表指定的时间,会获取topic中指定时间的offset然后进行重置,需要注意的时,如果指定的时间不存在消息,则获取最末尾的offset。 :type ShiftTimestamp: int :param Offset: 需要重新设置的offset位置。当strategy为2,必须包含该字段。 :type Offset: int :param Partitions: 需要重新设置的partition的列表,如果没有指定Topics参数。则重置全部topics的对应的Partition列表里的partition。指定Topics时则重置指定的topic列表的对应的Partitions列表的partition。 :type Partitions: list of int """ self.InstanceId = None self.Group = None self.Strategy = None self.Topics = None self.Shift = None self.ShiftTimestamp = None self.Offset = None self.Partitions = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Group = params.get("Group") self.Strategy = params.get("Strategy") self.Topics = params.get("Topics") self.Shift = params.get("Shift") self.ShiftTimestamp = params.get("ShiftTimestamp") self.Offset = params.get("Offset") self.Partitions = params.get("Partitions") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyGroupOffsetsResponse(AbstractModel): """ModifyGroupOffsets返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyInstanceAttributesConfig(AbstractModel): """修改实例属性的配置对象 """ def __init__(self): r""" :param AutoCreateTopicEnable: 自动创建 true 表示开启,false 表示不开启 :type AutoCreateTopicEnable: bool :param DefaultNumPartitions: 可选,如果auto.create.topic.enable设置为true没有设置该值时,默认设置为3 :type DefaultNumPartitions: int :param DefaultReplicationFactor: 如歌auto.create.topic.enable设置为true没有指定该值时默认设置为2 :type DefaultReplicationFactor: int """ self.AutoCreateTopicEnable = None self.DefaultNumPartitions = None self.DefaultReplicationFactor = None def _deserialize(self, params): self.AutoCreateTopicEnable = params.get("AutoCreateTopicEnable") self.DefaultNumPartitions = params.get("DefaultNumPartitions") self.DefaultReplicationFactor = params.get("DefaultReplicationFactor") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
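        # Usage sketch for resetting consumer-group offsets with the
        # ModifyGroupOffsetsRequest model above (assumes a CkafkaClient set up
        # as in the earlier DescribeInstancesDetail sketch; the instance id
        # and group name are placeholders):
        #
        #     req = models.ModifyGroupOffsetsRequest()
        #     req.InstanceId = "ckafka-xxxxxxxx"
        #     req.Group = "group-demo"
        #     req.Strategy = 0      # 0 = shift-by mode, requires Shift
        #     req.Shift = -100      # move committed offsets back 100 messages
        #     resp = client.ModifyGroupOffsets(req)
        #     print(resp.Result.ReturnCode, resp.Result.ReturnMessage)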
% ",".join(memeber_set)) class ModifyInstanceAttributesRequest(AbstractModel): """ModifyInstanceAttributes请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例id :type InstanceId: str :param MsgRetentionTime: 实例日志的最长保留时间,单位分钟,最大30天,0代表不开启日志保留时间回收策略 :type MsgRetentionTime: int :param InstanceName: 实例名称,是一个不超过 64 个字符的字符串,必须以字母为首字符,剩余部分可以包含字母、数字和横划线(-) :type InstanceName: str :param Config: 实例配置 :type Config: :class:`tencentcloud.ckafka.v20190819.models.ModifyInstanceAttributesConfig` :param DynamicRetentionConfig: 动态消息保留策略配置 :type DynamicRetentionConfig: :class:`tencentcloud.ckafka.v20190819.models.DynamicRetentionTime` :param RebalanceTime: 修改升配置rebalance时间 :type RebalanceTime: int :param PublicNetwork: 时间戳 :type PublicNetwork: int :param DynamicDiskConfig: 动态硬盘扩容策略配置 :type DynamicDiskConfig: :class:`tencentcloud.ckafka.v20190819.models.DynamicDiskConfig` :param MaxMessageByte: 实例级别单条消息大小(单位byte) :type MaxMessageByte: int """ self.InstanceId = None self.MsgRetentionTime = None self.InstanceName = None self.Config = None self.DynamicRetentionConfig = None self.RebalanceTime = None self.PublicNetwork = None self.DynamicDiskConfig = None self.MaxMessageByte = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.MsgRetentionTime = params.get("MsgRetentionTime") self.InstanceName = params.get("InstanceName") if params.get("Config") is not None: self.Config = ModifyInstanceAttributesConfig() self.Config._deserialize(params.get("Config")) if params.get("DynamicRetentionConfig") is not None: self.DynamicRetentionConfig = DynamicRetentionTime() self.DynamicRetentionConfig._deserialize(params.get("DynamicRetentionConfig")) self.RebalanceTime = params.get("RebalanceTime") self.PublicNetwork = params.get("PublicNetwork") if params.get("DynamicDiskConfig") is not None: self.DynamicDiskConfig = DynamicDiskConfig() self.DynamicDiskConfig._deserialize(params.get("DynamicDiskConfig")) self.MaxMessageByte = params.get("MaxMessageByte") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyInstanceAttributesResponse(AbstractModel): """ModifyInstanceAttributes返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyInstancePreRequest(AbstractModel): """ModifyInstancePre请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例名称 :type InstanceId: str :param DiskSize: 预计磁盘,根据磁盘步长,规格向上调整。 :type DiskSize: int :param BandWidth: 预计带宽,根据带宽步长,规格向上调整。 :type BandWidth: int :param Partition: 预计分区,根据带宽步长,规格向上调整。 :type Partition: int """ self.InstanceId = None self.DiskSize = None self.BandWidth = None self.Partition = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.DiskSize = params.get("DiskSize") self.BandWidth = params.get("BandWidth") self.Partition = params.get("Partition") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
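        # Usage sketch combining ModifyInstanceAttributesRequest with the
        # ModifyInstanceAttributesConfig sub-model defined above (same client
        # setup as in the earlier sketches; the instance id is a placeholder):
        #
        #     cfg = models.ModifyInstanceAttributesConfig()
        #     cfg.AutoCreateTopicEnable = True
        #     cfg.DefaultNumPartitions = 3
        #     cfg.DefaultReplicationFactor = 2
        #
        #     req = models.ModifyInstanceAttributesRequest()
        #     req.InstanceId = "ckafka-xxxxxxxx"
        #     req.MsgRetentionTime = 1440       # minutes, i.e. one day
        #     req.Config = cfg
        #     resp = client.ModifyInstanceAttributes(req)
        #     print(resp.Result.ReturnCode)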
% ",".join(memeber_set)) class ModifyInstancePreResponse(AbstractModel): """ModifyInstancePre返回参数结构体 """ def __init__(self): r""" :param Result: 变更预付费实例配置返回结构 :type Result: :class:`tencentcloud.ckafka.v20190819.models.CreateInstancePreResp` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = CreateInstancePreResp() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyPasswordRequest(AbstractModel): """ModifyPassword请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例Id :type InstanceId: str :param Name: 用户名称 :type Name: str :param Password: 用户当前密码 :type Password: str :param PasswordNew: 用户新密码 :type PasswordNew: str """ self.InstanceId = None self.Name = None self.Password = None self.PasswordNew = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.Name = params.get("Name") self.Password = params.get("Password") self.PasswordNew = params.get("PasswordNew") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ModifyPasswordResponse(AbstractModel): """ModifyPassword返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class ModifyTopicAttributesRequest(AbstractModel): """ModifyTopicAttributes请求参数结构体 """ def __init__(self): r""" :param InstanceId: 实例 ID。 :type InstanceId: str :param TopicName: 主题名称。 :type TopicName: str :param Note: 主题备注,是一个不超过64个字符的字符串,必须以字母为首字符,剩余部分可以包含字母、数字和横划线-。 :type Note: str :param EnableWhiteList: IP 白名单开关,1:打开;0:关闭。 :type EnableWhiteList: int :param MinInsyncReplicas: 默认为1。 :type MinInsyncReplicas: int :param UncleanLeaderElectionEnable: 默认为 0,0:false;1:true。 :type UncleanLeaderElectionEnable: int :param RetentionMs: 消息保留时间,单位:ms,当前最小值为60000ms。 :type RetentionMs: int :param SegmentMs: Segment 分片滚动的时长,单位:ms,当前最小为86400000ms。 :type SegmentMs: int :param MaxMessageBytes: 主题消息最大值,单位为 Byte,最大值为12582912Byte(即12MB)。 :type MaxMessageBytes: int :param CleanUpPolicy: 消息删除策略,可以选择delete 或者compact :type CleanUpPolicy: str :param IpWhiteList: Ip白名单列表,配额限制,enableWhileList=1时必选 :type IpWhiteList: list of str :param EnableAclRule: 预设ACL规则, 1:打开 0:关闭,默认不打开 :type EnableAclRule: int :param AclRuleName: 预设ACL规则的名称 :type AclRuleName: str :param RetentionBytes: 可选, 保留文件大小. 
默认为-1,单位bytes, 当前最小值为1048576B :type RetentionBytes: int :param Tags: 标签列表 :type Tags: list of Tag :param QuotaProducerByteRate: 生产限流,单位 MB/s :type QuotaProducerByteRate: int :param QuotaConsumerByteRate: 消费限流,单位 MB/s :type QuotaConsumerByteRate: int :param ReplicaNum: 调整topic副本数 :type ReplicaNum: int """ self.InstanceId = None self.TopicName = None self.Note = None self.EnableWhiteList = None self.MinInsyncReplicas = None self.UncleanLeaderElectionEnable = None self.RetentionMs = None self.SegmentMs = None self.MaxMessageBytes = None self.CleanUpPolicy = None self.IpWhiteList = None self.EnableAclRule = None self.AclRuleName = None self.RetentionBytes = None self.Tags = None self.QuotaProducerByteRate = None self.QuotaConsumerByteRate = None self.ReplicaNum = None def _deserialize(self, params): self.InstanceId = params.get("InstanceId") self.TopicName = params.get("TopicName") self.Note = params.get("Note") self.EnableWhiteList = params.get("EnableWhiteList") self.MinInsyncReplicas = params.get("MinInsyncReplicas") self.UncleanLeaderElectionEnable = params.get("UncleanLeaderElectionEnable") self.RetentionMs = params.get("RetentionMs") self.SegmentMs = params.get("SegmentMs") self.MaxMessageBytes = params.get("MaxMessageBytes") self.CleanUpPolicy = params.get("CleanUpPolicy") self.IpWhiteList = params.get("IpWhiteList") self.EnableAclRule = params.get("EnableAclRule") self.AclRuleName = params.get("AclRuleName") self.RetentionBytes = params.get("RetentionBytes") if params.get("Tags") is not None: self.Tags = [] for item in params.get("Tags"): obj = Tag() obj._deserialize(item) self.Tags.append(obj) self.QuotaProducerByteRate = params.get("QuotaProducerByteRate") self.QuotaConsumerByteRate = params.get("QuotaConsumerByteRate") self.ReplicaNum = params.get("ReplicaNum") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
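        # Usage sketch for the topic-attribute request above (same client
        # setup as in the earlier sketches; instance id and topic name are
        # placeholders):
        #
        #     req = models.ModifyTopicAttributesRequest()
        #     req.InstanceId = "ckafka-xxxxxxxx"
        #     req.TopicName = "topic-demo"
        #     req.RetentionMs = 86400000        # one day; minimum is 60000 ms
        #     req.MaxMessageBytes = 1048576     # 1 MB per-message limit
        #     resp = client.ModifyTopicAttributes(req)
        #     print(resp.Result.ReturnCode)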
% ",".join(memeber_set)) class ModifyTopicAttributesResponse(AbstractModel): """ModifyTopicAttributes返回参数结构体 """ def __init__(self): r""" :param Result: 返回结果集 :type Result: :class:`tencentcloud.ckafka.v20190819.models.JgwOperateResponse` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None def _deserialize(self, params): if params.get("Result") is not None: self.Result = JgwOperateResponse() self.Result._deserialize(params.get("Result")) self.RequestId = params.get("RequestId") class MongoDBConnectParam(AbstractModel): """MongoDB连接源参数 """ def __init__(self): r""" :param Port: MongoDB的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: MongoDB连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MongoDB连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: MongoDB连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param SelfBuilt: MongoDB连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param ServiceVip: MongoDB连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MongoDB连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.SelfBuilt = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.SelfBuilt = params.get("SelfBuilt") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MongoDBModifyConnectParam(AbstractModel): """MongoDB修改连接源参数 """ def __init__(self): r""" :param Resource: MongoDB连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: MongoDB的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: MongoDB连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MongoDB连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: MongoDB连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MongoDB连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param SelfBuilt: MongoDB连接源是否为自建集群【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.SelfBuilt = None self.IsUpdate = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.SelfBuilt = params.get("SelfBuilt") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MongoDBParam(AbstractModel): """MongoDB类型入参 """ def __init__(self): r""" :param Database: MongoDB的数据库名称 :type Database: str :param Collection: MongoDB的集群 :type Collection: str :param CopyExisting: 是否复制存量数据,默认传参true :type CopyExisting: bool :param Resource: 实例资源 :type Resource: str :param Ip: MongoDB的连接ip :type Ip: str :param Port: MongoDB的连接port :type Port: int :param UserName: MongoDB数据库用户名 :type UserName: str :param Password: MongoDB数据库密码 :type Password: str :param ListeningEvent: 监听事件类型,为空时表示全选。取值包括insert,update,replace,delete,invalidate,drop,dropdatabase,rename,多个类型间使用,逗号分隔 :type ListeningEvent: str :param ReadPreference: 主从优先级,默认主节点 :type ReadPreference: str :param Pipeline: 聚合管道 :type Pipeline: str :param SelfBuilt: 是否为自建集群 :type SelfBuilt: bool """ self.Database = None self.Collection = None self.CopyExisting = None self.Resource = None self.Ip = None self.Port = None self.UserName = None self.Password = None self.ListeningEvent = None self.ReadPreference = None self.Pipeline = None self.SelfBuilt = None def _deserialize(self, params): self.Database = params.get("Database") self.Collection = params.get("Collection") self.CopyExisting = params.get("CopyExisting") self.Resource = params.get("Resource") self.Ip = params.get("Ip") self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.ListeningEvent = params.get("ListeningEvent") self.ReadPreference = params.get("ReadPreference") self.Pipeline = params.get("Pipeline") self.SelfBuilt = params.get("SelfBuilt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MySQLConnectParam(AbstractModel): """MySQL连接源参数 """ def __init__(self): r""" :param Port: MySQL的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: MySQL连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MySQL连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: MySQL连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param ServiceVip: MySQL连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MySQL连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param ClusterId: 当type为TDSQL_C_MYSQL时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterId: str :param SelfBuilt: Mysql 连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None self.ClusterId = None self.SelfBuilt = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") self.ClusterId = params.get("ClusterId") self.SelfBuilt = params.get("SelfBuilt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MySQLModifyConnectParam(AbstractModel): """MySQL修改连接源参数 """ def __init__(self): r""" :param Resource: MySQL连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: MySQL的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: MySQL连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: MySQL连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: MySQL连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: MySQL连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param ClusterId: 当type为TDSQL_C_MYSQL时 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterId: str :param SelfBuilt: 是否是自建的集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.IsUpdate = None self.ClusterId = None self.SelfBuilt = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.IsUpdate = params.get("IsUpdate") self.ClusterId = params.get("ClusterId") self.SelfBuilt = params.get("SelfBuilt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MySQLParam(AbstractModel): """MySQL类型入参 """ def __init__(self): r""" :param Database: MySQL的数据库名称,"*"为全数据库 :type Database: str :param Table: MySQL的数据表名称,"*"为所监听的所有数据库中的非系统表,可以","间隔,监听多个数据表,但数据表需要以"数据库名.数据表名"的格式进行填写,需要填入正则表达式时,格式为"数据库名\\.数据表名" :type Table: str :param Resource: 该MySQL在连接管理内的Id :type Resource: str :param SnapshotMode: 复制存量信息(schema_only不复制, initial全量),默认位initial :type SnapshotMode: str :param DdlTopic: 存放MySQL的Ddl信息的Topic,为空则默认不存放 :type DdlTopic: str :param DataSourceMonitorMode: "TABLE" 表示读取项为 table,"QUERY" 表示读取项为 query :type DataSourceMonitorMode: str :param DataSourceMonitorResource: 当 "DataMonitorMode"="TABLE" 时,传入需要读取的 Table;当 "DataMonitorMode"="QUERY" 时,传入需要读取的查询 sql 语句 :type DataSourceMonitorResource: str :param DataSourceIncrementMode: "TIMESTAMP" 表示增量列为时间戳类型,"INCREMENT" 表示增量列为自增 id 类型 :type DataSourceIncrementMode: str :param DataSourceIncrementColumn: 传入需要监听的列名称 :type DataSourceIncrementColumn: str :param DataSourceStartFrom: "HEAD" 表示复制存量 + 增量数据,"TAIL" 表示只复制增量数据 :type DataSourceStartFrom: str :param DataTargetInsertMode: "INSERT" 表示使用 Insert 模式插入,"UPSERT" 表示使用 Upsert 模式插入 :type DataTargetInsertMode: str :param DataTargetPrimaryKeyField: 当 "DataInsertMode"="UPSERT" 时,传入当前 upsert 时依赖的主键 :type DataTargetPrimaryKeyField: str :param DataTargetRecordMapping: 表与消息间的映射关系 :type DataTargetRecordMapping: list of RecordMapping :param TopicRegex: 事件路由到特定主题的正则表达式,默认为(.*) :type TopicRegex: str :param TopicReplacement: TopicRegex的引用组,指定$1、$2等 :type TopicReplacement: str :param KeyColumns: 格式:库1.表1:字段1,字段2;库2.表2:字段2,表之间;(分号)隔开,字段之间,(逗号)隔开。不指定的表默认取表的主键 :type KeyColumns: str :param DropInvalidMessage: Mysql 是否抛弃解析失败的消息,默认为true :type DropInvalidMessage: bool :param DropCls: 当设置成员参数DropInvalidMessageToCls设置为true时,DropInvalidMessage参数失效 :type DropCls: :class:`tencentcloud.ckafka.v20190819.models.DropCls` :param OutputFormat: 输出格式,DEFAULT、CANAL_1、CANAL_2 :type OutputFormat: str 
:param IsTablePrefix: 当Table输入的是前缀时,该项值为true,否则为false :type IsTablePrefix: bool :param IncludeContentChanges: 如果该值为all,则DDL数据以及DML数据也会写入到选中的topic;若该值为dml,则只有DML数据写入到选中的topic :type IncludeContentChanges: str :param IncludeQuery: 如果该值为true,且MySQL中"binlog_rows_query_log_events"配置项的值为"ON",则流入到topic的数据包含原SQL语句;若该值为false,流入到topic的数据不包含原SQL语句 :type IncludeQuery: bool :param RecordWithSchema: 如果该值为 true,则消息中会携带消息结构体对应的schema,如果该值为false则不会携带 :type RecordWithSchema: bool :param SignalDatabase: 存放信令表的数据库名称 :type SignalDatabase: str :param IsTableRegular: 输入的table是否为正则表达式,如果该选项以及IsTablePrefix同时为true,该选项的判断优先级高于IsTablePrefix :type IsTableRegular: bool """ self.Database = None self.Table = None self.Resource = None self.SnapshotMode = None self.DdlTopic = None self.DataSourceMonitorMode = None self.DataSourceMonitorResource = None self.DataSourceIncrementMode = None self.DataSourceIncrementColumn = None self.DataSourceStartFrom = None self.DataTargetInsertMode = None self.DataTargetPrimaryKeyField = None self.DataTargetRecordMapping = None self.TopicRegex = None self.TopicReplacement = None self.KeyColumns = None self.DropInvalidMessage = None self.DropCls = None self.OutputFormat = None self.IsTablePrefix = None self.IncludeContentChanges = None self.IncludeQuery = None self.RecordWithSchema = None self.SignalDatabase = None self.IsTableRegular = None def _deserialize(self, params): self.Database = params.get("Database") self.Table = params.get("Table") self.Resource = params.get("Resource") self.SnapshotMode = params.get("SnapshotMode") self.DdlTopic = params.get("DdlTopic") self.DataSourceMonitorMode = params.get("DataSourceMonitorMode") self.DataSourceMonitorResource = params.get("DataSourceMonitorResource") self.DataSourceIncrementMode = params.get("DataSourceIncrementMode") self.DataSourceIncrementColumn = params.get("DataSourceIncrementColumn") self.DataSourceStartFrom = params.get("DataSourceStartFrom") self.DataTargetInsertMode = params.get("DataTargetInsertMode") self.DataTargetPrimaryKeyField = params.get("DataTargetPrimaryKeyField") if params.get("DataTargetRecordMapping") is not None: self.DataTargetRecordMapping = [] for item in params.get("DataTargetRecordMapping"): obj = RecordMapping() obj._deserialize(item) self.DataTargetRecordMapping.append(obj) self.TopicRegex = params.get("TopicRegex") self.TopicReplacement = params.get("TopicReplacement") self.KeyColumns = params.get("KeyColumns") self.DropInvalidMessage = params.get("DropInvalidMessage") if params.get("DropCls") is not None: self.DropCls = DropCls() self.DropCls._deserialize(params.get("DropCls")) self.OutputFormat = params.get("OutputFormat") self.IsTablePrefix = params.get("IsTablePrefix") self.IncludeContentChanges = params.get("IncludeContentChanges") self.IncludeQuery = params.get("IncludeQuery") self.RecordWithSchema = params.get("RecordWithSchema") self.SignalDatabase = params.get("SignalDatabase") self.IsTableRegular = params.get("IsTableRegular") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
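        # Sketch of building the MySQLParam model above for a Datahub source
        # task (model construction only; the database, table and connection
        # resource id are placeholders, the latter normally coming from the
        # connection-management console):
        #
        #     p = models.MySQLParam()
        #     p.Database = "test_db"
        #     p.Table = "test_db.orders"          # "database.table" format
        #     p.Resource = "resource-xxxxxxxx"
        #     p.SnapshotMode = "initial"          # copy existing rows first
        #     p.KeyColumns = "test_db.orders:order_id"
        #     print(p.to_json_string())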
% ",".join(memeber_set)) class OperateResponseData(AbstractModel): """操作类型返回的Data结构 """ def __init__(self): r""" :param FlowId: FlowId11 注意:此字段可能返回 null,表示取不到有效值。 :type FlowId: int """ self.FlowId = None def _deserialize(self, params): self.FlowId = params.get("FlowId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Partition(AbstractModel): """分区实体 """ def __init__(self): r""" :param PartitionId: 分区ID :type PartitionId: int """ self.PartitionId = None def _deserialize(self, params): self.PartitionId = params.get("PartitionId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class PartitionOffset(AbstractModel): """分区和位移 """ def __init__(self): r""" :param Partition: Partition,例如"0"或"1" 注意:此字段可能返回 null,表示取不到有效值。 :type Partition: str :param Offset: Offset,例如100 注意:此字段可能返回 null,表示取不到有效值。 :type Offset: int """ self.Partition = None self.Offset = None def _deserialize(self, params): self.Partition = params.get("Partition") self.Offset = params.get("Offset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Partitions(AbstractModel): """partition信息 """ def __init__(self): r""" :param Partition: 分区 :type Partition: int :param Offset: partition 消费位移 :type Offset: int """ self.Partition = None self.Offset = None def _deserialize(self, params): self.Partition = params.get("Partition") self.Offset = params.get("Offset") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class PostgreSQLConnectParam(AbstractModel): """PostgreSQL连接源参数 """ def __init__(self): r""" :param Port: PostgreSQL的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: PostgreSQL连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: PostgreSQL连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: PostgreSQL连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param ServiceVip: PostgreSQL连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: PostgreSQL连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param ClusterId: 当type为TDSQL_C_POSTGRESQL时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param SelfBuilt: PostgreSQL连接源是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.ServiceVip = None self.UniqVpcId = None self.ClusterId = None self.IsUpdate = None self.SelfBuilt = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.ClusterId = params.get("ClusterId") self.IsUpdate = params.get("IsUpdate") self.SelfBuilt = params.get("SelfBuilt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class PostgreSQLModifyConnectParam(AbstractModel): """PostgreSQL修改连接源参数 """ def __init__(self): r""" :param Resource: PostgreSQL连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: PostgreSQL的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: PostgreSQL连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: PostgreSQL连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: PostgreSQL连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: PostgreSQL连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param ClusterId: 当type为TDSQL_C_POSTGRESQL时,该参数才有值【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterId: str :param IsUpdate: 是否更新到关联的Datahub任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool :param SelfBuilt: 是否为自建集群 注意:此字段可能返回 null,表示取不到有效值。 :type SelfBuilt: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.ClusterId = None self.IsUpdate = None self.SelfBuilt = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.ClusterId = params.get("ClusterId") self.IsUpdate = params.get("IsUpdate") self.SelfBuilt = params.get("SelfBuilt") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class PostgreSQLParam(AbstractModel): """PostgreSQL类型入参 """ def __init__(self): r""" :param Database: PostgreSQL的数据库名称 :type Database: str :param Table: PostgreSQL的数据表名称,"*"为所监听的所有数据库中的非系统表,可以","间隔,监听多个数据表,但数据表需要以"Schema名.数据表名"的格式进行填写,需要填入正则表达式时,格式为"Schema名\\.数据表名" :type Table: str :param Resource: 该PostgreSQL在连接管理内的Id :type Resource: str :param PluginName: 插件名(decoderbufs/pgoutput),默认为decoderbufs :type PluginName: str :param SnapshotMode: 复制存量信息(never增量, initial全量),默认为initial :type SnapshotMode: str :param DataFormat: 上游数据格式(JSON/Debezium), 当数据库同步模式为默认字段匹配时,必填 :type DataFormat: str :param DataTargetInsertMode: "INSERT" 表示使用 Insert 模式插入,"UPSERT" 表示使用 Upsert 模式插入 :type DataTargetInsertMode: str :param DataTargetPrimaryKeyField: 当 "DataInsertMode"="UPSERT" 时,传入当前 upsert 时依赖的主键 :type DataTargetPrimaryKeyField: str :param DataTargetRecordMapping: 表与消息间的映射关系 :type DataTargetRecordMapping: list of RecordMapping :param DropInvalidMessage: 是否抛弃解析失败的消息,默认为true :type DropInvalidMessage: bool :param IsTableRegular: 输入的table是否为正则表达式 :type IsTableRegular: bool :param KeyColumns: 格式:库1.表1:字段1,字段2;库2.表2:字段2,表之间;(分号)隔开,字段之间,(逗号)隔开。不指定的表默认取表的主键 :type KeyColumns: str :param RecordWithSchema: 如果该值为 true,则消息中会携带消息结构体对应的schema,如果该值为false则不会携带 :type RecordWithSchema: bool """ self.Database = None self.Table = None self.Resource = None self.PluginName = None self.SnapshotMode = None self.DataFormat = None self.DataTargetInsertMode = None self.DataTargetPrimaryKeyField = None self.DataTargetRecordMapping = None self.DropInvalidMessage = None self.IsTableRegular = None self.KeyColumns = None self.RecordWithSchema = None def _deserialize(self, params): self.Database = params.get("Database") self.Table = params.get("Table") self.Resource = params.get("Resource") self.PluginName = params.get("PluginName") self.SnapshotMode = params.get("SnapshotMode") self.DataFormat = params.get("DataFormat") self.DataTargetInsertMode = params.get("DataTargetInsertMode") self.DataTargetPrimaryKeyField = params.get("DataTargetPrimaryKeyField") if params.get("DataTargetRecordMapping") is not None: self.DataTargetRecordMapping = [] for item in params.get("DataTargetRecordMapping"): obj = RecordMapping() obj._deserialize(item) self.DataTargetRecordMapping.append(obj) self.DropInvalidMessage = params.get("DropInvalidMessage") self.IsTableRegular = params.get("IsTableRegular") self.KeyColumns = params.get("KeyColumns") self.RecordWithSchema = params.get("RecordWithSchema") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Price(AbstractModel): """消息价格实体 """ def __init__(self): r""" :param RealTotalCost: 折扣价 :type RealTotalCost: float :param TotalCost: 原价 :type TotalCost: float """ self.RealTotalCost = None self.TotalCost = None def _deserialize(self, params): self.RealTotalCost = params.get("RealTotalCost") self.TotalCost = params.get("TotalCost") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class PrivateLinkParam(AbstractModel): """建立私有连接的参数 """ def __init__(self): r""" :param ServiceVip: 客户实例的vip :type ServiceVip: str :param UniqVpcId: 客户实例的vpcId :type UniqVpcId: str """ self.ServiceVip = None self.UniqVpcId = None def _deserialize(self, params): self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class RecordMapping(AbstractModel): """record 与数据库表的映射关系 """ def __init__(self): r""" :param JsonKey: 消息的 key 名称 :type JsonKey: str :param Type: 消息类型 :type Type: str :param AllowNull: 消息是否允许为空 :type AllowNull: bool :param ColumnName: 对应映射列名称 :type ColumnName: str :param ExtraInfo: 数据库表额外字段 :type ExtraInfo: str :param ColumnSize: 当前列大小 :type ColumnSize: str :param DecimalDigits: 当前列精度 :type DecimalDigits: str :param AutoIncrement: 是否为自增列 :type AutoIncrement: bool :param DefaultValue: 数据库表默认参数 :type DefaultValue: str """ self.JsonKey = None self.Type = None self.AllowNull = None self.ColumnName = None self.ExtraInfo = None self.ColumnSize = None self.DecimalDigits = None self.AutoIncrement = None self.DefaultValue = None def _deserialize(self, params): self.JsonKey = params.get("JsonKey") self.Type = params.get("Type") self.AllowNull = params.get("AllowNull") self.ColumnName = params.get("ColumnName") self.ExtraInfo = params.get("ExtraInfo") self.ColumnSize = params.get("ColumnSize") self.DecimalDigits = params.get("DecimalDigits") self.AutoIncrement = params.get("AutoIncrement") self.DefaultValue = params.get("DefaultValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class RegexReplaceParam(AbstractModel): """数据处理——Value处理参数——正则替换参数 """ def __init__(self): r""" :param Regex: 正则表达式 :type Regex: str :param NewValue: 替换新值 :type NewValue: str """ self.Regex = None self.NewValue = None def _deserialize(self, params): self.Regex = params.get("Regex") self.NewValue = params.get("NewValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class Region(AbstractModel): """地域实体对象 """ def __init__(self): r""" :param RegionId: 地域ID :type RegionId: int :param RegionName: 地域名称 :type RegionName: str :param AreaName: 区域名称 :type AreaName: str :param RegionCode: 地域代码 注意:此字段可能返回 null,表示取不到有效值。 :type RegionCode: str :param RegionCodeV3: 地域代码(V3版本) 注意:此字段可能返回 null,表示取不到有效值。 :type RegionCodeV3: str :param Support: NONE:默认值不支持任何特殊机型\nCVM:支持CVM类型 注意:此字段可能返回 null,表示取不到有效值。 :type Support: str :param Ipv6: 是否支持ipv6, 0:表示不支持,1:表示支持 注意:此字段可能返回 null,表示取不到有效值。 :type Ipv6: int :param MultiZone: 是否支持跨可用区, 0:表示不支持,1:表示支持 注意:此字段可能返回 null,表示取不到有效值。 :type MultiZone: int """ self.RegionId = None self.RegionName = None self.AreaName = None self.RegionCode = None self.RegionCodeV3 = None self.Support = None self.Ipv6 = None self.MultiZone = None def _deserialize(self, params): self.RegionId = params.get("RegionId") self.RegionName = params.get("RegionName") self.AreaName = params.get("AreaName") self.RegionCode = params.get("RegionCode") self.RegionCodeV3 = params.get("RegionCodeV3") self.Support = params.get("Support") self.Ipv6 = params.get("Ipv6") self.MultiZone = params.get("MultiZone") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ReplaceParam(AbstractModel): """数据处理——Value处理参数——替换参数 """ def __init__(self): r""" :param OldValue: 被替换值 :type OldValue: str :param NewValue: 替换值 :type NewValue: str """ self.OldValue = None self.NewValue = None def _deserialize(self, params): self.OldValue = params.get("OldValue") self.NewValue = params.get("NewValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Route(AbstractModel): """路由实体对象 """ def __init__(self): r""" :param AccessType: 实例接入方式 0:PLAINTEXT (明文方式,没有带用户信息老版本及社区版本都支持) 1:SASL_PLAINTEXT(明文方式,不过在数据开始时,会通过SASL方式登录鉴权,仅社区版本支持) 2:SSL(SSL加密通信,没有带用户信息,老版本及社区版本都支持) 3:SASL_SSL(SSL加密通信,在数据开始时,会通过SASL方式登录鉴权,仅社区版本支持) :type AccessType: int :param RouteId: 路由ID :type RouteId: int :param VipType: vip网络类型(1:外网TGW 2:基础网络 3:VPC网络 4:支撑网络(idc 环境) 5:SSL外网访问方式访问 6:黑石环境vpc 7:支撑网络(cvm 环境) :type VipType: int :param VipList: 虚拟IP列表 :type VipList: list of VipEntity :param Domain: 域名 注意:此字段可能返回 null,表示取不到有效值。 :type Domain: str :param DomainPort: 域名port 注意:此字段可能返回 null,表示取不到有效值。 :type DomainPort: int :param DeleteTimestamp: 时间戳 注意:此字段可能返回 null,表示取不到有效值。 :type DeleteTimestamp: str """ self.AccessType = None self.RouteId = None self.VipType = None self.VipList = None self.Domain = None self.DomainPort = None self.DeleteTimestamp = None def _deserialize(self, params): self.AccessType = params.get("AccessType") self.RouteId = params.get("RouteId") self.VipType = params.get("VipType") if params.get("VipList") is not None: self.VipList = [] for item in params.get("VipList"): obj = VipEntity() obj._deserialize(item) self.VipList.append(obj) self.Domain = params.get("Domain") self.DomainPort = params.get("DomainPort") self.DeleteTimestamp = params.get("DeleteTimestamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class RouteResponse(AbstractModel): """路由信息返回对象 """ def __init__(self): r""" :param Routers: 路由信息列表 注意:此字段可能返回 null,表示取不到有效值。 :type Routers: list of Route """ self.Routers = None def _deserialize(self, params): if params.get("Routers") is not None: self.Routers = [] for item in params.get("Routers"): obj = Route() obj._deserialize(item) self.Routers.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class RowParam(AbstractModel): """数据处理ROW输出格式配置 """ def __init__(self): r""" :param RowContent: 行内容,KEY_VALUE,VALUE :type RowContent: str :param KeyValueDelimiter: key和value间的分隔符 注意:此字段可能返回 null,表示取不到有效值。 :type KeyValueDelimiter: str :param EntryDelimiter: 元素建的分隔符 注意:此字段可能返回 null,表示取不到有效值。 :type EntryDelimiter: str """ self.RowContent = None self.KeyValueDelimiter = None self.EntryDelimiter = None def _deserialize(self, params): self.RowContent = params.get("RowContent") self.KeyValueDelimiter = params.get("KeyValueDelimiter") self.EntryDelimiter = params.get("EntryDelimiter") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SMTParam(AbstractModel): """数据处理——数据处理参数 """ def __init__(self): r""" :param Key: 数据处理KEY :type Key: str :param Operate: 操作,DATE系统预设-时间戳,CUSTOMIZE自定义,MAPPING映射,JSONPATH :type Operate: str :param SchemeType: 数据类型,ORIGINAL原始,STRING,INT64,FLOAT64,BOOLEAN,MAP,ARRAY :type SchemeType: str :param Value: 数据处理VALUE 注意:此字段可能返回 null,表示取不到有效值。 :type Value: str :param ValueOperate: VALUE处理 注意:此字段可能返回 null,表示取不到有效值。 :type ValueOperate: :class:`tencentcloud.ckafka.v20190819.models.ValueParam` :param OriginalValue: 原始VALUE 注意:此字段可能返回 null,表示取不到有效值。 :type OriginalValue: str :param ValueOperates: VALUE处理链 注意:此字段可能返回 null,表示取不到有效值。 :type ValueOperates: list of ValueParam """ self.Key = None self.Operate = None self.SchemeType = None self.Value = None self.ValueOperate = None self.OriginalValue = None self.ValueOperates = None def _deserialize(self, params): self.Key = params.get("Key") self.Operate = params.get("Operate") self.SchemeType = params.get("SchemeType") self.Value = params.get("Value") if params.get("ValueOperate") is not None: self.ValueOperate = ValueParam() self.ValueOperate._deserialize(params.get("ValueOperate")) self.OriginalValue = params.get("OriginalValue") if params.get("ValueOperates") is not None: self.ValueOperates = [] for item in params.get("ValueOperates"): obj = ValueParam() obj._deserialize(item) self.ValueOperates.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SQLServerConnectParam(AbstractModel): """SQLServer连接源参数 """ def __init__(self): r""" :param Port: SQLServer的连接port 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param UserName: SQLServer连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: SQLServer连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param Resource: SQLServer连接源的实例资源 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param ServiceVip: SQLServer连接源的实例vip,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: SQLServer连接源的vpcId,当为腾讯云实例时,必填 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param IsUpdate: 是否更新到关联的Dip任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Port = None self.UserName = None self.Password = None self.Resource = None self.ServiceVip = None self.UniqVpcId = None self.IsUpdate = None def _deserialize(self, params): self.Port = params.get("Port") self.UserName = params.get("UserName") self.Password = params.get("Password") self.Resource = params.get("Resource") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SQLServerModifyConnectParam(AbstractModel): """SQLServer修改连接源参数 """ def __init__(self): r""" :param Resource: SQLServer连接源的实例资源【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Resource: str :param Port: SQLServer的连接port【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type Port: int :param ServiceVip: SQLServer连接源的实例vip【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type ServiceVip: str :param UniqVpcId: SQLServer连接源的vpcId【不支持修改】 注意:此字段可能返回 null,表示取不到有效值。 :type UniqVpcId: str :param UserName: SQLServer连接源的用户名 注意:此字段可能返回 null,表示取不到有效值。 :type UserName: str :param Password: SQLServer连接源的密码 注意:此字段可能返回 null,表示取不到有效值。 :type Password: str :param IsUpdate: 是否更新到关联的Dip任务 注意:此字段可能返回 null,表示取不到有效值。 :type IsUpdate: bool """ self.Resource = None self.Port = None self.ServiceVip = None self.UniqVpcId = None self.UserName = None self.Password = None self.IsUpdate = None def _deserialize(self, params): self.Resource = params.get("Resource") self.Port = params.get("Port") self.ServiceVip = params.get("ServiceVip") self.UniqVpcId = params.get("UniqVpcId") self.UserName = params.get("UserName") self.Password = params.get("Password") self.IsUpdate = params.get("IsUpdate") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SQLServerParam(AbstractModel): """SQLServer类型入参 """ def __init__(self): r""" :param Database: SQLServer的数据库名称 :type Database: str :param Table: SQLServer的数据表名称,"*"为所监听的所有数据库中的非系统表,可以","间隔,监听多个数据表,但数据表需要以"数据库名.数据表名"的格式进行填写 :type Table: str :param Resource: 该SQLServer在连接管理内的Id :type Resource: str :param SnapshotMode: 复制存量信息(schema_only增量, initial全量),默认为initial :type SnapshotMode: str """ self.Database = None self.Table = None self.Resource = None self.SnapshotMode = None def _deserialize(self, params): self.Database = params.get("Database") self.Table = params.get("Table") self.Resource = params.get("Resource") self.SnapshotMode = params.get("SnapshotMode") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SaleInfo(AbstractModel): """标准版销售信息 """ def __init__(self): r""" :param Flag: 手动设置的flag标志 注意:此字段可能返回 null,表示取不到有效值。 :type Flag: bool :param Version: ckakfa版本号(1.1.1/2.4.2/0.10.2) 注意:此字段可能返回 null,表示取不到有效值。 :type Version: str :param Platform: 专业版、标准版标志 注意:此字段可能返回 null,表示取不到有效值。 :type Platform: str :param SoldOut: 售罄标志:true售罄 注意:此字段可能返回 null,表示取不到有效值。 :type SoldOut: bool """ self.Flag = None self.Version = None self.Platform = None self.SoldOut = None def _deserialize(self, params): self.Flag = params.get("Flag") self.Version = params.get("Version") self.Platform = params.get("Platform") self.SoldOut = params.get("SoldOut") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SecondaryAnalyseParam(AbstractModel): """数据处理——二次解析参数 """ def __init__(self): r""" :param Regex: 分隔符 :type Regex: str """ self.Regex = None def _deserialize(self, params): self.Regex = params.get("Regex") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SendMessageRequest(AbstractModel): """SendMessage请求参数结构体 """ def __init__(self): r""" :param DataHubId: DataHub接入ID :type DataHubId: str :param Message: 发送消息内容(单次请求最多500条) :type Message: list of BatchContent """ self.DataHubId = None self.Message = None def _deserialize(self, params): self.DataHubId = params.get("DataHubId") if params.get("Message") is not None: self.Message = [] for item in params.get("Message"): obj = BatchContent() obj._deserialize(item) self.Message.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class SendMessageResponse(AbstractModel): """SendMessage返回参数结构体 """ def __init__(self): r""" :param MessageId: 消息ID列表 :type MessageId: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.MessageId = None self.RequestId = None def _deserialize(self, params): self.MessageId = params.get("MessageId") self.RequestId = params.get("RequestId") class SplitParam(AbstractModel): """值支持一拆多,即将一个值拆为一个数组 """ def __init__(self): r""" :param Regex: 分隔符 :type Regex: str """ self.Regex = None def _deserialize(self, params): self.Regex = params.get("Regex") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SubscribedInfo(AbstractModel): """订阅信息实体 """ def __init__(self): r""" :param TopicName: 订阅的主题名 :type TopicName: str :param Partition: 订阅的分区 注意:此字段可能返回 null,表示取不到有效值。 :type Partition: list of int :param PartitionOffset: 分区offset信息 注意:此字段可能返回 null,表示取不到有效值。 :type PartitionOffset: list of PartitionOffset :param TopicId: 订阅的主题ID 注意:此字段可能返回 null,表示取不到有效值。 :type TopicId: str """ self.TopicName = None self.Partition = None self.PartitionOffset = None self.TopicId = None def _deserialize(self, params): self.TopicName = params.get("TopicName") self.Partition = params.get("Partition") if params.get("PartitionOffset") is not None: self.PartitionOffset = [] for item in params.get("PartitionOffset"): obj = PartitionOffset() obj._deserialize(item) self.PartitionOffset.append(obj) self.TopicId = params.get("TopicId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class SubstrParam(AbstractModel): """数据处理——Value处理参数——截取参数 """ def __init__(self): r""" :param Start: 截取起始位置 :type Start: int :param End: 截取截止位置 :type End: int """ self.Start = None self.End = None def _deserialize(self, params): self.Start = params.get("Start") self.End = params.get("End") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TableMapping(AbstractModel): """Table、Topic路由 """ def __init__(self): r""" :param Database: 库名 :type Database: str :param Table: 表名,多个表,(逗号)隔开 :type Table: str :param Topic: Topic名称 :type Topic: str :param TopicId: Topic ID :type TopicId: str """ self.Database = None self.Table = None self.Topic = None self.TopicId = None def _deserialize(self, params): self.Database = params.get("Database") self.Table = params.get("Table") self.Topic = params.get("Topic") self.TopicId = params.get("TopicId") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class Tag(AbstractModel): """实例详情中的标签对象 """ def __init__(self): r""" :param TagKey: 标签的key :type TagKey: str :param TagValue: 标签的值 :type TagValue: str """ self.TagKey = None self.TagValue = None def _deserialize(self, params): self.TagKey = params.get("TagKey") self.TagValue = params.get("TagValue") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TdwParam(AbstractModel): """Tdw类型入参 """ def __init__(self): r""" :param Bid: Tdw的bid :type Bid: str :param Tid: Tdw的tid :type Tid: str :param IsDomestic: 默认true :type IsDomestic: bool :param TdwHost: TDW地址,默认tl-tdbank-tdmanager.tencent-distribute.com :type TdwHost: str :param TdwPort: TDW端口,默认8099 :type TdwPort: int """ self.Bid = None self.Tid = None self.IsDomestic = None self.TdwHost = None self.TdwPort = None def _deserialize(self, params): self.Bid = params.get("Bid") self.Tid = params.get("Tid") self.IsDomestic = params.get("IsDomestic") self.TdwHost = params.get("TdwHost") self.TdwPort = params.get("TdwPort") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class Topic(AbstractModel): """返回的topic对象 """ def __init__(self): r""" :param TopicId: 主题的ID :type TopicId: str :param TopicName: 主题的名称 :type TopicName: str :param Note: 备注 注意:此字段可能返回 null,表示取不到有效值。 :type Note: str """ self.TopicId = None self.TopicName = None self.Note = None def _deserialize(self, params): self.TopicId = params.get("TopicId") self.TopicName = params.get("TopicName") self.Note = params.get("Note") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TopicAttributesResponse(AbstractModel): """主题属性返回结果实体 """ def __init__(self): r""" :param TopicId: 主题 ID :type TopicId: str :param CreateTime: 创建时间 :type CreateTime: int :param Note: 主题备注 注意:此字段可能返回 null,表示取不到有效值。 :type Note: str :param PartitionNum: 分区个数 :type PartitionNum: int :param EnableWhiteList: IP 白名单开关,1:打开; 0:关闭 :type EnableWhiteList: int :param IpWhiteList: IP 白名单列表 :type IpWhiteList: list of str :param Config: topic 配置数组 :type Config: :class:`tencentcloud.ckafka.v20190819.models.Config` :param Partitions: 分区详情 :type Partitions: list of TopicPartitionDO :param EnableAclRule: ACL预设策略开关,1:打开; 0:关闭 注意:此字段可能返回 null,表示取不到有效值。 :type EnableAclRule: int :param AclRuleList: 预设策略列表 注意:此字段可能返回 null,表示取不到有效值。 :type AclRuleList: list of AclRule :param QuotaConfig: topic 限流策略 注意:此字段可能返回 null,表示取不到有效值。 :type QuotaConfig: :class:`tencentcloud.ckafka.v20190819.models.InstanceQuotaConfigResp` """ self.TopicId = None self.CreateTime = None self.Note = None self.PartitionNum = None self.EnableWhiteList = None self.IpWhiteList = None self.Config = None self.Partitions = None self.EnableAclRule = None self.AclRuleList = None self.QuotaConfig = None def _deserialize(self, params): self.TopicId = params.get("TopicId") self.CreateTime = params.get("CreateTime") self.Note = params.get("Note") self.PartitionNum = params.get("PartitionNum") self.EnableWhiteList = params.get("EnableWhiteList") self.IpWhiteList = params.get("IpWhiteList") if params.get("Config") is not None: self.Config = Config() self.Config._deserialize(params.get("Config")) if params.get("Partitions") is not None: self.Partitions = [] for item in params.get("Partitions"): obj = TopicPartitionDO() obj._deserialize(item) self.Partitions.append(obj) self.EnableAclRule = params.get("EnableAclRule") if params.get("AclRuleList") is not None: self.AclRuleList = [] for item in params.get("AclRuleList"): obj = AclRule() obj._deserialize(item) self.AclRuleList.append(obj) if params.get("QuotaConfig") is not None: self.QuotaConfig = InstanceQuotaConfigResp() self.QuotaConfig._deserialize(params.get("QuotaConfig")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TopicDetail(AbstractModel): """主题详情 """ def __init__(self): r""" :param TopicName: 主题名称 :type TopicName: str :param TopicId: 主题ID :type TopicId: str :param PartitionNum: 分区数 :type PartitionNum: int :param ReplicaNum: 副本数 :type ReplicaNum: int :param Note: 备注 注意:此字段可能返回 null,表示取不到有效值。 :type Note: str :param CreateTime: 创建时间 :type CreateTime: int :param EnableWhiteList: 是否开启ip鉴权白名单,true表示开启,false表示不开启 :type EnableWhiteList: bool :param IpWhiteListCount: ip白名单中ip个数 :type IpWhiteListCount: int :param ForwardCosBucket: 数据备份cos bucket: 转存到cos 的bucket地址 注意:此字段可能返回 null,表示取不到有效值。 :type ForwardCosBucket: str :param ForwardStatus: 数据备份cos 状态: 1 不开启数据备份,0 开启数据备份 :type ForwardStatus: int :param ForwardInterval: 数据备份到cos的周期频率 :type ForwardInterval: int :param Config: 高级配置 注意:此字段可能返回 null,表示取不到有效值。 :type Config: :class:`tencentcloud.ckafka.v20190819.models.Config` :param RetentionTimeConfig: 消息保留时间配置(用于动态配置变更记录) 注意:此字段可能返回 null,表示取不到有效值。 :type RetentionTimeConfig: :class:`tencentcloud.ckafka.v20190819.models.TopicRetentionTimeConfigRsp` :param Status: 0:正常,1:已删除,2:删除中 注意:此字段可能返回 null,表示取不到有效值。 :type Status: int """ self.TopicName = None self.TopicId = None self.PartitionNum = None self.ReplicaNum = None self.Note = None self.CreateTime = None self.EnableWhiteList = None self.IpWhiteListCount = None self.ForwardCosBucket = None self.ForwardStatus = None self.ForwardInterval = None self.Config = None self.RetentionTimeConfig = None self.Status = None def _deserialize(self, params): self.TopicName = params.get("TopicName") self.TopicId = params.get("TopicId") self.PartitionNum = params.get("PartitionNum") self.ReplicaNum = params.get("ReplicaNum") self.Note = params.get("Note") self.CreateTime = params.get("CreateTime") self.EnableWhiteList = params.get("EnableWhiteList") self.IpWhiteListCount = params.get("IpWhiteListCount") self.ForwardCosBucket = params.get("ForwardCosBucket") self.ForwardStatus = params.get("ForwardStatus") self.ForwardInterval = params.get("ForwardInterval") if params.get("Config") is not None: self.Config = Config() self.Config._deserialize(params.get("Config")) if params.get("RetentionTimeConfig") is not None: self.RetentionTimeConfig = TopicRetentionTimeConfigRsp() self.RetentionTimeConfig._deserialize(params.get("RetentionTimeConfig")) self.Status = params.get("Status") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicDetailResponse(AbstractModel): """主题详情返回实体 """ def __init__(self): r""" :param TopicList: 返回的主题详情列表 注意:此字段可能返回 null,表示取不到有效值。 :type TopicList: list of TopicDetail :param TotalCount: 符合条件的所有主题详情数量 :type TotalCount: int """ self.TopicList = None self.TotalCount = None def _deserialize(self, params): if params.get("TopicList") is not None: self.TopicList = [] for item in params.get("TopicList"): obj = TopicDetail() obj._deserialize(item) self.TopicList.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TopicInSyncReplicaInfo(AbstractModel): """topic副本及详细信息 """ def __init__(self): r""" :param Partition: 分区名称 :type Partition: str :param Leader: Leader Id :type Leader: int :param Replica: 副本集 :type Replica: str :param InSyncReplica: ISR :type InSyncReplica: str :param BeginOffset: 起始Offset 注意:此字段可能返回 null,表示取不到有效值。 :type BeginOffset: int :param EndOffset: 末端Offset 注意:此字段可能返回 null,表示取不到有效值。 :type EndOffset: int :param MessageCount: 消息数 注意:此字段可能返回 null,表示取不到有效值。 :type MessageCount: int :param OutOfSyncReplica: 未同步副本集 注意:此字段可能返回 null,表示取不到有效值。 :type OutOfSyncReplica: str """ self.Partition = None self.Leader = None self.Replica = None self.InSyncReplica = None self.BeginOffset = None self.EndOffset = None self.MessageCount = None self.OutOfSyncReplica = None def _deserialize(self, params): self.Partition = params.get("Partition") self.Leader = params.get("Leader") self.Replica = params.get("Replica") self.InSyncReplica = params.get("InSyncReplica") self.BeginOffset = params.get("BeginOffset") self.EndOffset = params.get("EndOffset") self.MessageCount = params.get("MessageCount") self.OutOfSyncReplica = params.get("OutOfSyncReplica") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicInSyncReplicaResult(AbstractModel): """Topic 副本及详情数据集合 """ def __init__(self): r""" :param TopicInSyncReplicaList: Topic详情及副本合集 :type TopicInSyncReplicaList: list of TopicInSyncReplicaInfo :param TotalCount: 总计个数 :type TotalCount: int """ self.TopicInSyncReplicaList = None self.TotalCount = None def _deserialize(self, params): if params.get("TopicInSyncReplicaList") is not None: self.TopicInSyncReplicaList = [] for item in params.get("TopicInSyncReplicaList"): obj = TopicInSyncReplicaInfo() obj._deserialize(item) self.TopicInSyncReplicaList.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicParam(AbstractModel): """Topic配置 """ def __init__(self): r""" :param Resource: 单独售卖Topic的Topic名称 :type Resource: str :param OffsetType: Offset类型,最开始位置earliest,最新位置latest,时间点位置timestamp 注意:此字段可能返回 null,表示取不到有效值。 :type OffsetType: str :param StartTime: Offset类型为timestamp时必传,传时间戳,精确到秒 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: int :param TopicId: Topic的TopicId【出参】 注意:此字段可能返回 null,表示取不到有效值。 :type TopicId: str :param CompressionType: 写入Topic时是否进行压缩,不开启填"none",开启的话,可选择"gzip", "snappy", "lz4"中的一个进行填写。 注意:此字段可能返回 null,表示取不到有效值。 :type CompressionType: str :param UseAutoCreateTopic: 使用的Topic是否需要自动创建(目前只支持SOURCE流入任务) 注意:此字段可能返回 null,表示取不到有效值。 :type UseAutoCreateTopic: bool """ self.Resource = None self.OffsetType = None self.StartTime = None self.TopicId = None self.CompressionType = None self.UseAutoCreateTopic = None def _deserialize(self, params): self.Resource = params.get("Resource") self.OffsetType = params.get("OffsetType") self.StartTime = params.get("StartTime") self.TopicId = params.get("TopicId") self.CompressionType = params.get("CompressionType") self.UseAutoCreateTopic = params.get("UseAutoCreateTopic") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TopicPartitionDO(AbstractModel): """分区详情 """ def __init__(self): r""" :param Partition: Partition ID :type Partition: int :param LeaderStatus: Leader 运行状态 :type LeaderStatus: int :param IsrNum: ISR 个数 :type IsrNum: int :param ReplicaNum: 副本个数 :type ReplicaNum: int """ self.Partition = None self.LeaderStatus = None self.IsrNum = None self.ReplicaNum = None def _deserialize(self, params): self.Partition = params.get("Partition") self.LeaderStatus = params.get("LeaderStatus") self.IsrNum = params.get("IsrNum") self.ReplicaNum = params.get("ReplicaNum") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicResult(AbstractModel): """统一返回的TopicResponse """ def __init__(self): r""" :param TopicList: 返回的主题信息列表 注意:此字段可能返回 null,表示取不到有效值。 :type TopicList: list of Topic :param TotalCount: 符合条件的 topic 数量 注意:此字段可能返回 null,表示取不到有效值。 :type TotalCount: int """ self.TopicList = None self.TotalCount = None def _deserialize(self, params): if params.get("TopicList") is not None: self.TopicList = [] for item in params.get("TopicList"): obj = Topic() obj._deserialize(item) self.TopicList.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicRetentionTimeConfigRsp(AbstractModel): """Topic消息保留时间配置返回信息 """ def __init__(self): r""" :param Expect: 期望值,即用户配置的Topic消息保留时间(单位分钟) 注意:此字段可能返回 null,表示取不到有效值。 :type Expect: int :param Current: 当前值,即当前生效值(可能存在动态调整,单位分钟) 注意:此字段可能返回 null,表示取不到有效值。 :type Current: int :param ModTimeStamp: 最近变更时间 注意:此字段可能返回 null,表示取不到有效值。 :type ModTimeStamp: int """ self.Expect = None self.Current = None self.ModTimeStamp = None def _deserialize(self, params): self.Expect = params.get("Expect") self.Current = params.get("Current") self.ModTimeStamp = params.get("ModTimeStamp") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class TopicSubscribeGroup(AbstractModel): """DescribeTopicSubscribeGroup接口出参 """ def __init__(self): r""" :param TotalCount: 总数 :type TotalCount: int :param StatusCountInfo: 消费分组状态数量信息 :type StatusCountInfo: str :param GroupsInfo: 消费分组信息 注意:此字段可能返回 null,表示取不到有效值。 :type GroupsInfo: list of GroupInfoResponse :param Status: 此次请求是否异步的状态。实例里分组较少的会直接返回结果,Status为1。当分组较多时,会异步更新缓存,Status为0时不会返回分组信息,直至Status为1更新完毕返回结果。 注意:此字段可能返回 null,表示取不到有效值。 :type Status: int """ self.TotalCount = None self.StatusCountInfo = None self.GroupsInfo = None self.Status = None def _deserialize(self, params): self.TotalCount = params.get("TotalCount") self.StatusCountInfo = params.get("StatusCountInfo") if params.get("GroupsInfo") is not None: self.GroupsInfo = [] for item in params.get("GroupsInfo"): obj = GroupInfoResponse() obj._deserialize(item) self.GroupsInfo.append(obj) self.Status = params.get("Status") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TransformParam(AbstractModel): """数据处理参数 """ def __init__(self): r""" :param AnalysisFormat: 解析格式,JSON,DELIMITER分隔符,REGULAR正则提取 :type AnalysisFormat: str :param OutputFormat: 输出格式 :type OutputFormat: str :param FailureParam: 是否保留解析失败数据 :type FailureParam: :class:`tencentcloud.ckafka.v20190819.models.FailureParam` :param Content: 原始数据 :type Content: str :param SourceType: 数据来源,TOPIC从源topic拉取,CUSTOMIZE自定义 :type SourceType: str :param Regex: 分隔符、正则表达式 :type Regex: str :param MapParam: Map :type MapParam: list of MapParam :param FilterParam: 过滤器 :type FilterParam: list of FilterMapParam :param Result: 测试结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param AnalyseResult: 解析结果 注意:此字段可能返回 null,表示取不到有效值。 :type AnalyseResult: list of MapParam :param UseEventBus: 底层引擎是否使用eb 注意:此字段可能返回 null,表示取不到有效值。 :type UseEventBus: bool """ self.AnalysisFormat = None self.OutputFormat = None self.FailureParam = None self.Content = None self.SourceType = None self.Regex = None self.MapParam = None self.FilterParam = None self.Result = None self.AnalyseResult = None self.UseEventBus = None def _deserialize(self, params): self.AnalysisFormat = params.get("AnalysisFormat") self.OutputFormat = params.get("OutputFormat") if params.get("FailureParam") is not None: self.FailureParam = FailureParam() self.FailureParam._deserialize(params.get("FailureParam")) self.Content = params.get("Content") self.SourceType = params.get("SourceType") self.Regex = params.get("Regex") if params.get("MapParam") is not None: self.MapParam = [] for item in params.get("MapParam"): obj = MapParam() obj._deserialize(item) self.MapParam.append(obj) if params.get("FilterParam") is not None: self.FilterParam = [] for item in params.get("FilterParam"): obj = FilterMapParam() obj._deserialize(item) self.FilterParam.append(obj) self.Result = params.get("Result") if params.get("AnalyseResult") is not None: self.AnalyseResult = [] for item in params.get("AnalyseResult"): obj = MapParam() obj._deserialize(item) self.AnalyseResult.append(obj) self.UseEventBus = params.get("UseEventBus") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class TransformsParam(AbstractModel): """数据处理参数 """ def __init__(self): r""" :param Content: 原始数据 :type Content: str :param FieldChain: 处理链 :type FieldChain: list of FieldParam :param FilterParam: 过滤器 注意:此字段可能返回 null,表示取不到有效值。 :type FilterParam: list of FilterMapParam :param FailureParam: 失败处理 注意:此字段可能返回 null,表示取不到有效值。 :type FailureParam: :class:`tencentcloud.ckafka.v20190819.models.FailureParam` :param Result: 测试结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param SourceType: 数据来源 注意:此字段可能返回 null,表示取不到有效值。 :type SourceType: str :param OutputFormat: 输出格式,JSON,ROW,默认为JSON 注意:此字段可能返回 null,表示取不到有效值。 :type OutputFormat: str :param RowParam: 输出格式为ROW必填 注意:此字段可能返回 null,表示取不到有效值。 :type RowParam: :class:`tencentcloud.ckafka.v20190819.models.RowParam` :param KeepMetadata: 是否保留数据源Topic元数据信息(源Topic、Partition、Offset),默认为false 注意:此字段可能返回 null,表示取不到有效值。 :type KeepMetadata: bool """ self.Content = None self.FieldChain = None self.FilterParam = None self.FailureParam = None self.Result = None self.SourceType = None self.OutputFormat = None self.RowParam = None self.KeepMetadata = None def _deserialize(self, params): self.Content = params.get("Content") if params.get("FieldChain") is not None: self.FieldChain = [] for item in params.get("FieldChain"): obj = FieldParam() obj._deserialize(item) self.FieldChain.append(obj) if params.get("FilterParam") is not None: self.FilterParam = [] for item in params.get("FilterParam"): obj = FilterMapParam() obj._deserialize(item) self.FilterParam.append(obj) if params.get("FailureParam") is not None: self.FailureParam = FailureParam() self.FailureParam._deserialize(params.get("FailureParam")) self.Result = params.get("Result") self.SourceType = params.get("SourceType") self.OutputFormat = params.get("OutputFormat") if params.get("RowParam") is not None: self.RowParam = RowParam() self.RowParam._deserialize(params.get("RowParam")) self.KeepMetadata = params.get("KeepMetadata") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class UrlDecodeParam(AbstractModel): """Url解析 """ def __init__(self): r""" :param CharsetName: 编码 注意:此字段可能返回 null,表示取不到有效值。 :type CharsetName: str """ self.CharsetName = None def _deserialize(self, params): self.CharsetName = params.get("CharsetName") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class User(AbstractModel): """用户实体 """ def __init__(self): r""" :param UserId: 用户id :type UserId: int :param Name: 用户名称 :type Name: str :param CreateTime: 创建时间 :type CreateTime: str :param UpdateTime: 最后更新时间 :type UpdateTime: str """ self.UserId = None self.Name = None self.CreateTime = None self.UpdateTime = None def _deserialize(self, params): self.UserId = params.get("UserId") self.Name = params.get("Name") self.CreateTime = params.get("CreateTime") self.UpdateTime = params.get("UpdateTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class UserResponse(AbstractModel): """用户返回实体 """ def __init__(self): r""" :param Users: 符合条件的用户列表 注意:此字段可能返回 null,表示取不到有效值。 :type Users: list of User :param TotalCount: 符合条件的总用户数 :type TotalCount: int """ self.Users = None self.TotalCount = None def _deserialize(self, params): if params.get("Users") is not None: self.Users = [] for item in params.get("Users"): obj = User() obj._deserialize(item) self.Users.append(obj) self.TotalCount = params.get("TotalCount") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ValueParam(AbstractModel): """数据处理——Value处理参数 """ def __init__(self): r""" :param Type: 处理模式,REPLACE替换,SUBSTR截取,DATE日期转换,TRIM去除前后空格,REGEX_REPLACE正则替换,URL_DECODE,LOWERCASE转换为小写 :type Type: str :param Replace: 替换,TYPE=REPLACE时必传 注意:此字段可能返回 null,表示取不到有效值。 :type Replace: :class:`tencentcloud.ckafka.v20190819.models.ReplaceParam` :param Substr: 截取,TYPE=SUBSTR时必传 注意:此字段可能返回 null,表示取不到有效值。 :type Substr: :class:`tencentcloud.ckafka.v20190819.models.SubstrParam` :param Date: 时间转换,TYPE=DATE时必传 注意:此字段可能返回 null,表示取不到有效值。 :type Date: :class:`tencentcloud.ckafka.v20190819.models.DateParam` :param RegexReplace: 正则替换,TYPE=REGEX_REPLACE时必传 注意:此字段可能返回 null,表示取不到有效值。 :type RegexReplace: :class:`tencentcloud.ckafka.v20190819.models.RegexReplaceParam` :param Split: 值支持一拆多,TYPE=SPLIT时必传 注意:此字段可能返回 null,表示取不到有效值。 :type Split: :class:`tencentcloud.ckafka.v20190819.models.SplitParam` :param KV: key-value二次解析,TYPE=KV时必传 注意:此字段可能返回 null,表示取不到有效值。 :type KV: :class:`tencentcloud.ckafka.v20190819.models.KVParam` :param Result: 处理结果 注意:此字段可能返回 null,表示取不到有效值。 :type Result: str :param JsonPathReplace: JsonPath替换,TYPE=JSON_PATH_REPLACE时必传 注意:此字段可能返回 null,表示取不到有效值。 :type JsonPathReplace: :class:`tencentcloud.ckafka.v20190819.models.JsonPathReplaceParam` :param UrlDecode: Url解析 注意:此字段可能返回 null,表示取不到有效值。 :type UrlDecode: :class:`tencentcloud.ckafka.v20190819.models.UrlDecodeParam` """ self.Type = None self.Replace = None self.Substr = None self.Date = None self.RegexReplace = None self.Split = None self.KV = None self.Result = None self.JsonPathReplace = None self.UrlDecode = None def _deserialize(self, params): self.Type = params.get("Type") if params.get("Replace") is not None: self.Replace = ReplaceParam() self.Replace._deserialize(params.get("Replace")) if params.get("Substr") is not None: self.Substr = SubstrParam() self.Substr._deserialize(params.get("Substr")) if params.get("Date") is not None: self.Date = DateParam() self.Date._deserialize(params.get("Date")) if params.get("RegexReplace") is not None: self.RegexReplace = RegexReplaceParam() self.RegexReplace._deserialize(params.get("RegexReplace")) if params.get("Split") is not None: self.Split = SplitParam() self.Split._deserialize(params.get("Split")) if params.get("KV") is not None: self.KV = KVParam() self.KV._deserialize(params.get("KV")) self.Result = params.get("Result") if params.get("JsonPathReplace") is not None: self.JsonPathReplace = JsonPathReplaceParam() self.JsonPathReplace._deserialize(params.get("JsonPathReplace")) if params.get("UrlDecode") is not None: self.UrlDecode = UrlDecodeParam() self.UrlDecode._deserialize(params.get("UrlDecode")) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class VipEntity(AbstractModel): """虚拟IP实体 """ def __init__(self): r""" :param Vip: 虚拟IP :type Vip: str :param Vport: 虚拟端口 :type Vport: str """ self.Vip = None self.Vport = None def _deserialize(self, params): self.Vip = params.get("Vip") self.Vport = params.get("Vport") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class ZoneInfo(AbstractModel): """zone信息实体 """ def __init__(self): r""" :param ZoneId: zone的id :type ZoneId: str :param IsInternalApp: 是否内部APP :type IsInternalApp: int :param AppId: app id :type AppId: int :param Flag: 标识 :type Flag: bool :param ZoneName: zone名称 :type ZoneName: str :param ZoneStatus: zone状态 :type ZoneStatus: int :param Exflag: 额外标识 :type Exflag: str :param SoldOut: json对象,key为机型,value true为售罄,false为未售罄 :type SoldOut: str :param SalesInfo: 标准版售罄信息 注意:此字段可能返回 null,表示取不到有效值。 :type SalesInfo: list of SaleInfo """ self.ZoneId = None self.IsInternalApp = None self.AppId = None self.Flag = None self.ZoneName = None self.ZoneStatus = None self.Exflag = None self.SoldOut = None self.SalesInfo = None def _deserialize(self, params): self.ZoneId = params.get("ZoneId") self.IsInternalApp = params.get("IsInternalApp") self.AppId = params.get("AppId") self.Flag = params.get("Flag") self.ZoneName = params.get("ZoneName") self.ZoneStatus = params.get("ZoneStatus") self.Exflag = params.get("Exflag") self.SoldOut = params.get("SoldOut") if params.get("SalesInfo") is not None: self.SalesInfo = [] for item in params.get("SalesInfo"): obj = SaleInfo() obj._deserialize(item) self.SalesInfo.append(obj) memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class ZoneResponse(AbstractModel): """查询kafka的zone信息返回的实体 """ def __init__(self): r""" :param ZoneList: zone列表 :type ZoneList: list of ZoneInfo :param MaxBuyInstanceNum: 最大购买实例个数 :type MaxBuyInstanceNum: int :param MaxBandwidth: 最大购买带宽 单位Mb/s :type MaxBandwidth: int :param UnitPrice: 后付费单位价格 :type UnitPrice: :class:`tencentcloud.ckafka.v20190819.models.Price` :param MessagePrice: 后付费消息单价 :type MessagePrice: :class:`tencentcloud.ckafka.v20190819.models.Price` :param ClusterInfo: 用户独占集群信息 注意:此字段可能返回 null,表示取不到有效值。 :type ClusterInfo: list of ClusterInfo :param Standard: 购买标准版配置 注意:此字段可能返回 null,表示取不到有效值。 :type Standard: str :param StandardS2: 购买标准版S2配置 注意:此字段可能返回 null,表示取不到有效值。 :type StandardS2: str :param Profession: 购买专业版配置 注意:此字段可能返回 null,表示取不到有效值。 :type Profession: str :param Physical: 购买物理独占版配置 注意:此字段可能返回 null,表示取不到有效值。 :type Physical: str :param PublicNetwork: 公网带宽 注意:此字段可能返回 null,表示取不到有效值。 :type PublicNetwork: str :param PublicNetworkLimit: 公网带宽配置 注意:此字段可能返回 null,表示取不到有效值。 :type PublicNetworkLimit: str """ self.ZoneList = None self.MaxBuyInstanceNum = None self.MaxBandwidth = None self.UnitPrice = None self.MessagePrice = None self.ClusterInfo = None self.Standard = None self.StandardS2 = None self.Profession = None self.Physical = None self.PublicNetwork = None self.PublicNetworkLimit = None def _deserialize(self, params): if params.get("ZoneList") is not None: self.ZoneList = [] for item in params.get("ZoneList"): obj = ZoneInfo() obj._deserialize(item) self.ZoneList.append(obj) self.MaxBuyInstanceNum = params.get("MaxBuyInstanceNum") self.MaxBandwidth = params.get("MaxBandwidth") if params.get("UnitPrice") is not None: self.UnitPrice = Price() self.UnitPrice._deserialize(params.get("UnitPrice")) if params.get("MessagePrice") is not None: self.MessagePrice = Price() self.MessagePrice._deserialize(params.get("MessagePrice")) if params.get("ClusterInfo") is not None: self.ClusterInfo = [] for item in params.get("ClusterInfo"): obj = ClusterInfo() obj._deserialize(item) self.ClusterInfo.append(obj) self.Standard = params.get("Standard") self.StandardS2 = params.get("StandardS2") self.Profession = params.get("Profession") self.Physical = params.get("Physical") self.PublicNetwork = params.get("PublicNetwork") self.PublicNetworkLimit = params.get("PublicNetworkLimit") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set))
{ "content_hash": "c9c35ca71435a42f79e5b4ce10f803e4", "timestamp": "", "source": "github", "line_count": 11034, "max_line_length": 211, "avg_line_length": 32.07775965198478, "alnum_prop": 0.6067337955507337, "repo_name": "tzpBingo/github-trending", "id": "d20ea696644822019040916cd90d96d639eb121b", "size": "404200", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "codespace/python/tencentcloud/ckafka/v20190819/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "11470" }, { "name": "HTML", "bytes": "1543" }, { "name": "Python", "bytes": "49985109" }, { "name": "Shell", "bytes": "18039" } ], "symlink_target": "" }
"""Run Google Data Loss Prevention API DeID. All input/output files should be on Google Cloud Storage. Requires Apache Beam client and Google Python API Client: pip install --upgrade apache_beam pip install --upgrade google-api-python-client """ from __future__ import absolute_import import collections import copy from datetime import datetime import io import json import logging import os import posixpath import time import uuid import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions from apiclient import discovery from apiclient import errors from common import mae from common import unicodecsv import httplib2 try: import StringIO # pylint: disable=g-import-not-at-top except ImportError: pass CUSTOM_INFO_TYPES = 'customInfoTypes' DLP_FINDINGS_TIMESTAMP = 'dlp_findings_timestamp' DLP_DEID_TIMESTAMP = 'dlp_deid_timestamp' def _get_index(column_name, headers): """Return the position in the headers list where column_name appears.""" i = 0 for header in headers: if header['name'] == column_name: return i i += 1 return -1 def request_with_retry(fn, num_retries=5): """Makes a service request; and retries if needed.""" for attempt in range(num_retries): try: return fn() except errors.HttpError as error: if attempt == (num_retries - 1): # Give up after num_retries logging.error('last attempt failed. giving up.') raise elif (error.resp.status == 429 or (error.resp.status == 403 and error.resp.reason in ['userRateLimitExceeded', 'quotaExceeded'])): # 429 - Too Many Requests # 403 - Client Rate limit exceeded. Wait and retry. # 403 can also mean app authentication issue, so explicitly check # for rate limit error # https://developers.google.com/drive/web/handle-errors sleep_seconds = 5+2**attempt logging.warn( 'attempt %d failed with 403 or 429 error. retrying in %d sec...', attempt + 1, sleep_seconds) time.sleep(sleep_seconds) elif (error.resp.status == 400 and error.resp.reason == 'Bad Request' and 'Invalid info_type' in str(error)): raise Exception( str(error) + '\nEnsure you are using the correct deid_config_file.') elif (error.resp.status == 403 and error.resp.reason == 'Forbidden' and 'serviceusage.services.use' in str(error)): raise Exception( str(error) + '\nEnsure the service account specified in ' 'GOOGLE_APPLICATION_CREDENTIALS has the ' 'serviceusage.services.use permission.') elif error.resp.status in [500, 502, 503]: sleep_seconds = 10+2**attempt # 500, 503 - Service error. Wait and retry. # 502 - Bad Gateway. Wait and retry. logging.warn('attempt %d failed with 5xx error. retrying in %d sec...', attempt + 1, sleep_seconds) time.sleep(sleep_seconds) else: # Don't retry for client errors. logging.error('attempt %d failed. giving up.', attempt + 1) raise except httplib2.HttpLib2Error: # Don't retry for connection errors. logging.error('attempt %d failed. 
giving up.', attempt + 1) raise def get_deid_text(deid_response, pass_through_columns, target_columns, timestamp): """Get the de-id'd text from the deidentify() API call response.""" # Sample response for a request with a table as input: # {'item': {'table': { # 'headers': [{'name': 'note'}, {'name': 'first_name'}], # 'rows': [ # {'values': [{'stringValue': 'text'}, {'stringValue': 'Pat'}]} # ] # }}} response = {} for col in pass_through_columns: response[col['name']] = deid_response[col['name']] table = deid_response['item']['table'] for col in target_columns: i = _get_index(col['name'], table['headers']) val = '' if i >= 0 and table['rows']: val = table['rows'][0]['values'][i][col['type']] response[col['name']] = val timestamp = datetime.strftime(timestamp, '%Y-%m-%d %H:%M:%S') response[DLP_DEID_TIMESTAMP] = timestamp return response def _per_row_inspect_config(inspect_config, per_row_types, rows): """Return a copy of inspect_config with the given per-row types added.""" if not per_row_types: return inspect_config inspect_config = copy.deepcopy(inspect_config) if CUSTOM_INFO_TYPES not in inspect_config: inspect_config[CUSTOM_INFO_TYPES] = [] for per_row_type in per_row_types: column_name = per_row_type['columnName'] words = set() for row in rows: if column_name not in row: raise Exception( 'customInfoType column "{}" not found.'.format(column_name)) words.add(row[column_name]) inspect_config[CUSTOM_INFO_TYPES].append({ 'infoType': {'name': per_row_type['infoTypeName']}, 'dictionary': {'wordList': {'words': list(words)}} }) return inspect_config # Creates the 'item' field for a deid or inspect request, e.g.: # 'item': {'table': { # 'headers': [{'name': 'note'}, {'name': 'secondary note'}] # 'rows': [ { # {'values': [{'stringValue': 'text of the note'}, # {'stringValue': 'text of the secondary note'}]}, # {'values': [{'stringValue': 'row2 note text'}, # {'stringValue': 'row2 secondary note'}]} # } ] # }} def _create_item(target_columns, rows): """Creates the 'item' field for a deid or inspect request.""" table = {'headers': [], 'rows': []} for _ in rows: table['rows'].append({'values': []}) for col in target_columns: table['headers'].append({'name': col['name']}) for i in range(len(rows)): if col['name'] not in rows[i]: raise Exception('Expected column "{}" not found in row: "{}"'.format( col['name'], rows[i])) table['rows'][i]['values'].append({col['type']: rows[i][col['name']]}) return {'table': table} def _rebatch_deid(rows, project, deid_config, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name): """Call deid() twice with half the list each time and merge the result.""" half_size = int(len(rows) / 2) ret_a = deid(rows[:half_size], project, deid_config, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) ret_b = deid(rows[half_size:], project, deid_config, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) return ret_a + ret_b def deid(rows, project, deid_config, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name): """Put the data through the DLP API DeID method. Args: rows: A list of BigQuery rows with data to send to the DLP API. project: The project to set as the parent in the request. 
deid_config: DeidentifyConfig map, as defined in the DLP API: https://goo.gl/WrvsDB#DeidentifyTemplate.DeidentifyConfig inspect_config: inspectConfig map, as defined in the DLP API: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig pass_through_columns: List of strings; columns that should not be sent to the DLP API, but should still be included in the final output. target_columns: List of strings; columns that should be sent to the DLP API, and have the DLP API data included in the final output. per_row_types: List of objects representing columns that should be read and sent to the DLP API as custom infoTypes. dlp_api_name: Name of the DLP API to use (generally 'dlp', but may vary for testing purposes). Raises: Exception: If the request fails. Returns: A list of dicts (one per row) containing: - 'item': The 'item' element of the result from the DLP API call. - An entry for each pass-through column. """ dlp = discovery.build(dlp_api_name, 'v2', cache_discovery=False) projects = dlp.projects() content = projects.content() inspect_config = _per_row_inspect_config(inspect_config, per_row_types, rows) req_body = { 'deidentifyConfig': deid_config, 'inspectConfig': inspect_config, # Include pass-through columns as target columns here so they can be used # as the context field for a transformation. We will re-write the response # so they contain the original data. 'item': _create_item(pass_through_columns + target_columns, rows) } parent = 'projects/{0}'.format(project) try: response = request_with_retry( content.deidentify(body=req_body, parent=parent).execute) except errors.HttpError as error: try: error_json = json.loads(error.content) except (TypeError, ValueError): logging.error('Unable to parse JSON from deidentify HttpError content: ' '%s', error) if (error.resp.status != 400 or 'Retry with a smaller request.' not in error_json['error']['message'] or len(rows) == 1): raise error logging.warning('Batch deid() request too large (%s rows). ' 'Retrying as two smaller batches.', len(rows)) return _rebatch_deid(rows, project, deid_config, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) if 'error' in response: raise Exception('Deidentify() failed: {}'.format(response['error'])) if 'overview' in response: if 'transformationSummaries' in response['overview']: for summary in response['overview']['transformationSummaries']: if 'results' in summary: for result in summary['results']: if 'code' in result and result['code'] == 'ERROR': raise Exception( 'Deidentify() failed: {}: "{}"\n\nFull response:\n{}'.format( summary['field']['name'], result['details'], response)) retvals = [] for i in range(len(rows)): response_row = response['item']['table']['rows'][i] item = {'table': {'headers': response['item']['table']['headers'], 'rows': [response_row]}} ret = {'item': item} for col in pass_through_columns: ret[col['name']] = rows[i][col['name']] retvals.append(ret) return retvals def _rebatch_inspect(rows, project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name): """Call inspect() twice with half the list each time and merge the result.""" half_size = int(len(rows) / 2) ret_a = inspect(rows[:half_size], project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) ret_b = inspect(rows[half_size:], project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) # Merge ret_b into ret_a and adjust the row indexes up accordingly. 
for retval in ret_b: if 'findings' in retval['result']: for finding in retval['result']['findings']: index = 0 # More complicated types, like an image within a pdf, may have multiple # contentLocations, but our simple table will only have one. content_location = finding['location']['contentLocations'][0] table_location = content_location['recordLocation']['tableLocation'] if 'rowIndex' in table_location: index = int(table_location['rowIndex']) table_location['rowIndex'] = index + half_size ret_a.append(retval) return ret_a def inspect(rows, project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name): """Put the data through the DLP API inspect method. Args: rows: A list of BigQuery rows with data to send to the DLP API. project: The project to set as the parent in the request. inspect_config: inspectConfig map, as defined in the DLP API: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig pass_through_columns: List of strings; columns that should not be sent to the DLP API, but should still be included in the final output. target_columns: List of strings; columns that should be sent to the DLP API, and have the DLP API data included in the final output. per_row_types: List of objects representing columns that should be read and sent to the DLP API as custom infoTypes. dlp_api_name: Name of the DLP API to use (generally 'dlp', but may vary for testing purposes). Raises: Exception: If the request fails. Returns: A list of dicts (one per row) containing: - 'result': The result from the DLP API call. - 'original_note': The original note, to be used in generating MAE output. - An entry for each pass-through column. """ dlp = discovery.build(dlp_api_name, 'v2', cache_discovery=False) projects = dlp.projects() content = projects.content() inspect_config = _per_row_inspect_config(inspect_config, per_row_types, rows) req_body = { 'inspectConfig': inspect_config, 'item': _create_item(target_columns, rows) } parent = 'projects/{0}'.format(project) response = request_with_retry( content.inspect(body=req_body, parent=parent).execute) truncated = 'findingsTruncated' if truncated in response['result'] and response['result'][truncated]: if len(rows) == 1: raise Exception('Inspect() failed; too many findings (> %s).' % len(response['result']['findings'])) logging.warning('Batch inspect() request too large (%s rows). ' 'Retrying as two smaller batches.', len(rows)) return _rebatch_inspect( rows, project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name) if 'error' in response: raise Exception('Inspect() failed: {}'.format(response['error'])) retvals = [] for row in rows: ret = {'result': {'findings': []}} # Pass the original note along for use in MAE output. if len(target_columns) == 1: ret['original_note'] = row[target_columns[0]['name']] for col in pass_through_columns: if col['name'] not in row: raise Exception( 'Expected column "{}" not found in row: "{}". Adjust your input ' 'table or the "columns" section of your config file.'.format( col['name'], row)) ret[col['name']] = row[col['name']] retvals.append(ret) if 'findings' in response['result']: for finding in response['result']['findings']: # More complicated types, like an image within a pdf, may have multiple # contentLocations, but our simple table will only have one. 
content_location = finding['location']['contentLocations'][0] table_location = content_location['recordLocation']['tableLocation'] if not table_location: retvals[0]['result']['findings'].append(finding) else: index = int(table_location['rowIndex']) retvals[index]['result']['findings'].append(finding) return retvals def format_findings(inspect_result, pass_through_columns, timestamp): ret = {'findings': json.dumps(inspect_result['result'])} for col in pass_through_columns: ret[col['name']] = inspect_result[col['name']] ret[DLP_FINDINGS_TIMESTAMP] = datetime.strftime(timestamp, '%Y-%m-%d %H:%M:%S') return ret def split_gcs_name(gcs_path): bucket = gcs_path.split('/')[2] blob = gcs_path[len('gs://') + len(bucket) + 1 : ] return bucket, blob def mae_to_bq_row(mae_result): return {'record_id': mae_result.record_id, 'xml': mae_result.mae_xml} def write_mae(mae_result, storage_client_fn, mae_dir): """Write the MAE results to GCS.""" storage_client = storage_client_fn() filename = '{}.xml'.format(mae_result.record_id) bucket_name, blob_dir = split_gcs_name(mae_dir) bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(posixpath.join(blob_dir, filename)) blob.upload_from_string(mae_result.mae_xml) def _write_dtd_to_gcs(storage_client_fn, outdir, mae_tag_categories, task_name): """Write the DTD config file.""" storage_client = storage_client_fn() dtd_contents = mae.generate_dtd(mae_tag_categories, task_name) bucket_name, blob_dir = split_gcs_name(outdir) bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(posixpath.join(blob_dir, 'classification.dtd')) blob.upload_from_string(dtd_contents) def _write_dtd(storage_client_fn, outdir, mae_tag_categories, task_name): if outdir.startswith('gs://'): return _write_dtd_to_gcs( storage_client_fn, outdir, mae_tag_categories, task_name) with open(os.path.join(outdir, 'classification.dtd'), 'w') as f: f.write(mae.generate_dtd(mae_tag_categories, task_name)) def _is_custom_type(type_name, per_row_types, inspect_config): for custom_type in per_row_types: if custom_type['infoTypeName'] == type_name: return True if CUSTOM_INFO_TYPES in inspect_config: for custom_type in inspect_config[CUSTOM_INFO_TYPES]: if custom_type['infoType']['name'] == type_name: return True return False def _find_transformation(info_type_transformations, target_info_type): for transformation in info_type_transformations: for info_type in transformation['infoTypes']: if info_type['name'] == target_info_type: return transformation raise Exception('No transformation specified for infoType %s' % target_info_type) def _get_transforms_for_types(info_type_transformations, info_types): """Get the transformations that apply to the given types.""" included_types = set() transforms = [] for info_type in info_types: if info_type in included_types: continue transformation = copy.deepcopy( _find_transformation(info_type_transformations, info_type)) # Remove all non-specified infoTypes from the transformation. transformation['infoTypes'] = [ it for it in transformation['infoTypes'] if it['name'] in info_types] transforms.append(transformation) for info_type in transformation['infoTypes']: included_types.add(info_type['name']) return transforms def _generate_deid_config(info_type_transformations, target_columns, config_field_transformations): """Generate the deidentifyConfig for the deidentify API calls. 
The generated config contains a RecordTransformations.FieldTransformation (https://goo.gl/WrvsDB#DeidentifyTemplate.FieldTransformation) for each column in target_columns, where the transformation is the list of all the transformations in info_type_transformations which match the infoTypes specified for that column, or all the transformations if no infoTypes are specified. Args: info_type_transformations: The "infoTypeTransformations" list from the config. target_columns: The "columns.inspect" list from the config file. config_field_transformations: The "fieldTransformations" list from the config. Returns: A DeidentifyConfig. """ if not info_type_transformations: return {} # Include all the field transformations in the config, then add field # transformations containing the relevant infoType transformations for each # target column. field_transformations = list(config_field_transformations) fields_using_all_info_types = set() for col in target_columns: if 'infoTypesToDeId' not in col: fields_using_all_info_types.add(col['name']) continue info_type_transforms = [] info_type_transforms = _get_transforms_for_types( info_type_transformations, col['infoTypesToDeId']) field_transformations.append( {'fields': [{'name': col['name']}], 'infoTypeTransformations': {'transformations': info_type_transforms}}) # Columns which have a fieldTransformation specified in the config should not # have any other transformations applied to them. for transform in config_field_transformations: fields_using_all_info_types -= set([f['name'] for f in transform['fields']]) # All inspect columns that don't specify types are included together here and # will use all the transformations listed in the config. if fields_using_all_info_types: field_transformations.append( {'fields': [{'name': f} for f in fields_using_all_info_types], 'infoTypeTransformations': {'transformations': info_type_transformations}}) return {'recordTransformations': {'fieldTransformations': field_transformations}} def parse_config_file(deid_config_file): """Creates a json object out of a provided deid config file.""" with open(deid_config_file) as f: config_text = f.read() try: cfg = json.loads(config_text, object_pairs_hook=collections.OrderedDict) except (TypeError, ValueError): logging.error('JSON parsing of DeID config file failed.') raise Exception('Invalid JSON DeID Config.') return cfg def generate_configs(cfg, input_query=None, input_table=None, bq_client=None, bq_config_fn=None): """Generate DLP API configs based on the input config file.""" mae_tag_categories = {} per_row_types = [] key_columns = [] if 'tagCategories' in cfg: mae_tag_categories = cfg['tagCategories'] if 'keyColumns' in cfg: key_columns = cfg['keyColumns'] if 'perRowTypes' in cfg: per_row_types = cfg['perRowTypes'] inspect_config = {} per_dataset_types = [] if 'perDatasetTypes' in cfg: per_dataset_types = cfg['perDatasetTypes'] if bq_client: inspect_config[CUSTOM_INFO_TYPES] = _load_per_dataset_types( per_dataset_types, input_query, input_table, bq_client, bq_config_fn) if CUSTOM_INFO_TYPES in cfg: if CUSTOM_INFO_TYPES not in inspect_config: inspect_config[CUSTOM_INFO_TYPES] = [] for custom_info_type in cfg[CUSTOM_INFO_TYPES]: inspect_config[CUSTOM_INFO_TYPES].append(custom_info_type) # Generate an inspectConfig based on all the infoTypes listed in the deid # config's transformations. 
field_transformations = [] if 'fieldTransformations' in cfg: field_transformations = cfg['fieldTransformations'] info_type_transformations = [] if 'infoTypeTransformations' in cfg: info_type_transformations = cfg['infoTypeTransformations'] info_types = set() for transformation in info_type_transformations: for t in transformation['infoTypes']: # Don't include custom infoTypes in the inspect config or the DLP API # will complain. if _is_custom_type(t['name'], per_row_types, inspect_config): continue info_types.add(t['name']) inspect_config['infoTypes'] = [{'name': t} for t in info_types] if 'experimentalConfig' in cfg: inspect_config['alphiConfig'] = {'model': cfg['experimentalConfig']} if 'columns' not in cfg: raise Exception('Required section "columns" not specified in config.') if 'inspect' not in cfg['columns']: raise Exception('Required section "columns.inspect" not specified in ' 'config.') target_columns = cfg['columns']['inspect'] pass_through_columns = [] if 'passThrough' in cfg['columns']: pass_through_columns = cfg['columns']['passThrough'] field_transform_columns = [] if 'fieldTransform' in cfg['columns']: field_transform_columns = cfg['columns']['fieldTransform'] deid_config = _generate_deid_config(info_type_transformations, target_columns, field_transformations) return (inspect_config, deid_config, mae_tag_categories, key_columns, per_row_types, pass_through_columns, target_columns, field_transform_columns) def _load_per_dataset_types(per_dataset_cfg, input_query, input_table, bq_client, bq_config_fn): """Load data that applies to the whole dataset as custom info types.""" if not input_query and not input_table: return [] custom_info_types = [] saved_query_objects = [] old_api = hasattr(bq_client, 'run_async_query') # Generate the query based on the config options. for type_config in per_dataset_cfg: query = '' if 'bqQuery' in type_config: query = type_config['bqQuery'] elif 'bqTable' in type_config or input_table: table = input_table if 'bqTable' in type_config: table = type_config['bqTable'] columns = [t['columnName'] for t in type_config['infoTypes']] query = 'SELECT %s FROM [%s]' % ( ','.join(columns), table.replace(':', '.')) else: query = input_query query_job = None if old_api: query_job = bq_client.run_async_query(str(uuid.uuid4()), query) query_job.begin() else: job_config = bq_config_fn() job_config.use_legacy_sql = True query_job = bq_client.query(query, job_config=job_config) saved_query_objects.append((query_job, type_config)) for query_job, type_config in saved_query_objects: if old_api: query_job.result() # Wait for the job to complete. query_job.destination.reload() results_table = query_job.destination.fetch_data() else: results_table = query_job.result() # Wait for the job to complete. # Read the results. field_indexes = {} if old_api: for info_type in type_config['infoTypes']: field_indexes[info_type['columnName']] = -1 i = 0 for entry in results_table.schema: if entry.name in field_indexes: field_indexes[entry.name] = i i += 1 type_to_words = collections.defaultdict(set) has_results = False for row in results_table: has_results = True if not old_api and not hasattr(row, 'get'): # Workaround for google-cloud-bigquery==0.28.0, which is the latest # version as of 2017-12-08. 
field_indexes = row._xxx_field_to_index # pylint: disable=protected-access for info_type in type_config['infoTypes']: column_name = info_type['columnName'] value = None if old_api or not hasattr(row, 'get'): value = row[field_indexes[column_name]] else: value = row.get(column_name) type_to_words[info_type['infoTypeName']].add(value) if not has_results: raise Exception('No results for query: "{0}"'.format(query_job.query)) # Generate custom info types based on the results. for info_type_name, words in type_to_words.items(): custom_info_types.append({ 'infoType': {'name': info_type_name}, 'dictionary': {'wordList': {'words': list(words)}} }) return custom_info_types # These functions take a BigQuery row from either before (old) or after (new) # google-cloud-bigquery v0.28 and convert it to a simple map from field name to # value. This allows us to minimize special handling for supporting both # versions, and is also necessary because the new Row object causes infinite # recursion when Dataflow attempts to encode it. def _convert_new_row(row): new_row = {} for field_name, value in row.items(): new_row[field_name] = value return new_row def _one_exists(objs): """Ensures only one object exists in the provided list.""" return 1 == len([obj for obj in objs if obj is not None]) def _convert_old_row(row, field_indexes): new_row = {} for field_name, index in sorted( list(field_indexes.items()), key=lambda x: x[1]): new_row[field_name] = row[index] return new_row def _generate_schema(columns): """Generate a BigQuery schema with the configured columns.""" m = {'stringValue': 'STRING', 'integerValue': 'INTEGER', 'floatValue': 'FLOAT', 'booleanValue': 'BOOLEAN', 'timestamp': 'TIMESTAMP'} segments = [] for col in columns: segments.append('{0}:{1}'.format(col['name'], m[col['type']])) return ', '.join(segments) def read_csv(p, csv_filename): """Read csv file to the row format expected by deid().""" rows = [] with open(csv_filename) as f: spamreader = unicodecsv.UnicodeReader(f) headers = [] for row in spamreader: if not headers: headers = row continue rowmap = {} for i in range(len(headers)): val = '' if i < len(row): val = row[i] rowmap[headers[i]] = val rows.append([rowmap]) return p | beam.Create(rows) def _to_line(rowmap, headers): stringio = None try: stringio = StringIO.StringIO() except NameError: stringio = io.StringIO() writer = unicodecsv.DictWriter(stringio, headers) writer.writerow(rowmap) return stringio.getvalue() def _get_reads(p, input_table, input_query, bq_client, bq_config_fn, batch_size): """Read data from BigQuery. Args: p: A beam.Pipeline object. input_table: Table to get BigQuery data from. Only one of this and input_query may be set. input_query: Query to get BigQuery data from. Only one of this and input_table may be set. bq_client: A bigquery.Client object. bq_config_fn: The bigquery.job.QueryJobConfig function. batch_size: How many rows to send to the DLP API in each request. If this is 1, we can use Beam's built-in BigQuerySource. Otherwise, we need to read directly from BigQuery and batch the rows together. Returns: A PCollection of rows from the given BigQuery input table or query. """ if batch_size == 1: bq = None if input_table: bq = beam.io.BigQuerySource(input_table) else: bq = beam.io.BigQuerySource(query=input_query) # Wrap each read in a list so it's identical to a batched read of size 1. 
return (p | 'read' >> beam.io.Read(bq) | 'wrap' >> beam.Map(lambda read: [read])) old_api = hasattr(bq_client, 'run_async_query') query = input_query or 'SELECT * FROM [%s]' % input_table.replace(':', '.') results_table = None field_indexes = {} if old_api: query_job = bq_client.run_async_query(str(uuid.uuid4()), query) query_job.begin() query_job.result() # Wait for the job to complete. query_job.destination.reload() results_table = query_job.destination.fetch_data() i = 0 for entry in results_table.schema: field_indexes[entry.name] = i i += 1 else: job_config = bq_config_fn() job_config.use_legacy_sql = True query_job = bq_client.query(query, job_config=job_config) results_table = query_job.result() buf = [] batched_rows = [] for row in results_table: if old_api: row = _convert_old_row(row, field_indexes) else: row = _convert_new_row(row) buf.append(row) if len(buf) >= batch_size: batched_rows.append(buf) buf = [] if buf: batched_rows.append(buf) return p | beam.Create(batched_rows) def run_pipeline(input_query, input_table, deid_table, findings_table, mae_dir, mae_table, deid_config_json, task_name, project, storage_client_fn, bq_client, bq_config_fn, dlp_api_name, batch_size, dtd_dir, input_csv, output_csv, timestamp, pipeline_args): """Read the records from BigQuery, DeID them, and write them to BigQuery.""" if not _one_exists([input_query, input_table, input_csv]) and not dtd_dir: return ['Exactly one of input method must be set.'] if not deid_config_json: return ['Must provide DeID Config.'] (inspect_config, deid_config, mae_tag_categories, key_columns, per_row_types, pass_through_columns, target_columns, field_transform_columns) = ( generate_configs(deid_config_json, input_query, input_table, bq_client, bq_config_fn)) if dtd_dir: _write_dtd(storage_client_fn, dtd_dir, mae_tag_categories, task_name) if not _one_exists([input_query, input_table, input_csv]): return [] if len(target_columns) > 1 and (mae_dir or mae_table): raise Exception( 'Cannot use --mae_dir or --mae_table when multiple columns are ' 'specified for "inspect" in the config file.') if mae_dir: for col in key_columns: if not [ptc for ptc in pass_through_columns if ptc['name'] == col]: raise Exception( 'Config file error: keyColumns has {}, which is not present in ' 'columns.passThrough". All key columns must be passed through ' 'un-transformed to allow for evals.'.format(col)) p = beam.Pipeline(options=PipelineOptions(pipeline_args)) if _one_exists([input_table, input_query]): reads = _get_reads(p, input_table, input_query, bq_client, bq_config_fn, batch_size) if input_csv: if not output_csv: return ['Must provide --output_csv when --input_csv is set.'] reads = read_csv(p, input_csv) if not timestamp: timestamp = datetime.utcnow() inspect_data = None if findings_table or mae_dir or mae_table: inspect_data = (reads | 'inspect' >> beam.FlatMap( inspect, project, inspect_config, pass_through_columns, target_columns, per_row_types, dlp_api_name)) if findings_table: # Write the inspect result to BigQuery. We don't process the result, even # if it's for multiple columns. 
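    # Each findings row keeps the pass-through columns, the raw DLP result JSON-encoded
    # into a single string column, and a timestamp (see format_findings above).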
schema = _generate_schema(pass_through_columns + [{'name': 'findings', 'type': 'stringValue'}, {'name': DLP_FINDINGS_TIMESTAMP, 'type': 'timestamp'}]) _ = (inspect_data | 'format_findings' >> beam.Map(format_findings, pass_through_columns, timestamp) | 'write_findings' >> beam.io.Write(beam.io.BigQuerySink( findings_table, schema=schema, write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))) if mae_dir: if not mae_dir.startswith('gs://'): return ['--mae_dir must be a GCS path starting with "gs://".'] _write_dtd(storage_client_fn, mae_dir, mae_tag_categories, task_name) mae_data = None if mae_dir or mae_table: if not key_columns: raise Exception('"keyColumns" not specified in the config. Please ' 'specify a list of columns that will be used as the ' 'primary key for identifying MAE results.') mae_data = (inspect_data | 'generate_mae' >> beam.Map( mae.generate_mae, task_name, mae_tag_categories, key_columns)) if mae_dir: _ = (mae_data | 'write_mae_to_gcs' >> beam.Map( write_mae, storage_client_fn, mae_dir)) if mae_table: _ = (mae_data | 'mae_to_bq_row' >> beam.Map(mae_to_bq_row) | 'write_mae_to_bq' >> beam.io.Write(beam.io.BigQuerySink( mae_table, schema=('record_id:STRING,xml:STRING'), write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))) if deid_table or output_csv: if not deid_config_json: return ['Must set --deid_config_file when --deid_table or --output_csv ' 'is set.'] deid_columns = target_columns + field_transform_columns deid_data = (reads | 'deid' >> beam.FlatMap( deid, project, deid_config, inspect_config, pass_through_columns, deid_columns, per_row_types, dlp_api_name) | 'get_deid_text' >> beam.Map( get_deid_text, pass_through_columns, deid_columns, timestamp)) if deid_table: schema = _generate_schema(pass_through_columns + deid_columns + [{'name': DLP_DEID_TIMESTAMP, 'type': 'timestamp'}]) _ = (deid_data | 'write_deid_text' >> beam.io.Write(beam.io.BigQuerySink( deid_table, schema=schema, write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))) if output_csv: stringio = None try: stringio = StringIO.StringIO() except NameError: stringio = io.StringIO() headers = [c['name'] for c in (pass_through_columns + target_columns + [{'name': DLP_DEID_TIMESTAMP, 'type': 'timestamp'}])] writer = unicodecsv.DictWriter(stringio, headers) writer.writeheader() headerstr = stringio.getvalue() _ = (deid_data | 'to_lines' >> beam.Map(_to_line, headers) | 'write_deid_text' >> beam.io.textio.WriteToText( output_csv, num_shards=1, shard_name_template='', header=headerstr, append_trailing_newlines=False)) result = p.run().wait_until_finish() logging.info('DLP DeID result: %s', result) return [] def add_all_args(parser): """Add command-line arguments to the parser.""" parser.add_argument( '--input_query', type=str, required=False, help=('BigQuery query to provide input data. Must yield rows with all ' 'fields specified in the "columns" section of the config file.')) parser.add_argument( '--input_table', type=str, required=False, help=('BigQuery table to provide input data. 
Must have rows with all ' 'fields specified in the "columns" section of the config file.')) parser.add_argument('--deid_table', type=str, required=False, help='BigQuery table to store DeID\'d data.') parser.add_argument('--findings_table', type=str, required=False, help='BigQuery table to store DeID summary data.') parser.add_argument('--dtd_dir', type=str, help=('Write an MAE DTD file to the given directory (' 'GCS or local).')) parser.add_argument('--mae_dir', type=str, required=False, help=('GCS directory to store inspect() results in MAE ' 'format.')) parser.add_argument('--mae_table', type=str, required=False, help='BQ table to store inspect() results in MAE format.') parser.add_argument('--mae_task_name', type=str, required=False, help='Task name to use in generated MAE files.', default='InspectPhiTask') parser.add_argument('--deid_config_file', type=str, required=False, help='Path to a json file holding the config to use.') parser.add_argument( '--project', type=str, required=False, help=('Defaults to the value specified in GOOGLE_APPLICATION_CREDENTIALS.' ' Is used (1) as the "project" pipeline option when --runner ' 'DataflowRunner is specified, (2) as the project ID for BigQuery ' 'tables that don\'t specify a project, and (3) as the project ' 'where any temporary BigQuery tables will be created. The project ' 'specified in GOOGLE_APPLICATION_CREDENTIALS is always used for ' 'calling the DLP API.')) parser.add_argument('--dlp_api_name', type=str, required=False, help='Name to use in the DLP API url.', default='dlp') parser.add_argument('--batch_size', type=int, required=False, help='How many rows to send in each DLP API call.', default=1) parser.add_argument('--input_csv', type=str, required=False, help='Path to the input CSV file') parser.add_argument('--output_csv', type=str, required=False, help='Path to the CSV file to write the output to')
{ "content_hash": "e2047039a51d50ac3a9551a2a6a1af3b", "timestamp": "", "source": "github", "line_count": 1005, "max_line_length": 83, "avg_line_length": 39.41691542288557, "alnum_prop": 0.6396728429343161, "repo_name": "GoogleCloudPlatform/healthcare-deid", "id": "afb8bef1b307caa6a63d7dc06d0fee52805c00c1", "size": "40212", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dlp/run_deid_lib.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "3004" }, { "name": "Dockerfile", "bytes": "2842" }, { "name": "HTML", "bytes": "19562" }, { "name": "Java", "bytes": "36713" }, { "name": "JavaScript", "bytes": "1782" }, { "name": "Python", "bytes": "320843" }, { "name": "Shell", "bytes": "314" }, { "name": "TypeScript", "bytes": "67344" } ], "symlink_target": "" }
import math

import numpy as np
import matplotlib.pyplot as plt

from network import Network


def normalize(v):
    # Scale the vector by its largest absolute entry (leave all-zero vectors untouched).
    s = 0
    for x in v:
        if math.fabs(x) > s:
            s = math.fabs(x)
    if s == 0:
        return v
    else:
        return v/s


# Simulation parameters: time step, total simulated time, and network size.
dt = 0.0001
totalTime = 0.3
numIterations = int(totalTime/dt)

m = 32
n = 256

network = Network(m, n)
voltage = np.zeros((n, numIterations))

# Run the network forward in time, recording each neuron's membrane voltage.
for t in range(numIterations):
    for i in range(n):
        voltage[i, t] = network.S[i].voltage
    network.update(dt)

#plt.plot(voltage[0], 'red')
#plt.plot(voltage[1], 'green')
#plt.plot(voltage[2], 'blue')
#plt.show()

# Plot the normalized input signal (network.X) and, below, its reconstruction
# X2 = A * S computed from the measured firing rates.
plt.plot(normalize(network.X))

S = np.zeros(n)
for i in range(n):
    S[i] = network.S[i].spikeRate

print("Neuron firing rates:")
print(S)

X2 = np.dot(network.A, S)
plt.plot(normalize(X2))
plt.show()
{ "content_hash": "22559de91eed6b9bb05f26a6b18ba306", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 44, "avg_line_length": 18.727272727272727, "alnum_prop": 0.6225728155339806, "repo_name": "nspotrepka/neuron-inference", "id": "2d64c16bc8b093ed40ae810c71958053fc9c62fb", "size": "824", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "inference.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "4153" } ], "symlink_target": "" }
import base64 import winrm class PyinfraWinrmSession(winrm.Session): """This is our subclassed Session that allows for env setting""" def run_cmd(self, command, args=(), env=None): shell_id = self.protocol.open_shell(env_vars=env) command_id = self.protocol.run_command(shell_id, command, args) rs = winrm.Response(self.protocol.get_command_output(shell_id, command_id)) self.protocol.cleanup_command(shell_id, command_id) self.protocol.close_shell(shell_id) return rs def run_ps(self, script, env=None): """base64 encodes a Powershell script and executes the powershell encoded script command """ # must use utf16 little endian on windows encoded_ps = base64.b64encode(script.encode("utf_16_le")).decode("ascii") rs = self.run_cmd("powershell -encodedcommand {0}".format(encoded_ps), env=env) if len(rs.std_err): # if there was an error message, clean it it up and make it human # readable rs.std_err = self._clean_error_msg(rs.std_err) return rs
{ "content_hash": "e62f5410a49d8a519cf7748e3023f62f", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 87, "avg_line_length": 39.75, "alnum_prop": 0.6460017969451932, "repo_name": "Fizzadar/pyinfra", "id": "a0905b5477d75c94879bdec41bfe0950bea993ac", "size": "1113", "binary": false, "copies": "1", "ref": "refs/heads/2.x", "path": "pyinfra/connectors/pyinfrawinrmsession/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jinja", "bytes": "57" }, { "name": "Python", "bytes": "861601" }, { "name": "Shell", "bytes": "3448" } ], "symlink_target": "" }
from __future__ import print_function


def foo_function(debugger, args, result, dict):
    print("foobar says " + args, file=result)
    return None
{ "content_hash": "39a9a15f5550a8bd677b2a56d1395b61", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 47, "avg_line_length": 25, "alnum_prop": 0.6933333333333334, "repo_name": "youtube/cobalt", "id": "6ef71064c9a9dbfc6cf38f40c3bcfd859b331810", "size": "150", "binary": false, "copies": "11", "ref": "refs/heads/master", "path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/command_script/import/foo/bar/foobar.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
if __name__ == '__main__':
    import validator as v

    first_name = input("Enter the first name: ")
    last_name = input("Enter the last name: ")
    zip_code = input("Enter the ZIP code: ")
    employee_id = input("Enter an employee ID: ")

    success, err = v.validate_input(
        first_name, last_name, zip_code, employee_id)

    if success:
        print("There were no errors found.\n", end='')
    else:
        print(err, end='')
{ "content_hash": "6b1259143bd16ef62f4838617d0ab142", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 54, "avg_line_length": 34.15384615384615, "alnum_prop": 0.5855855855855856, "repo_name": "yamanobori-old/LanguageExercises", "id": "08cae28a2ff79df49c5aa77fdf19161c78c1cef6", "size": "483", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "27_複数関数をまとめる/python/main.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "127" }, { "name": "C#", "bytes": "18219" }, { "name": "C++", "bytes": "93441" }, { "name": "CSS", "bytes": "30" }, { "name": "Go", "bytes": "76013" }, { "name": "HTML", "bytes": "631" }, { "name": "JavaScript", "bytes": "2677" }, { "name": "Makefile", "bytes": "32650" }, { "name": "Python", "bytes": "57110" }, { "name": "Rust", "bytes": "14101" }, { "name": "Shell", "bytes": "200" }, { "name": "Vue", "bytes": "3028" } ], "symlink_target": "" }
from flask import Flask, render_template, request, session import pkg_resources import sys import os import logging import matplotlib matplotlib.use('TkAgg') # Changes the matplotlib framework import utils.dave_endpoint as DaveEndpoint import utils.dataset_cache as DsCache import utils.gevent_helper as GeHelper import random from utils.np_encoder import NPEncoder from config import CONFIG logsdir = "." if len(sys.argv) > 1 and sys.argv[1] != "": logsdir = sys.argv[1] scriptdir = "." if len(sys.argv) > 2 and sys.argv[2] != "": scriptdir = sys.argv[2] server_port = 5000 if len(sys.argv) > 3 and sys.argv[3] != "": server_port = int(sys.argv[3]) build_version = "0" if len(sys.argv) > 4 and sys.argv[4] != "": build_version = sys.argv[4] logging.basicConfig(filename=logsdir + '/flaskserver.log', level=logging.DEBUG) logging.info("Logs file is " + logsdir + "/flaskserver.log") logging.info("Templates dir is " + scriptdir + "/../resources/templates") app = Flask("dave_srv", template_folder=scriptdir + "/../resources/templates", static_folder=scriptdir + "/../resources/static") APP_ROOT = os.path.dirname(os.path.abspath(__file__)) UPLOADS_TARGET = os.path.join(APP_ROOT, 'uploadeddataset') app.secret_key = os.urandom(24) app.json_encoder = NPEncoder app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False # ------ Flask Server Profiler ----- # from werkzeug.contrib.profiler import ProfilerMiddleware # app.config['PROFILE'] = True # app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10]) # ------ END Flask Server Profiler ----- # ------ Configure HTTP Compression ----- # Tested on DAVE but the doesn't improves performance, # sure is a good choice for improving DAVE if runs on a remote server # Change environment.yml to add to pip: "- flask-compress==1.4.0" # Add import: "from flask_compress import Compress", # Uncomment following code: # COMPRESS_MIMETYPES = ['text/html', 'text/css', 'text/xml', 'application/json', 'application/javascript'] # COMPRESS_LEVEL = 6 # COMPRESS_MIN_SIZE = 500 # Compress(app) # ------ END Configure HTTP Compression ----- # Routes methods @app.route('/upload', methods=['GET', 'POST']) def upload(): return DaveEndpoint.upload(request.files.getlist("file"), UPLOADS_TARGET) @app.route('/set_config', methods=['POST']) def set_config(): DsCache.clear() return CONFIG.set_config(request.json['CONFIG']) @app.route('/clear_cache', methods=['POST']) def clear_cache(): DsCache.clear() return "" @app.route('/get_dataset_schema', methods=['GET']) def get_dataset_schema(): return DaveEndpoint.get_dataset_schema(request.args['filename'], UPLOADS_TARGET) @app.route('/get_dataset_header', methods=['GET']) def get_dataset_header(): return DaveEndpoint.get_dataset_header(request.args['filename'], UPLOADS_TARGET) @app.route('/append_file_to_dataset', methods=['POST']) def append_file_to_dataset(): return DaveEndpoint.append_file_to_dataset(request.json['filename'], request.json['nextfile'], UPLOADS_TARGET) @app.route('/apply_rmf_file_to_dataset', methods=['GET']) def apply_rmf_file_to_dataset(): return DaveEndpoint.apply_rmf_file_to_dataset(request.args['filename'], request.args['rmf_filename'], request.args['column'], UPLOADS_TARGET) @app.route('/get_plot_data', methods=['POST']) def get_plot_data(): return DaveEndpoint.get_plot_data(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['styles'], request.json['axis']) @app.route('/get_lightcurve', methods=['POST']) def get_lightcurve(): variance_opts = 
None if "variance_opts" in request.json: variance_opts = request.json['variance_opts'] return DaveEndpoint.get_lightcurve(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), request.json['baseline_opts'], request.json['meanflux_opts'], variance_opts) @app.route('/get_joined_lightcurves', methods=['POST']) def get_joined_lightcurves(): return DaveEndpoint.get_joined_lightcurves(request.json['lc0_filename'], request.json['lc1_filename'], request.json['lc0_bck_filename'], request.json['lc1_bck_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt'])) @app.route('/get_divided_lightcurves_from_colors', methods=['POST']) def get_divided_lightcurves_from_colors(): return DaveEndpoint.get_divided_lightcurves_from_colors(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt'])) @app.route('/get_divided_lightcurve_ds', methods=['POST']) def get_divided_lightcurve_ds(): return DaveEndpoint.get_divided_lightcurve_ds(request.json['lc0_filename'], request.json['lc1_filename'], request.json['lc0_bck_filename'], request.json['lc1_bck_filename'], UPLOADS_TARGET) @app.route('/get_power_density_spectrum', methods=['POST']) def get_power_density_spectrum(): return DaveEndpoint.get_power_density_spectrum(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type'], float(request.json['df'])) @app.route('/get_dynamical_spectrum', methods=['POST']) def get_dynamical_spectrum(): return DaveEndpoint.get_dynamical_spectrum(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['freq_range'], float(request.json['df'])) @app.route('/get_cross_spectrum', methods=['POST']) def get_cross_spectrum(): return DaveEndpoint.get_cross_spectrum(request.json['filename1'], request.json['bck_filename1'], request.json['gti_filename1'], request.json['filters1'], request.json['axis1'], float(request.json['dt1']), request.json['filename2'], request.json['bck_filename2'], request.json['gti_filename2'], request.json['filters2'], request.json['axis2'], float(request.json['dt2']), UPLOADS_TARGET, float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type']) @app.route('/get_covariance_spectrum', methods=['POST']) def get_covariance_spectrum(): return DaveEndpoint.get_covariance_spectrum(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], request.json['filters'], UPLOADS_TARGET, float(request.json['dt']), request.json['ref_band_interest'], request.json['energy_range'], int(request.json['n_bands']), float(request.json['std'])) @app.route('/get_phase_lag_spectrum', methods=['POST']) def get_phase_lag_spectrum(): return DaveEndpoint.get_phase_lag_spectrum(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), 
float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type'], float(request.json['df']), request.json['freq_range'], request.json['energy_range'], int(request.json['n_bands'])) @app.route('/get_rms_spectrum', methods=['POST']) def get_rms_spectrum(): return DaveEndpoint.get_rms_spectrum(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type'], float(request.json['df']), request.json['freq_range'], request.json['energy_range'], int(request.json['n_bands']), float(request.json['white_noise'])) @app.route('/get_rms_vs_countrate', methods=['POST']) def get_rms_vs_countrate(): return DaveEndpoint.get_rms_vs_countrate(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), int(request.json['n_bands']), float(request.json['df']), request.json['freq_range'], request.json['energy_range'], float(request.json['white_noise'])) @app.route('/get_plot_data_from_models', methods=['POST']) def get_plot_data_from_models(): return DaveEndpoint.get_plot_data_from_models(request.json['models'], request.json['x_values']) @app.route('/get_fit_powerspectrum_result', methods=['POST']) def get_fit_powerspectrum_result(): priors = None if "priors" in request.json: priors = request.json['priors'] sampling_params = None if "sampling_params" in request.json: sampling_params = request.json['sampling_params'] return DaveEndpoint.get_fit_powerspectrum_result(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type'], float(request.json['df']), request.json['models'], priors, sampling_params) @app.route('/get_bootstrap_results', methods=['POST']) def get_bootstrap_results(): return DaveEndpoint.get_bootstrap_results(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['nsegm']), float(request.json['segment_size']), request.json['norm'], request.json['type'], float(request.json['df']), request.json['models'], int(request.json['n_iter']), float(request.json['mean']), int(request.json['red_noise']), int(request.json['seed'])) @app.route('/get_intermediate_files', methods=['POST']) def get_intermediate_files(): return DaveEndpoint.get_intermediate_files(request.json['filepaths'], UPLOADS_TARGET) @app.route('/bulk_analisys', methods=['POST']) def bulk_analisys(): return DaveEndpoint.bulk_analisys(request.json['filenames'], request.json['plotConfigs'], request.json['outdir'], UPLOADS_TARGET) @app.route('/get_lomb_scargle_results', methods=['POST']) def get_lomb_scargle_results(): return DaveEndpoint.get_lomb_scargle_results(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), request.json['freq_range'], int(request.json['nyquist_factor']), request.json['ls_norm'], int(request.json['samples_per_peak'])) @app.route('/get_fit_lomb_scargle_result', 
methods=['POST']) def get_fit_lomb_scargle_result(): priors = None if "priors" in request.json: priors = request.json['priors'] sampling_params = None if "sampling_params" in request.json: sampling_params = request.json['sampling_params'] return DaveEndpoint.get_fit_lomb_scargle_result(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), request.json['freq_range'], int(request.json['nyquist_factor']), request.json['ls_norm'], int(request.json['samples_per_peak']), request.json['models'], priors, sampling_params) @app.route('/get_pulse_search', methods=['POST']) def get_pulse_search(): return DaveEndpoint.get_pulse_search(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), request.json['freq_range'], request.json['mode'], int(request.json['oversampling']), int(request.json['nharm']), int(request.json['nbin']), float(request.json['segment_size'])) @app.route('/get_phaseogram', methods=['POST']) def get_phaseogram(): binary_params= None if "binary_params" in request.json: binary_params = request.json['binary_params'] return DaveEndpoint.get_phaseogram(request.json['filename'], request.json['bck_filename'], request.json['gti_filename'], UPLOADS_TARGET, request.json['filters'], request.json['axis'], float(request.json['dt']), float(request.json['f']), int(request.json['nph']), int(request.json['nt']), float(request.json['fdot']), float(request.json['fddot']), binary_params) # Receives a message from client and send it to all subscribers @app.route("/publish", methods=['POST']) def publish(): return GeHelper.publish(request.json['message']) @app.route("/subscribe") def subscribe(): return GeHelper.subscribe() @app.route('/') def root(): return render_template("master_page.html", get_version=get_version) @app.route('/shutdown') def shutdown(): logging.info('Server shutting down...') shutdown_server() return 'Server shutting down...' def get_version(): if CONFIG.USE_JAVASCRIPT_CACHE: return build_version else: return str(random.randint(0, CONFIG.BIG_NUMBER)) # Shutdown flask server def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') if func is None: logging.warn('shutdown_server: Not running with the Werkzeug Server') exit() func() # Setting error handler def http_error_handler(error): try: logging.error('ERROR: http_error_handler ' + str(error)) return json.dumps(dict(error=str(error))) except: logging.error('ERROR: http_error_handler --> EXCEPT ') for error in (400, 401, 403, 404, 500): # or with other http code you consider as error app.error_handler_spec[None][error] = http_error_handler if __name__ == '__main__': GeHelper.start(server_port, app) app.run(debug=CONFIG.DEBUG_MODE, threaded=True) # Use app.run(host='0.0.0.0') for listen on all interfaces
{ "content_hash": "cf9cfaa378528369153c6261a843b335", "timestamp": "", "source": "github", "line_count": 359, "max_line_length": 145, "avg_line_length": 41.618384401114206, "alnum_prop": 0.6655511679271803, "repo_name": "StingraySoftware/dave", "id": "3b5903f1f0f2364bd51d2b878e4b0265ff579dd7", "size": "14987", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/main/python/server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "12244" }, { "name": "HTML", "bytes": "19741" }, { "name": "JavaScript", "bytes": "573855" }, { "name": "Jupyter Notebook", "bytes": "19218" }, { "name": "Python", "bytes": "269159" }, { "name": "Shell", "bytes": "24183" } ], "symlink_target": "" }
""" Plot autocorrelation for toy model. """ import os import sys sys.path.append('./code') from models import ISA, GSM, Distribution from numpy import * from numpy import max from numpy.random import * from numpy.linalg import pinv from tools import contours, mapp from pgf import * from pdb import set_trace from time import time from tools import Experiment from copy import deepcopy NUM_SAMPLES = 5000 # used to estimate time transition operator takes NUM_STEPS_MULTIPLIER = 5 # number of transition operator applications for estimating computation time NUM_AUTOCORR = 5000 # number of posterior autocorrelation functions averaged NUM_SECONDS_RUN = 10000 # length of Markov chain used to estimate autocorrelation NUM_SECONDS_VIS = 15 # length of estimated autocorrelation function # transition operator parameters sampling_methods = [ { 'method': 'hmc', 'burn_in_steps': 20, 'parameters': { 'num_steps': 1, 'lf_step_size': 1.25, 'lf_num_steps': 5, 'lf_randomness': 0.05, }, 'color': RGB(0.9, 0.0, 0.0), }, { 'method': 'mala', 'burn_in_steps': 20, 'parameters': { 'num_steps': 5, 'step_width': 3.5, }, 'color': RGB(0.4, 0.2, 0.0), }, { 'method': 'gibbs', 'burn_in_steps': 20, 'parameters': { 'num_steps': 1, }, 'color': RGB(0.1, 0.6, 1.), }, ] def autocorr(X, N, d=1): """ Estimates autocorrelation from a sample of a possibly multivariate stationary Markov chain. """ X = X - mean(X, 1).reshape(-1, 1) v = mean(sum(square(X), 0)) # autocovariance A = [v] for t in range(1, N + 1, d): A.append(mean(sum(X[:, :-t] * X[:, t:], 0))) # normalize by variance return hstack(A) / v def main(argv): seterr(over='raise', divide='raise', invalid='raise') try: if int(os.environ['OMP_NUM_THREADS']) > 1 or int(os.environ['MKL_NUM_THREADS']) > 1: print 'It seems that parallelization is turned on. This will skew the results. To turn it off:' print '\texport OMP_NUM_THREADS=1' print '\texport MKL_NUM_THREADS=1' except: print 'Parallelization of BLAS might be turned on. This could skew results.' experiment = Experiment(seed=42) if os.path.exists('results/toyexample/toyexample.xpck'): results = Experiment('results/toyexample/toyexample.xpck') ica = results['ica'] else: # toy model ica = ISA(1, 3) ica.initialize(method='exponpow') ica.A = 1. + randn(1, 3) / 5. experiment['ica'] = ica experiment.save('results/toyexample/toyexample.xpck') Y_ = ica.sample_prior(NUM_AUTOCORR) X_ = dot(ica.A, Y_) for method in sampling_methods: # disable output and parallelization Distribution.VERBOSITY = 0 mapp.max_processes = 1 Y = ica.sample_prior(NUM_SAMPLES) X = dot(ica.A, Y) # measure time required by transition operator start = time() # increase number of steps to reduce overhead ica.sample_posterior(X, method=(method['method'], dict(method['parameters'], Y=Y, num_steps=method['parameters']['num_steps'] * NUM_STEPS_MULTIPLIER))) # time required per transition operator application duration = (time() - start) / NUM_STEPS_MULTIPLIER # number of mcmc steps to run for this method num_mcmc_steps = int(NUM_SECONDS_RUN / duration + 1.) num_autocorr_steps = int(NUM_SECONDS_VIS / duration + 1.) 
# enable output and parallelization Distribution.VERBOSITY = 2 mapp.max_processes = 2 # posterior samples Y = [Y_] # Markov chain for i in range(num_mcmc_steps): Y.append(ica.sample_posterior(X_, method=(method['method'], dict(method['parameters'], Y=Y[-1])))) ac = [] for j in range(NUM_AUTOCORR): # collect samples belonging to one posterior distribution S = hstack([Y[k][:, [j]] for k in range(num_mcmc_steps)]) # compute autocorrelation for j-th posterior ac = [autocorr(S, num_autocorr_steps)] # average and plot autocorrelation functions plot(arange(num_autocorr_steps) * duration, mean(ac, 0), '-', color=method['color'], line_width=1.2, comment=str(method['parameters'])) xlabel('time in seconds') ylabel('autocorrelation') title('toy example') gca().width = 7 gca().height = 7 gca().xmin = -1 gca().xmax = NUM_SECONDS_VIS savefig('results/toyexample/toyexample_autocorr2.tex') return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
{ "content_hash": "ea52d99ce9b684eb4408938f39710d89", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 101, "avg_line_length": 24.411428571428573, "alnum_prop": 0.6732209737827716, "repo_name": "lucastheis/isa", "id": "ba1aa0735b179d30186e51e906bbc519ff268067", "size": "4272", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "code/experiments/toyexample_autocorr.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "225674" } ], "symlink_target": "" }
import json
import socket
import sys

import asfgit.cfg as cfg
import asfgit.git as git
import asfgit.log as log

import subprocess, os, time


def main():
    ghurl = "git@github:apache/%s.git" % cfg.repo_name
    os.chdir("/x1/git/repos/asf/%s.git" % cfg.repo_name)
    try:
        subprocess.check_call(["git", "push", "--all", ghurl])
        try:
            os.unlink("/x1/git/git-dual/broken/%s.txt" % cfg.repo_name)
        except:
            pass
    except Exception as err:
        with open("/x1/git/git-dual/broken/%s.txt" % cfg.repo_name, "w") as f:
            f.write("BROKEN AT %s\n" % time.strftime("%c"))
            f.close()
        log.exception(err)
{ "content_hash": "9a9791f44077d13a90396579779ff27b", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 77, "avg_line_length": 27.708333333333332, "alnum_prop": 0.5954887218045113, "repo_name": "chtyim/infrastructure-puppet", "id": "47e67f27063e7dd0080d0611bf621aca7b43770e", "size": "690", "binary": false, "copies": "1", "ref": "refs/heads/deployment", "path": "modules/gitserver_dual/files/asfgit/hooks/sync.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "649" }, { "name": "C", "bytes": "44620" }, { "name": "CSS", "bytes": "7809" }, { "name": "HTML", "bytes": "153235" }, { "name": "Lua", "bytes": "66470" }, { "name": "Makefile", "bytes": "1043" }, { "name": "Pascal", "bytes": "181" }, { "name": "Perl", "bytes": "197378" }, { "name": "Puppet", "bytes": "249578" }, { "name": "Python", "bytes": "334751" }, { "name": "Ruby", "bytes": "68723" }, { "name": "Shell", "bytes": "225996" } ], "symlink_target": "" }
from codecs import open

from setuptools import setup

from libmt94x import __version__


def read_file(filepath):
    with open(filepath, 'rb+', 'utf-8') as f:
        content = f.read()
    return content.strip()


CLASSIFIERS = [
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Development Status :: 5 - Production/Stable',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
]

setup(
    name='python-libmt94x',
    version=__version__,
    author='Ginger Payments',
    author_email='[email protected]',
    description='This library generates bank statements in MT940/MT942 format',
    long_description=(
        '%s\n\n%s' % (
            read_file('README.rst'),
            read_file('HISTORY.rst'),
        )
    ),
    url='https://github.com/gingerpayments/python-libmt94x',
    license='MIT',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    packages=['libmt94x'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'Unidecode<0.5',  # translates unicode characters to ascii
        'pycountry<2.0',  # provides currency codes
    ],
)
{ "content_hash": "2957e554880e9a1d624aca7b5dc94ba3", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 79, "avg_line_length": 26.854166666666668, "alnum_prop": 0.6322730799069046, "repo_name": "gingerpayments/python-libmt94x", "id": "abdc395918aa3987cb16d7f09e423080391b71ff", "size": "1289", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "142375" } ], "symlink_target": "" }
from __future__ import unicode_literals from bottle import HTTPError, ServerAdapter from functools import partial, wraps from threading import Thread RETRY_AFTER_HEADER = str('Retry-After') def abort(code, message=None, headers=None): """ Abort a request and send a response with the given code, and optional message and headers. :raises: :class:`HTTPError` """ raise HTTPError(code, {'message': message}, headers=headers) def retry_after(delay, code=429): """ Abort a request and send a response, including a Retry-After header informing the client when a retry of the request will be accepted. """ abort(code, headers={RETRY_AFTER_HEADER: delay}) def authorize(method): """Decorator for a method that requires authorization. Unauthorized requests will be aborted with a 401.""" @wraps(method) def authorized_method(self, *args, **kwargs): skip_auth = kwargs.pop('skip_auth', False) if not skip_auth: self.check_authorization_header() return method(self, *args, **kwargs) return authorized_method def rate_limit(method): """Decorator for a method that requires rate limiting. Too many requests will be aborted with a 429.""" @wraps(method) def limited_method(self, *args, **kwargs): skip_limit = kwargs.pop('skip_limit', False) if not skip_limit: self.check_rate_limits() return method(self, *args, **kwargs) return limited_method def _route(verb, app, route): """Helper decorator to apply methods to routes.""" def routed_method(method): setattr(method, 'verb', verb) setattr(method, 'app', app) setattr(method, 'route', route) return method return routed_method def log_request(method): """Decorator for a method to add its request to the request log.""" @wraps(method) def logged_method(self, *args, **kwargs): skip_log = kwargs.pop('skip_log', False) if not skip_log: self.append_to_request_log() return method(self, *args, **kwargs) return logged_method GET = partial(_route, 'GET') POST = partial(_route, 'POST') PUT = partial(_route, 'PUT') DELETE = partial(_route, 'DELETE') OPTIONS = partial(_route, 'OPTIONS') class StoppableWSGIRefServer(ServerAdapter): """ Subclass of built-in Bottle server adapter that allows the server to be stopped. This is important for testing, since we don't want to "serve forever". """ def __init__(self, host='127.0.0.1', port=8080, **options): super(StoppableWSGIRefServer, self).__init__(host, port, **options) self.srv = None self._thread = None def run(self, app): from wsgiref.simple_server import WSGIRequestHandler, WSGIServer from wsgiref.simple_server import make_server class FixedHandler(WSGIRequestHandler): def address_string(self): return self.client_address[0] parent = self def log_request(self, *args, **kw): if not self.parent.quiet: return WSGIRequestHandler.log_request(self, *args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) self.srv = make_server(self.host, self.port, app, server_cls, handler_cls) thread = Thread(target=self.srv.serve_forever) thread.daemon = True thread.start() self._thread = thread self.srv.wait = self.wait return self.srv def wait(self): self.srv.server_close() self._thread.join()
{ "content_hash": "71d78674802b516e03fb6b1b04226512", "timestamp": "", "source": "github", "line_count": 114, "max_line_length": 111, "avg_line_length": 32.25438596491228, "alnum_prop": 0.642099537666576, "repo_name": "Tusky/box-python-sdk", "id": "19dc15681cb749aec02c6e4d12a03ffda1ccf747", "size": "3694", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "test/functional/mock_box/util/http_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "348572" }, { "name": "Smarty", "bytes": "527" } ], "symlink_target": "" }
import sqlite3

from airflow.hooks.dbapi_hook import DbApiHook


class SqliteHook(DbApiHook):
    """
    Interact with SQLite.
    """

    conn_name_attr = 'sqlite_conn_id'
    default_conn_name = 'sqlite_default'
    supports_autocommit = False

    def get_conn(self):
        """
        Returns a sqlite connection object
        """
        conn = self.get_connection(self.sqlite_conn_id)
        conn = sqlite3.connect(conn.host)
        return conn
{ "content_hash": "b9613d6d6a3793d1853c231c3e9b29fa", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 55, "avg_line_length": 20.863636363636363, "alnum_prop": 0.6252723311546841, "repo_name": "mtagle/airflow", "id": "2af589c524e51e93f07f9840901dd05c75168646", "size": "1247", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "airflow/providers/sqlite/hooks/sqlite.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13715" }, { "name": "Dockerfile", "bytes": "17280" }, { "name": "HTML", "bytes": "148492" }, { "name": "JavaScript", "bytes": "25360" }, { "name": "Jupyter Notebook", "bytes": "2933" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "10006634" }, { "name": "Shell", "bytes": "217011" }, { "name": "TSQL", "bytes": "879" } ], "symlink_target": "" }
from django.apps import AppConfig


class JDPagesConfig(AppConfig):
    name = 'website.jdpages'
    label = 'jdpages'
    verbose_name = "JD Pages"


class CoreConfig(AppConfig):
    name = 'website.core'
    label = 'jdcore'  # prevent name collision with mezzanine.core
    verbose_name = "Website Core"


class EventsConfig(AppConfig):
    name = "swingtime"
    label = "events"
    verbose_name = "Events"
{ "content_hash": "c600e2fe5574284867a95f119d4daf89", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 66, "avg_line_length": 21.68421052631579, "alnum_prop": 0.6796116504854369, "repo_name": "jonge-democraten/website", "id": "9170d104f82c88cc6fa745e639f8245c4accd7ae", "size": "412", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "website/apps.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "65915" }, { "name": "HTML", "bytes": "51004" }, { "name": "JavaScript", "bytes": "13921" }, { "name": "Python", "bytes": "132291" }, { "name": "Shell", "bytes": "466" } ], "symlink_target": "" }
"""Support for Somfy Smart Thermostat.""" from __future__ import annotations from typing import Any, cast from pyoverkiz.enums import OverkizCommand, OverkizCommandParam, OverkizState from homeassistant.components.climate import ( PRESET_AWAY, PRESET_HOME, PRESET_NONE, ClimateEntity, ClimateEntityFeature, HVACMode, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from ..coordinator import OverkizDataUpdateCoordinator from ..entity import OverkizEntity PRESET_FREEZE = "freeze" PRESET_NIGHT = "night" STATE_DEROGATION_ACTIVE = "active" STATE_DEROGATION_INACTIVE = "inactive" OVERKIZ_TO_HVAC_MODES: dict[str, HVACMode] = { STATE_DEROGATION_ACTIVE: HVACMode.HEAT, STATE_DEROGATION_INACTIVE: HVACMode.AUTO, } HVAC_MODES_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_HVAC_MODES.items()} OVERKIZ_TO_PRESET_MODES: dict[OverkizCommandParam, str] = { OverkizCommandParam.AT_HOME_MODE: PRESET_HOME, OverkizCommandParam.AWAY_MODE: PRESET_AWAY, OverkizCommandParam.FREEZE_MODE: PRESET_FREEZE, OverkizCommandParam.MANUAL_MODE: PRESET_NONE, OverkizCommandParam.SLEEPING_MODE: PRESET_NIGHT, OverkizCommandParam.SUDDEN_DROP_MODE: PRESET_NONE, } PRESET_MODES_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_PRESET_MODES.items()} TARGET_TEMP_TO_OVERKIZ = { PRESET_HOME: OverkizState.SOMFY_THERMOSTAT_AT_HOME_TARGET_TEMPERATURE, PRESET_AWAY: OverkizState.SOMFY_THERMOSTAT_AWAY_MODE_TARGET_TEMPERATURE, PRESET_FREEZE: OverkizState.SOMFY_THERMOSTAT_FREEZE_MODE_TARGET_TEMPERATURE, PRESET_NIGHT: OverkizState.SOMFY_THERMOSTAT_SLEEPING_MODE_TARGET_TEMPERATURE, } # controllableName is somfythermostat:SomfyThermostatTemperatureSensor TEMPERATURE_SENSOR_DEVICE_INDEX = 2 class SomfyThermostat(OverkizEntity, ClimateEntity): """Representation of Somfy Smart Thermostat.""" _attr_temperature_unit = TEMP_CELSIUS _attr_supported_features = ( ClimateEntityFeature.PRESET_MODE | ClimateEntityFeature.TARGET_TEMPERATURE ) _attr_hvac_modes = [*HVAC_MODES_TO_OVERKIZ] _attr_preset_modes = [*PRESET_MODES_TO_OVERKIZ] # Both min and max temp values have been retrieved from the Somfy Application. _attr_min_temp = 15.0 _attr_max_temp = 26.0 def __init__( self, device_url: str, coordinator: OverkizDataUpdateCoordinator ) -> None: """Init method.""" super().__init__(device_url, coordinator) self.temperature_device = self.executor.linked_device( TEMPERATURE_SENSOR_DEVICE_INDEX ) @property def hvac_mode(self) -> str: """Return hvac operation ie. 
heat, cool mode.""" return OVERKIZ_TO_HVAC_MODES[ cast( str, self.executor.select_state(OverkizState.CORE_DEROGATION_ACTIVATION) ) ] @property def preset_mode(self) -> str: """Return the current preset mode, e.g., home, away, temp.""" if self.hvac_mode == HVACMode.AUTO: state_key = OverkizState.SOMFY_THERMOSTAT_HEATING_MODE else: state_key = OverkizState.SOMFY_THERMOSTAT_DEROGATION_HEATING_MODE state = cast(str, self.executor.select_state(state_key)) return OVERKIZ_TO_PRESET_MODES[OverkizCommandParam(state)] @property def current_temperature(self) -> float | None: """Return the current temperature.""" if temperature := self.temperature_device.states[OverkizState.CORE_TEMPERATURE]: return cast(float, temperature.value) return None @property def target_temperature(self) -> float | None: """Return the temperature we try to reach.""" if self.hvac_mode == HVACMode.AUTO: if self.preset_mode == PRESET_NONE: return None return cast( float, self.executor.select_state(TARGET_TEMP_TO_OVERKIZ[self.preset_mode]), ) return cast( float, self.executor.select_state(OverkizState.CORE_DEROGATED_TARGET_TEMPERATURE), ) async def async_set_temperature(self, **kwargs: Any) -> None: """Set new target temperature.""" temperature = kwargs[ATTR_TEMPERATURE] await self.executor.async_execute_command( OverkizCommand.SET_DEROGATION, temperature, OverkizCommandParam.FURTHER_NOTICE, ) await self.executor.async_execute_command( OverkizCommand.SET_MODE_TEMPERATURE, OverkizCommandParam.MANUAL_MODE, temperature, ) await self.executor.async_execute_command(OverkizCommand.REFRESH_STATE) async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None: """Set new target hvac mode.""" if hvac_mode == HVACMode.AUTO: await self.executor.async_execute_command(OverkizCommand.EXIT_DEROGATION) await self.executor.async_execute_command(OverkizCommand.REFRESH_STATE) else: await self.async_set_preset_mode(PRESET_NONE) async def async_set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" if preset_mode in [PRESET_FREEZE, PRESET_NIGHT, PRESET_AWAY, PRESET_HOME]: await self.executor.async_execute_command( OverkizCommand.SET_DEROGATION, PRESET_MODES_TO_OVERKIZ[preset_mode], OverkizCommandParam.FURTHER_NOTICE, ) elif preset_mode == PRESET_NONE: await self.executor.async_execute_command( OverkizCommand.SET_DEROGATION, self.target_temperature, OverkizCommandParam.FURTHER_NOTICE, ) await self.executor.async_execute_command( OverkizCommand.SET_MODE_TEMPERATURE, OverkizCommandParam.MANUAL_MODE, self.target_temperature, ) await self.executor.async_execute_command(OverkizCommand.REFRESH_STATE)
{ "content_hash": "e1f58db3b425dfd6e3e6044897b36fdf", "timestamp": "", "source": "github", "line_count": 162, "max_line_length": 88, "avg_line_length": 37.098765432098766, "alnum_prop": 0.6623960066555741, "repo_name": "w1ll1am23/home-assistant", "id": "608b26b8c9d6486aa24c495aeacf56a38c74b0f8", "size": "6010", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/overkiz/climate_entities/somfy_thermostat.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52277012" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
from troveclient import exceptions from nose.plugins.skip import SkipTest from proboscis import after_class from proboscis import before_class from proboscis import test from proboscis.asserts import * from proboscis.decorators import time_out from trove import tests from trove.tests.api.instances import instance_info from trove.tests.util import test_config from trove.tests.util import create_dbaas_client from trove.tests.util import poll_until from trove.tests.config import CONFIG from trove.tests.util.users import Requirements from trove.tests.api.instances import existing_instance GROUP = "dbaas.api.mgmt.accounts" @test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES], depends_on_groups=["services.initialize"]) class AccountsBeforeInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_invalid_account(self): raise SkipTest("Don't have a good way to know if accounts are valid.") assert_raises(exceptions.NotFound, self.client.accounts.show, "asd#4#@fasdf") @test def test_invalid_account_fails(self): account_info = self.client.accounts.show("badaccount") assert_not_equal(self.user.tenant_id, account_info.id) @test def test_account_zero_instances(self): account_info = self.client.accounts.show(self.user.tenant_id) expected_instances = 0 if not existing_instance() else 1 assert_equal(expected_instances, len(account_info.instances)) expected = self.user.tenant_id if expected is None: expected = "None" assert_equal(expected, account_info.id) @test def test_list_empty_accounts(self): accounts_info = self.client.accounts.index() expected_accounts = 0 if not existing_instance() else 1 assert_equal(expected_accounts, len(accounts_info.accounts)) @test(groups=[tests.INSTANCES, GROUP], depends_on_groups=["dbaas.listing"]) class AccountsAfterInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_account_details_available(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this as auth is faked anyway.") account_info = self.client.accounts.show(instance_info.user.tenant_id) # Now check the results. expected = instance_info.user.tenant_id if expected is None: expected = "None" print("account_id.id = '%s'" % account_info.id) print("expected = '%s'" % expected) assert_equal(account_info.id, expected) # Instances: Here we know we've only created one instance. 
assert_equal(1, len(account_info.instances)) assert_is_not_none(account_info.instances[0]['host']) # We know the there's only 1 instance instance = account_info.instances[0] print("instances in account: %s" % instance) assert_equal(instance['id'], instance_info.id) assert_equal(instance['name'], instance_info.name) assert_equal(instance['status'], "ACTIVE") assert_is_not_none(instance['host']) @test def test_list_accounts(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this as auth is faked anyway.") accounts_info = self.client.accounts.index() assert_equal(1, len(accounts_info.accounts)) account = accounts_info.accounts[0] assert_equal(1, account['num_instances']) assert_equal(instance_info.user.tenant_id, account['id']) @test(groups=[tests.POST_INSTANCES, GROUP], depends_on_groups=["dbaas.guest.shutdown"]) class AccountsAfterInstanceDeletion(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) @test def test_no_details_empty_account(self): account_info = self.client.accounts.show(instance_info.user.tenant_id) assert_equal(0, len(account_info.instances)) @test(groups=["fake.dbaas.api.mgmt.allaccounts"], depends_on_groups=["services.initialize"]) class AllAccounts(object): max = 5 def _delete_instances_for_users(self): for user in self.users: user_client = create_dbaas_client(user) while True: deleted_count = 0 user_instances = user_client.instances.list() for instance in user_instances: try: instance.delete() except exceptions.NotFound: deleted_count += 1 except Exception: print("Failed to delete instance") if deleted_count == len(user_instances): break def _create_instances_for_users(self): for user in self.users: user_client = create_dbaas_client(user) for index in range(self.max): name = "instance-%s-%03d" % (user.auth_user, index) user_client.instances.create(name, 1, {'size': 1}, [], []) @before_class def setUp(self): admin_req = Requirements(is_admin=True) self.admin_user = test_config.users.find_user(admin_req) self.admin_client = create_dbaas_client(self.admin_user) user_req = Requirements(is_admin=False) self.users = test_config.users.find_all_users_who_satisfy(user_req) self.user_tenant_ids = [user.tenant_id for user in self.users] self._create_instances_for_users() @test def test_list_accounts_with_multiple_users(self): accounts_info = self.admin_client.accounts.index() for account in accounts_info.accounts: assert_true(account['id'] in self.user_tenant_ids) assert_equal(self.max, account['num_instances']) @after_class(always_run=True) @time_out(60) def tear_down(self): self._delete_instances_for_users() @test(groups=["fake.%s.broken" % GROUP], depends_on_groups=["services.initialize"]) class AccountWithBrokenInstance(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.client = create_dbaas_client(self.user) self.name = 'test_SERVER_ERROR' # Create an instance with a broken compute instance. volume = None if CONFIG.trove_volume_support: volume = {'size': 1} self.response = self.client.instances.create( self.name, instance_info.dbaas_flavor_href, volume, []) poll_until(lambda: self.client.instances.get(self.response.id), lambda instance: instance.status == 'ERROR', time_out=10) self.instance = self.client.instances.get(self.response.id) print("Status: %s" % self.instance.status) msg = "Instance did not drop to error after server prov failure." 
assert_equal(self.instance.status, "ERROR", msg) @test def no_compute_instance_no_problem(self): '''Get account by ID shows even instances lacking computes''' if test_config.auth_strategy == "fake": raise SkipTest("Skipping this as auth is faked anyway.") account_info = self.client.accounts.show(self.user.tenant_id) # All we care about is that accounts.show doesn't 500 on us # for having a broken instance in the roster. assert_equal(len(account_info.instances), 1) instance = account_info.instances[0] assert_true(isinstance(instance['id'], basestring)) assert_equal(len(instance['id']), 36) assert_equal(instance['name'], self.name) assert_equal(instance['status'], "ERROR") assert_is_none(instance['host']) @after_class def tear_down(self): self.client.instances.delete(self.response.id)
{ "content_hash": "e7c6df57a51db5131f0b5817dd7162d7", "timestamp": "", "source": "github", "line_count": 212, "max_line_length": 78, "avg_line_length": 38.783018867924525, "alnum_prop": 0.6425443930917052, "repo_name": "citrix-openstack-build/trove", "id": "cceed886917b111875b1518db74f8b92cc3cfba5", "size": "8838", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "trove/tests/api/mgmt/accounts.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "19900" }, { "name": "JavaScript", "bytes": "7403" }, { "name": "Python", "bytes": "1725275" }, { "name": "Shell", "bytes": "5512" } ], "symlink_target": "" }
"""Translate the raw json files into python specific descriptions.""" import os import re from copy import deepcopy import jmespath from botocore_eb.compat import OrderedDict, json from botocore_eb.utils import merge_dicts from botocore_eb import xform_name class ModelFiles(object): """Container object to hold all the various parsed json files. Includes: * The json service description. * The _retry.json file. * The <service>.extra.json enhancements file. * The name of the service. """ def __init__(self, model, retry, enhancements, name=''): self.model = model self.retry = retry self.enhancements = enhancements self.name = name def load_model_files(args): model = json.load(open(args.modelfile), object_pairs_hook=OrderedDict) retry = json.load(open(args.retry_file), object_pairs_hook=OrderedDict) enhancements = _load_enhancements_file(args.enhancements_file) service_name = os.path.splitext(os.path.basename(args.modelfile))[0] return ModelFiles(model, retry, enhancements, name=service_name) def _load_enhancements_file(file_path): if not os.path.isfile(file_path): return {} else: return json.load(open(file_path), object_pairs_hook=OrderedDict) def translate(model): new_model = deepcopy(model.model) new_model.update(model.enhancements.get('extra', {})) try: del new_model['pagination'] except KeyError: pass handle_op_renames(new_model, model.enhancements) handle_remove_deprecated_params(new_model, model.enhancements) handle_remove_deprecated_operations(new_model, model.enhancements) handle_filter_documentation(new_model, model.enhancements) handle_rename_params(new_model, model.enhancements) add_pagination_configs( new_model, model.enhancements.get('pagination', {})) add_waiter_configs( new_model, model.enhancements.get('waiters', {})) # Merge in any per operation overrides defined in the .extras.json file. merge_dicts(new_model['operations'], model.enhancements.get('operations', {})) add_retry_configs( new_model, model.retry.get('retry', {}), definitions=model.retry.get('definitions', {})) return new_model def handle_op_renames(new_model, enhancements): # This allows for operations to be renamed. The only # implemented transformation is removing part of the operation name # (because that's all we currently need.) remove = enhancements.get('transformations', {}).get( 'operation-name', {}).get('remove') if remove is not None: # We're going to recreate the dictionary because we want to preserve # the order. This is the only option we have unless we have our own # custom OrderedDict. remove_regex = re.compile(remove) operations = new_model['operations'] new_operation = OrderedDict() for key in operations: new_key = remove_regex.sub('', key) new_operation[new_key] = operations[key] new_model['operations'] = new_operation def handle_remove_deprecated_operations(new_model, enhancements): # This removes any operation whose documentation string contains # the specified phrase that marks a deprecated parameter. keyword = enhancements.get('transformations', {}).get( 'remove-deprecated-operations', {}).get('deprecated_keyword') remove = [] if keyword is not None: operations = new_model['operations'] for op_name in operations: operation = operations[op_name] if operation: docs = operation['documentation'] if docs and docs.find(keyword) >= 0: remove.append(op_name) for op in remove: del new_model['operations'][op] def handle_remove_deprecated_params(new_model, enhancements): # This removes any parameter whose documentation string contains # the specified phrase that marks a deprecated parameter. 
keyword = enhancements.get('transformations', {}).get( 'remove-deprecated-params', {}).get('deprecated_keyword') if keyword is not None: operations = new_model['operations'] for op_name in operations: operation = operations[op_name] params = operation.get('input', {}).get('members') if params: new_params = OrderedDict() for param_name in params: param = params[param_name] docs = param['documentation'] if docs and docs.find(keyword) >= 0: continue new_params[param_name] = param operation['input']['members'] = new_params def _filter_param_doc(param, replacement, regex): # Recurse into complex parameters looking for documentation. doc = param.get('documentation') if doc: param['documentation'] = regex.sub(replacement, doc) if param['type'] == 'structure': for member_name in param['members']: member = param['members'][member_name] _filter_param_doc(member, replacement, regex) if param['type'] == 'map': _filter_param_doc(param['keys'], replacement, regex) _filter_param_doc(param['members'], replacement, regex) elif param['type'] == 'list': _filter_param_doc(param['members'], replacement, regex) def handle_filter_documentation(new_model, enhancements): # This provides a way to filter undesireable content (e.g. CDATA) # from documentation strings. doc_filter = enhancements.get('transformations', {}).get( 'filter-documentation', {}).get('filter') if doc_filter is not None: filter_regex = re.compile(doc_filter.get('regex', ''), re.DOTALL) replacement = doc_filter.get('replacement') operations = new_model['operations'] for op_name in operations: operation = operations[op_name] doc = operation.get('documentation') if doc: new_doc = filter_regex.sub(replacement, doc) operation['documentation'] = new_doc params = operation.get('input', {}).get('members') if params: for param_name in params: param = params[param_name] _filter_param_doc(param, replacement, filter_regex) def handle_rename_params(new_model, enhancements): renames = enhancements.get('transformations', {}).get( 'renames', {}) if not renames: return # This is *extremely* specific to botocore's translations, but # we support a restricted set of argument renames based on a # jmespath expression. for expression, new_value in renames.items(): # First we take everything up until the last dot. parent_expression, key = expression.rsplit('.', 1) matched = jmespath.search(parent_expression, new_model['operations']) current = matched[key] del matched[key] matched[new_value] = current def resembles_jmespath_exp(value): # For now, we'll do a naive check. if '.' in value or '[' in value: return True return False def add_pagination_configs(new_model, pagination): # Adding in pagination configs means copying the config to a top level # 'pagination' key in the new model, and it also means adding the # pagination config to each individual operation. # Also, the input_token needs to be transformed to the python specific # name, so we're adding a py_input_token (e.g. NextToken -> next_token). 
if pagination: new_model['pagination'] = pagination for name in pagination: config = pagination[name] _check_known_pagination_keys(config) if 'py_input_token' not in config: _add_py_input_token(config) _validate_result_key_exists(config) _validate_referenced_operation_exists(new_model, name) operation = new_model['operations'][name] _validate_operation_has_output(operation, name) _check_input_keys_match(config, operation) _check_output_keys_match(config, operation, new_model.get('endpoint_prefix', '')) operation['pagination'] = config.copy() def _validate_operation_has_output(operation, name): if not operation['output']: raise ValueError("Trying to add pagination config for an " "operation with no output members: %s" % name) def _validate_referenced_operation_exists(new_model, name): if name not in new_model['operations']: raise ValueError("Trying to add pagination config for non " "existent operation: %s" % name) def _validate_result_key_exists(config): # result_key must be defined. if 'result_key' not in config: raise ValueError("Required key 'result_key' is missing from " "from pagination config: %s" % config) def _add_py_input_token(config): input_token = config['input_token'] if isinstance(input_token, list): py_input_token = [] for token in input_token: py_input_token.append(xform_name(token)) config['py_input_token'] = py_input_token else: config['py_input_token'] = xform_name(input_token) def add_waiter_configs(new_model, waiters): if waiters: denormalized = denormalize_waiters(waiters) # Before adding it to the new model, we need to verify the # final denormalized model. for value in denormalized.values(): if value['operation'] not in new_model['operations']: raise ValueError() new_model['waiters'] = denormalized def denormalize_waiters(waiters): # The waiter configuration is normalized to avoid duplication. # You can inherit defaults, and extend from other definitions. # We're going to denormalize this so that the implementation for # consuming waiters is simple. default = waiters.get('__default__', {}) new_waiters = {} for key, value in waiters.items(): if key.startswith('__'): # Keys that start with '__' are considered abstract/internal # and are only used for inheritance. Because we're going # to denormalize the configs and perform all the lookups # during this translation process, the abstract/internal # configs don't need to make it into the final translated # config so we can just skip these. continue new_waiters[key] = denormalize_single_waiter(value, default, waiters) return new_waiters def denormalize_single_waiter(value, default, waiters): """Denormalize a single waiter config. :param value: The dictionary of a single waiter config, e.g. the ``InstanceRunning`` or ``TableExists`` config. This is the config we're going to denormalize. :param default: The ``__default__`` (if any) configuration. This is needed to resolve the lookup process. :param waiters: The full configuration of the waiters. This is needed if we need to look up at parent class that the current config extends. :return: The denormalized config. :rtype: dict """ # First we need to resolve all the keys based on the inheritance # hierarchy. The lookup process is: # The most bottom/leaf class is ``value``. From there we need # to look up anything it inherits from (denoted via the ``extends`` # key). We need to perform this process recursively until we hit # a config that has no ``extends`` key. # And finally if we haven't found our value yet, we check in the # ``__default__`` key. 
# So the first thing we need to do is build the lookup chain that # starts with ``value`` and ends with ``__default__``. lookup_chain = [value] current = value while True: if 'extends' not in current: break current = waiters[current.get('extends')] lookup_chain.append(current) lookup_chain.append(default) new_waiter = {} # Now that we have this lookup chain we can build the entire set # of values by starting at the most parent class and walking down # to the children. At each step the child is merged onto the parent's # config items. This is the desired behavior as a child's values # overrides its parents. This is what the ``reversed(...)`` call # is for. for element in reversed(lookup_chain): new_waiter.update(element) # We don't care about 'extends' so we can safely remove that key. new_waiter.pop('extends', {}) # Now we need to resolve the success/failure values. We # want to completely remove the acceptor types. # The logic here is that if there is no success/failure_* variable # defined, it inherits this value from the matching acceptor_* variable. new_waiter['success_type'] = new_waiter.get( 'success_type', new_waiter.get('acceptor_type')) new_waiter['success_path'] = new_waiter.get( 'success_path', new_waiter.get('acceptor_path')) new_waiter['success_value'] = new_waiter.get( 'success_value', new_waiter.get('acceptor_value')) new_waiter['failure_type'] = new_waiter.get( 'failure_type', new_waiter.get('acceptor_type')) new_waiter['failure_path'] = new_waiter.get( 'failure_path', new_waiter.get('acceptor_path')) new_waiter['failure_value'] = new_waiter.get( 'failure_value', new_waiter.get('acceptor_value')) # We can remove acceptor_* vars because they're only used for lookups # and we've already performed this step in the lines above. new_waiter.pop('acceptor_type', '') new_waiter.pop('acceptor_path', '') new_waiter.pop('acceptor_value', '') # Remove any keys with a None value. for key in list(new_waiter.keys()): if new_waiter[key] is None: del new_waiter[key] # Check required keys. for required in ['operation', 'success_type']: if required not in new_waiter: raise ValueError('Missing required waiter configuration ' 'value "%s": %s' % (required, new_waiter)) if new_waiter.get(required) is None: raise ValueError('Required waiter configuration ' 'value cannot be None "%s": %s' % (required, new_waiter)) # Finally, success/failure values can be a scalar or a list. We're going # to just always make them a list. if 'success_value' in new_waiter and not \ isinstance(new_waiter['success_value'], list): new_waiter['success_value'] = [new_waiter['success_value']] if 'failure_value' in new_waiter and not \ isinstance(new_waiter['failure_value'], list): new_waiter['failure_value'] = [new_waiter['failure_value']] _transform_waiter(new_waiter) return new_waiter def _transform_waiter(new_waiter): # This transforms the waiters into a format that's slightly # easier to consume. 
if 'success_type' in new_waiter: success = {'type': new_waiter.pop('success_type')} if 'success_path' in new_waiter: success['path'] = new_waiter.pop('success_path') if 'success_value' in new_waiter: success['value'] = new_waiter.pop('success_value') new_waiter['success'] = success if 'failure_type' in new_waiter: failure = {'type': new_waiter.pop('failure_type')} if 'failure_path' in new_waiter: failure['path'] = new_waiter.pop('failure_path') if 'failure_value' in new_waiter: failure['value'] = new_waiter.pop('failure_value') new_waiter['failure'] = failure def _check_known_pagination_keys(config): # Verify that the pagination config only has keys we expect to see. expected = set(['input_token', 'py_input_token', 'output_token', 'result_key', 'limit_key', 'more_results', 'non_aggregate_keys']) for key in config: if key not in expected: raise ValueError("Unknown key in pagination config: %s" % key) def _check_output_keys_match(config, operation, service_name): output_members = list(operation['output']['members']) jmespath_seen = False for output_key in _get_all_page_output_keys(config): if resembles_jmespath_exp(output_key): # We don't validate jmespath expressions for now. jmespath_seen = True continue if output_key not in output_members: raise ValueError("Key %r is not an output member: %s" % (output_key, output_members)) output_members.remove(output_key) # Some services echo the input parameters in the response # output. We should not trigger a validation error # if those params are still not accounted for. for input_name in operation['input']['members']: if input_name in output_members: output_members.remove(input_name) if not jmespath_seen and output_members: # Because we can't validate jmespath expressions yet, # we can't say for user if output_members actually has # remaining keys or not. if service_name == 's3' and output_members == ['Name']: # The S3 model uses 'Name' for the output key, which # actually maps to the 'Bucket' input param so we don't # need to validate this output member. This is the only # model that has this, so we can just special case this # for now. return raise ValueError("Output members still exist for operation %s: %s" % ( operation['name'], output_members)) def _get_all_page_output_keys(config): if not isinstance(config['result_key'], list): yield config['result_key'] else: for result_key in config['result_key']: yield result_key if not isinstance(config['output_token'], list): yield config['output_token'] else: for result_key in config['output_token']: yield result_key if 'more_results' in config: yield config['more_results'] for key in config.get('non_aggregate_keys', []): yield key def _check_input_keys_match(config, operation): input_tokens = config['input_token'] if not isinstance(input_tokens, list): input_tokens = [input_tokens] valid_input_names = operation['input']['members'] for token in input_tokens: if token not in valid_input_names: raise ValueError("input_token refers to a non existent " "input name for operation %s: %s. " "Must be one of: %s" % (operation['name'], token, list(valid_input_names))) if 'limit_key' in config and config['limit_key'] not in valid_input_names: raise ValueError("limit_key refers to a non existent input name for " "operation %s: %s. 
Must be one of: %s" % ( operation['name'], config['limit_key'], list(valid_input_names))) def add_retry_configs(new_model, retry_model, definitions): if not retry_model: new_model['retry'] = {} return # The service specific retry config is keyed off of the endpoint # prefix as defined in the JSON model. endpoint_prefix = new_model.get('endpoint_prefix', '') final_retry_config = build_retry_config(endpoint_prefix, retry_model, definitions) new_model['retry'] = final_retry_config def build_retry_config(endpoint_prefix, retry_model, definitions): service_config = retry_model.get(endpoint_prefix, {}) resolve_references(service_config, definitions) # We want to merge the global defaults with the service specific # defaults, with the service specific defaults taking precedence. # So we use the global defaults as the base. final_retry_config = {'__default__': retry_model.get('__default__', {})} resolve_references(final_retry_config, definitions) # The merge the service specific config on top. merge_dicts(final_retry_config, service_config) return final_retry_config def resolve_references(config, definitions): """Recursively replace $ref keys. To cut down on duplication, common definitions can be declared (and passed in via the ``definitions`` attribute) and then references as {"$ref": "name"}, when this happens the reference dict is placed with the value from the ``definition`` dict. This is recursively done. """ for key, value in config.items(): if isinstance(value, dict): if len(value) == 1 and list(value.keys())[0] == '$ref': # Then we need to resolve this reference. config[key] = definitions[list(value.values())[0]] else: resolve_references(value, definitions)
{ "content_hash": "813d1634a48f47ed16f6120abcb36485", "timestamp": "", "source": "github", "line_count": 510, "max_line_length": 78, "avg_line_length": 41.90196078431372, "alnum_prop": 0.62817033224146, "repo_name": "ianblenke/awsebcli", "id": "5385329311fc9c9873f359697d4280c14de41406", "size": "21997", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "botocore_eb/translate.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Groff", "bytes": "208" }, { "name": "Makefile", "bytes": "633" }, { "name": "Python", "bytes": "3447856" }, { "name": "Shell", "bytes": "280" } ], "symlink_target": "" }
""" Error messages, data and custom validation code used in django-registration's various user-registration form classes. """ import re from django.conf import settings from django.core.exceptions import ValidationError from django.core.validators import RegexValidator from django.utils import six from django.utils.translation import ugettext_lazy as _ from confusable_homoglyphs import confusables CONFUSABLE = _(u"This name cannot be registered. " "Please choose a different name.") CONFUSABLE_EMAIL = _(u"This email address cannot be registered. " "Please supply a different email address.") DUPLICATE_EMAIL = _(u"This email address is already in use. " u"Please supply a different email address.") FREE_EMAIL = _(u"Registration using free email addresses is prohibited. " u"Please supply a different email address.") RESERVED_NAME = _(u"This name is reserved and cannot be registered.") TOS_REQUIRED = _(u"You must agree to the terms to register") # Below we construct a large but non-exhaustive list of names which # users probably should not be able to register with, due to various # risks: # # * For a site which creates email addresses from username, important # common addresses must be reserved. # # * For a site which creates subdomains from usernames, important # common hostnames/domain names must be reserved. # # * For a site which uses the username to generate a URL to the user's # profile, common well-known filenames must be reserved. # # etc., etc. # # Credit for basic idea and most of the list to Geoffrey Thomas's blog # post about names to reserve: # https://ldpreload.com/blog/names-to-reserve SPECIAL_HOSTNAMES = [ # Hostnames with special/reserved meaning. 'autoconfig', # Thunderbird autoconfig 'autodiscover', # MS Outlook/Exchange autoconfig 'broadcasthost', # Network broadcast hostname 'isatap', # IPv6 tunnel autodiscovery 'localdomain', # Loopback 'localhost', # Loopback 'wpad', # Proxy autodiscovery ] PROTOCOL_HOSTNAMES = [ # Common protocol hostnames. 'ftp', 'imap', 'mail', 'news', 'pop', 'pop3', 'smtp', 'usenet', 'uucp', 'webmail', 'www', ] CA_ADDRESSES = [ # Email addresses known used by certificate authorities during # verification. 'admin', 'administrator', 'hostmaster', 'info', 'is', 'it', 'mis', 'postmaster', 'root', 'ssladmin', 'ssladministrator', 'sslwebmaster', 'sysadmin', 'webmaster', ] RFC_2142 = [ # RFC-2142-defined names not already covered. 'abuse', 'marketing', 'noc', 'sales', 'security', 'support', ] NOREPLY_ADDRESSES = [ # Common no-reply email addresses. 'mailer-daemon', 'nobody', 'noreply', 'no-reply', ] SENSITIVE_FILENAMES = [ # Sensitive filenames. 'clientaccesspolicy.xml', # Silverlight cross-domain policy file. 'crossdomain.xml', # Flash cross-domain policy file. 'favicon.ico', 'humans.txt', 'keybase.txt', # Keybase ownership-verification URL. 'robots.txt', '.htaccess', '.htpasswd', ] OTHER_SENSITIVE_NAMES = [ # Other names which could be problems depending on URL/subdomain # structure. 
    'account',
    'accounts',
    'blog',
    'buy',
    'clients',
    'contact',
    'contactus',
    'contact-us',
    'copyright',
    'dashboard',
    'doc',
    'docs',
    'download',
    'downloads',
    'enquiry',
    'faq',
    'help',
    'inquiry',
    'license',
    'login',
    'logout',
    'me',
    'myaccount',
    'payments',
    'plans',
    'portfolio',
    'preferences',
    'pricing',
    'privacy',
    'profile',
    'register',
    'secure',
    'settings',
    'signin',
    'signup',
    'ssl',
    'status',
    'subscribe',
    'terms',
    'tos',
    'user',
    'users',
    'weblog',
    'work',
]


DEFAULT_RESERVED_NAMES = (SPECIAL_HOSTNAMES + PROTOCOL_HOSTNAMES +
                          CA_ADDRESSES + RFC_2142 + NOREPLY_ADDRESSES +
                          SENSITIVE_FILENAMES + OTHER_SENSITIVE_NAMES)


class ReservedNameValidator(object):
    """
    Validator which disallows many reserved names as form field
    values.

    """
    def __init__(self, reserved_names=DEFAULT_RESERVED_NAMES):
        self.reserved_names = reserved_names

    def __call__(self, value):
        # GH issue 82: this validator only makes sense when the
        # username field is a string type.
        if not isinstance(value, six.text_type):
            return
        if value in self.reserved_names or \
           value.startswith('.well-known'):
            raise ValidationError(
                RESERVED_NAME, code='invalid'
            )


def validate_confusables(value):
    """
    Validator which disallows 'dangerous' usernames likely to
    represent homograph attacks.

    A username is 'dangerous' if it is mixed-script (as defined by
    Unicode 'Script' property) and contains one or more characters
    appearing in the Unicode Visually Confusable Characters file.

    """
    if not isinstance(value, six.text_type):
        return
    if confusables.is_dangerous(value):
        raise ValidationError(CONFUSABLE, code='invalid')


def validate_confusables_email(value):
    """
    Validator which disallows 'dangerous' email addresses likely to
    represent homograph attacks.

    An email address is 'dangerous' if either the local-part or the
    domain, considered on their own, are mixed-script and contain one
    or more characters appearing in the Unicode Visually Confusable
    Characters file.

    """
    if '@' not in value:
        return
    local_part, domain = value.split('@')
    if confusables.is_dangerous(local_part) or \
       confusables.is_dangerous(domain):
        raise ValidationError(CONFUSABLE_EMAIL, code='invalid')


namespace_regex = re.compile(r'[a-z0-9]+[a-z0-9-]*[a-z0-9]+$')

LOWERCASE_NUMBERS_HYPHENS_HELP = _(
    "Enter a value consisting of lower-case letters, numbers or hyphens. "
    "Hyphens can not occur at the start or end of the chosen value."
)


def validate_namespace(namespace_value):
    if namespace_value in getattr(settings, 'RESERVED_PROFILE_NAMESPACE_LIST'):
        raise ValidationError(
            _('You cannot use this reserved namespace.'))

    RegexValidator(regex=namespace_regex,
                   message=LOWERCASE_NUMBERS_HYPHENS_HELP,
                   code='invalid')(namespace_value)
{ "content_hash": "c5982f1f1f3f8ed56a4c062701f9ac2e", "timestamp": "", "source": "github", "line_count": 248, "max_line_length": 79, "avg_line_length": 26.45967741935484, "alnum_prop": 0.6405059433099665, "repo_name": "memodir/cv", "id": "e70b31b7a24854ade776c9020ce7f2440f09be78", "size": "6664", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/myaccount/validators.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "497484" }, { "name": "Dockerfile", "bytes": "1149" }, { "name": "HTML", "bytes": "50820" }, { "name": "JavaScript", "bytes": "696134" }, { "name": "Makefile", "bytes": "5820" }, { "name": "Python", "bytes": "188601" }, { "name": "Shell", "bytes": "1268" }, { "name": "Vue", "bytes": "6858" } ], "symlink_target": "" }
import os try: from setuptools import setup except ImportError: from distutils.core import setup # Get version and release info, which is all stored in LUH2/version.py ver_file = os.path.join('LUH2', 'version.py') with open(ver_file) as f: exec(f.read()) opts = dict(name=NAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, classifiers=CLASSIFIERS, author=AUTHOR, author_email=AUTHOR_EMAIL, platforms=PLATFORMS, version=VERSION, packages=PACKAGES, package_data=PACKAGE_DATA, requires=REQUIRES) if __name__ == '__main__': setup(**opts)
{ "content_hash": "7123dca01c9a5ad28a3de55a01e84648", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 70, "avg_line_length": 27.870967741935484, "alnum_prop": 0.5949074074074074, "repo_name": "ritviksahajpal/LUH2", "id": "0fc9a61ae250809868c76b4b03c432d3715a8471", "size": "864", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "47269" }, { "name": "Python", "bytes": "393255" }, { "name": "R", "bytes": "1531" } ], "symlink_target": "" }
from qiime2.plugin import SemanticType from ..plugin_setup import plugin from ..sample_data import SampleData from . import (QIIME1DemuxDirFmt, SingleLanePerSampleSingleEndFastqDirFmt, SingleLanePerSamplePairedEndFastqDirFmt) Sequences = SemanticType('Sequences', variant_of=SampleData.field['type']) SequencesWithQuality = SemanticType( 'SequencesWithQuality', variant_of=SampleData.field['type']) PairedEndSequencesWithQuality = SemanticType( 'PairedEndSequencesWithQuality', variant_of=SampleData.field['type']) JoinedSequencesWithQuality = SemanticType( 'JoinedSequencesWithQuality', variant_of=SampleData.field['type']) plugin.register_semantic_types(Sequences, SequencesWithQuality, PairedEndSequencesWithQuality, JoinedSequencesWithQuality) plugin.register_artifact_class( SampleData[Sequences], directory_format=QIIME1DemuxDirFmt, description=("Collections of sequences associated with specified samples " "(i.e., demultiplexed sequences).") ) plugin.register_artifact_class( SampleData[SequencesWithQuality], directory_format=SingleLanePerSampleSingleEndFastqDirFmt, description=("Collections of sequences with quality scores associated " "with specified samples (i.e., demultiplexed sequences).") ) plugin.register_artifact_class( SampleData[JoinedSequencesWithQuality], directory_format=SingleLanePerSampleSingleEndFastqDirFmt, description=("Collections of joined paired-end sequences with quality " "scores associated with specified samples (i.e., " "demultiplexed sequences).") ) plugin.register_artifact_class( SampleData[PairedEndSequencesWithQuality], directory_format=SingleLanePerSamplePairedEndFastqDirFmt, description=("Collections of unjoined paired-end sequences with quality " "scores associated with specified samples (i.e., " "demultiplexed sequences).") )
{ "content_hash": "50305a3a28dc0f4d307c9a062e24d6e1", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 78, "avg_line_length": 44.04347826086956, "alnum_prop": 0.736426456071076, "repo_name": "qiime2/q2-types", "id": "1297b6619b7cdb4c37a09e6446f54827737ed4a3", "size": "2376", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "q2_types/per_sample_sequences/_type.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "269" }, { "name": "Python", "bytes": "418135" }, { "name": "TeX", "bytes": "1121" } ], "symlink_target": "" }
"""TensorFlow Lite Python Interface: Sanity check.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.lite.python import convert from tensorflow.lite.python import lite_constants from tensorflow.lite.python import op_hint from tensorflow.lite.python.interpreter import Interpreter from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes from tensorflow.python.framework.graph_util_impl import _extract_graph_summary from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ConvertTest(test_util.TensorFlowTestCase): def testBasic(self): in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = in_tensor + in_tensor sess = session.Session() # Try running on valid graph tflite_model = convert.toco_convert(sess.graph_def, [in_tensor], [out_tensor]) self.assertTrue(tflite_model) # TODO(aselle): remove tests that fail (we must get TOCO to not fatal # all the time). # Try running on identity graph (known fail) # with self.assertRaisesRegexp(RuntimeError, "!model->operators.empty()"): # result = convert.toco_convert(sess.graph_def, [in_tensor], [in_tensor]) def testQuantization(self): in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3], dtype=dtypes.float32) out_tensor = array_ops.fake_quant_with_min_max_args(in_tensor + in_tensor, min=0., max=1.) sess = session.Session() tflite_model = convert.toco_convert( sess.graph_def, [in_tensor], [out_tensor], inference_type=lite_constants.QUANTIZED_UINT8, quantized_input_stats=[(0., 1.)]) self.assertTrue(tflite_model) def testGraphDefBasic(self): in_tensor = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input") _ = in_tensor + in_tensor sess = session.Session() tflite_model = convert.toco_convert_graph_def( sess.graph_def, [("input", [1, 16, 16, 3])], ["add"], inference_type=lite_constants.FLOAT) self.assertTrue(tflite_model) # Check values from converted model. 
interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(1, len(input_details)) self.assertEqual("input", input_details[0]["name"]) self.assertEqual(np.float32, input_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) self.assertEqual((0., 0.), input_details[0]["quantization"]) output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual("add", output_details[0]["name"]) self.assertEqual(np.float32, output_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) self.assertEqual((0., 0.), output_details[0]["quantization"]) def testGraphDefQuantization(self): in_tensor_1 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA") in_tensor_2 = array_ops.placeholder( shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB") _ = array_ops.fake_quant_with_min_max_args( in_tensor_1 + in_tensor_2, min=0., max=1., name="output") sess = session.Session() input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])] output_arrays = ["output"] tflite_model = convert.toco_convert_graph_def( sess.graph_def, input_arrays_map, output_arrays, inference_type=lite_constants.QUANTIZED_UINT8, quantized_input_stats=[(0., 1.), (0., 1.)]) self.assertTrue(tflite_model) # Check values from converted model. interpreter = Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(2, len(input_details)) self.assertEqual("inputA", input_details[0]["name"]) self.assertEqual(np.uint8, input_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) self.assertEqual((1., 0.), input_details[0]["quantization"]) # scale, zero_point self.assertEqual("inputB", input_details[1]["name"]) self.assertEqual(np.uint8, input_details[1]["dtype"]) self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all()) self.assertEqual((1., 0.), input_details[1]["quantization"]) # scale, zero_point output_details = interpreter.get_output_details() self.assertEqual(1, len(output_details)) self.assertEqual("output", output_details[0]["name"]) self.assertEqual(np.uint8, output_details[0]["dtype"]) self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) self.assertTrue(output_details[0]["quantization"][0] > 0) # scale class ConvertTestOpHint(test_util.TensorFlowTestCase): """Test the hint to stub functionality.""" def _getGraphOpTypes(self, graphdef, output_nodes): """Returns used op types in `graphdef` reachable from `output_nodes`. This is used to check that after the stub transformation the expected nodes are there. NOTE: this is not a exact test that the graph is the correct output, but it balances compact expressibility of test with sanity checking. Args: graphdef: TensorFlow proto graphdef. output_nodes: A list of output node names that we need to reach. Returns: A set of node types reachable from `output_nodes`. """ name_to_input_name, name_to_node, _ = ( _extract_graph_summary(graphdef)) # Find all nodes that are needed by the outputs used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name) return set([name_to_node[node_name].op for node_name in used_node_names]) def _countIdentities(self, nodes): """Count the number of "Identity" op types in the list of proto nodes. Args: nodes: NodeDefs of the graph. 
Returns: The number of nodes with op type "Identity" found. """ return len([x for x in nodes if x.op == "Identity"]) def testSwishLiteHint(self): """Makes a custom op swish and makes sure it gets converted as a unit.""" image = array_ops.constant([1., 2., 3., 4.]) swish_scale = array_ops.constant(1.0) def _swish(input_tensor, scale): custom = op_hint.OpHint("cool_activation") input_tensor, scale = custom.add_inputs(input_tensor, scale) output = math_ops.sigmoid(input_tensor) * input_tensor * scale output, = custom.add_outputs(output) return output output = array_ops.identity(_swish(image, swish_scale), name="ModelOutput") with self.cached_session() as sess: # check if identities have been put into the graph (2 input, 1 output, # and 1 final output). self.assertEqual(self._countIdentities(sess.graph_def.node), 4) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["cool_activation", "Const", "Identity"])) def testScaleAndBiasAndIdentity(self): """This tests a scaled add which has 3 inputs and 2 outputs.""" a = array_ops.constant(1.) x = array_ops.constant([2., 3.]) b = array_ops.constant([4., 5.]) def _scaled_and_bias_and_identity(a, x, b): custom = op_hint.OpHint("scale_and_bias_and_identity") a, x, b = custom.add_inputs(a, x, b) return custom.add_outputs(a * x + b, x) output = array_ops.identity(_scaled_and_bias_and_identity(a, x, b), name="ModelOutput") with self.cached_session() as sess: # make sure one identity for each input (3) and output (2) => 3 + 2 = 5 # +1 for the final output self.assertEqual(self._countIdentities(sess.graph_def.node), 6) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"])) def testTwoFunctions(self): """Tests if two functions are converted correctly.""" a = array_ops.constant([1.]) b = array_ops.constant([1.]) def _double_values(x): custom = op_hint.OpHint("add_test") x, = custom.add_inputs(x) output = math_ops.multiply(x, x) output, = custom.add_outputs(output) return output output = array_ops.identity( math_ops.add(_double_values(a), _double_values(b)), name="ModelOutput") with self.cached_session() as sess: # make sure one identity for each input (2) and output (2) => 2 + 2 # +1 for the final output self.assertEqual(self._countIdentities(sess.graph_def.node), 5) stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["add_test", "Const", "Identity", "Add"])) def _get_input_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i def _get_output_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i def _get_sort_index(self, x): return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i def testTags(self): """Test if multiple args with the same tag are grouped.""" a = array_ops.constant([1.]) b = array_ops.constant([2.]) c = array_ops.constant([3.]) d = array_ops.constant([4.]) custom = op_hint.OpHint("test_tag") a = custom.add_input(a, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK) b, = custom.add_inputs(b) c = custom.add_input(c, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK) d = 
custom.add_input(d, tag="mytag2", aggregate=op_hint.OpHint.AGGREGATE_STACK) res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b)) custom.add_outputs([res]) with self.cached_session(): self.assertEqual(self._get_input_index(a), 0) self.assertEqual(self._get_sort_index(a), 0) self.assertEqual(self._get_input_index(b), 1) self.assertEqual(self._get_input_index(c), 0) self.assertEqual(self._get_sort_index(c), 1) def testOverrideIndex(self): a = array_ops.constant([1.]) b = array_ops.constant([2.]) c = array_ops.constant([3.]) custom = op_hint.OpHint("test_override") b = custom.add_input(b) # should auto assign 0 a = custom.add_input(a, index_override=1) c = custom.add_input(c) # should auto assign 2 with self.cached_session(): self.assertEqual(self._get_input_index(a), 1) self.assertEqual(self._get_input_index(b), 0) self.assertEqual(self._get_input_index(c), 2) def testAggregate(self): a = array_ops.constant([3., 4.]) b = array_ops.constant([5., 6.]) hint = op_hint.OpHint("agg") a0, a1 = array_ops.unstack(a) b0, b1 = array_ops.unstack(b) a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK) b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK) a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK) b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK) c0 = math_ops.add(a0, b0, name="addleft") c1 = math_ops.add(a1, b1, name="addright") c0 = hint.add_output( c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK) c1 = hint.add_output( c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK) curr = array_ops.stack([c0, c1]) output = array_ops.identity(curr, name="FINAL_OUTPUT") with self.cached_session() as sess: stubbed_graphdef = op_hint.convert_op_hints_to_stubs( graph_def=sess.graph_def) self.assertEqual( self._getGraphOpTypes( stubbed_graphdef, output_nodes=[op_hint._tensor_name_base(output.name)]), set(["agg", "Const", "Identity"])) if __name__ == "__main__": test.main()
{ "content_hash": "aaf2c569404c7b5a675ae98641e30f4b", "timestamp": "", "source": "github", "line_count": 320, "max_line_length": 80, "avg_line_length": 40.28125, "alnum_prop": 0.6409619860356865, "repo_name": "seanli9jan/tensorflow", "id": "7a0bce921b599f0dad9012c3148abd7a86496594", "size": "13579", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tensorflow/lite/python/convert_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "3301" }, { "name": "Batchfile", "bytes": "10132" }, { "name": "C", "bytes": "446293" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "50950243" }, { "name": "CMake", "bytes": "198845" }, { "name": "Dockerfile", "bytes": "36908" }, { "name": "Go", "bytes": "1285854" }, { "name": "HTML", "bytes": "4681865" }, { "name": "Java", "bytes": "869263" }, { "name": "Jupyter Notebook", "bytes": "2611125" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "62216" }, { "name": "Objective-C", "bytes": "15634" }, { "name": "Objective-C++", "bytes": "101475" }, { "name": "PHP", "bytes": "5191" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "40335927" }, { "name": "RobotFramework", "bytes": "891" }, { "name": "Ruby", "bytes": "553" }, { "name": "Shell", "bytes": "487251" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
import sys
import os
import errno  # needed to translate error numbers raised by the HCI calls below
import struct
import binascii
from time import sleep
from ctypes import (CDLL, get_errno)
from ctypes.util import find_library
from socket import (socket, AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI, SOL_HCI, HCI_FILTER,)

os.system("hciconfig hci0 down")
os.system("hciconfig hci0 up")

if not os.geteuid() == 0:
    sys.exit("script only works as root")

btlib = find_library("bluetooth")
if not btlib:
    raise Exception(
        "Can't find required bluetooth libraries"
        " (need to install bluez)"
    )
bluez = CDLL(btlib, use_errno=True)

dev_id = bluez.hci_get_route(None)

sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
sock.bind((dev_id,))

err = bluez.hci_le_set_scan_parameters(sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000)
if err < 0:
    # occurs when scanning is still enabled from a previous call
    raise Exception("Set scan parameters failed")

# allows LE advertising events
hci_filter = struct.pack(
    "<IQH",
    0x00000010,
    0x4000000000000000,
    0
)
sock.setsockopt(SOL_HCI, HCI_FILTER, hci_filter)

err = bluez.hci_le_set_scan_enable(
    sock.fileno(),
    1,    # 1 - turn on;  0 - turn off
    0,    # 0 - filtering disabled, 1 - filter out duplicates
    1000  # timeout
)
if err < 0:
    errnum = get_errno()
    raise Exception("{} {}".format(
        errno.errorcode[errnum],
        os.strerror(errnum)
    ))

distanceAway = 1  # distance away from the estimote beacon in meters
with open("RSSI_data" + str(distanceAway) + ".csv", "w") as out_file:
    for x in range(1, 100):
        data = sock.recv(1024)
        # the last byte of an LE advertising report event holds the RSSI
        RSSI = int(binascii.b2a_hex(data[-1]), 16) - 255
        out_string = ""
        out_string += str(RSSI)
        out_string += "\n"
        out_file.write(out_string)

sock.close()
sys.exit()
{ "content_hash": "8bf9f81eabe23d696a5e88fcf7edbd0c", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 86, "avg_line_length": 26.075757575757574, "alnum_prop": 0.6676350958744915, "repo_name": "jsantoso91/smartlighting", "id": "22cb086fbb9e77e3b70d9d7d970047c565c16d3e", "size": "1721", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "characterizeRSSI.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "43737" } ], "symlink_target": "" }
""" Contains the unittests for genomes """ # Internal modules # from genomes import Assembly # Unittesting module # try: import unittest2 as unittest except ImportError: import unittest # Nosetest flag # __test__ = True ################################################################################### class Test(unittest.TestCase): def runTest(self): # Test the guessing # a = Assembly('sacCer2') tests = [(1,'chrI'),('chr1','chrI'),('chrI','chrI'),('I','chrI')] for input,expected in tests: got = a.guess_chromosome_name(input) self.assertEqual(got, expected) # Test unicode # a = Assembly(u'sacCer2') tests = [('Q','chrM')] for input,expected in tests: got = a.guess_chromosome_name(input) self.assertEqual(got, expected)
{ "content_hash": "ff83b81b09ba2caa52cd4f97b87215e8", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 83, "avg_line_length": 27.580645161290324, "alnum_prop": 0.5368421052631579, "repo_name": "xapple/genomes", "id": "f2eb80ab1e84f9f74f87f3ee84305350c0c21741", "size": "855", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "genomes/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "4584" }, { "name": "Python", "bytes": "19378" } ], "symlink_target": "" }
import datetime import time import boto import redis import requests import random import zlib import re from django.shortcuts import get_object_or_404 from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.template.loader import render_to_string from django.db import IntegrityError from django.db.models import Q from django.views.decorators.cache import never_cache from django.core.urlresolvers import reverse from django.contrib.auth import login as login_user from django.contrib.auth import logout as logout_user from django.contrib.auth.models import User from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404, UnreadablePostError from django.conf import settings from django.core.mail import mail_admins from django.core.validators import email_re from django.core.mail import EmailMultiAlternatives from django.contrib.sites.models import Site from django.utils import feedgenerator from django.utils.encoding import smart_unicode from mongoengine.queryset import OperationError from mongoengine.queryset import NotUniqueError from apps.recommendations.models import RecommendedFeed from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed from apps.profile.models import Profile, MCustomStyling from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature from apps.reader.forms import SignupForm, LoginForm, FeatureForm from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts, MSavedSearch from apps.notifications.models import MUserFeedNotification from apps.search.models import MUserSearch from apps.statistics.models import MStatistics, MAnalyticsLoader # from apps.search.models import SearchStarredStory try: from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory except: pass from apps.social.models import MSharedStory, MSocialProfile, MSocialServices from apps.social.models import MSocialSubscription, MActivity, MInteraction from apps.categories.models import MCategory from apps.social.views import load_social_page from apps.rss_feeds.tasks import ScheduleImmediateFetches from utils import json_functions as json from utils.user_functions import get_user, ajax_login_required from utils.feed_functions import relative_timesince from utils.story_functions import format_story_link_date__short from utils.story_functions import format_story_link_date__long from utils.story_functions import strip_tags from utils import log as logging from utils.view_functions import get_argument_or_404, render_to, is_true from utils.view_functions import required_params from utils.ratelimit import ratelimit from vendor.timezones.utilities import localtime_for_timezone import tweepy BANNED_URLS = [ "brentozar.com", ] @never_cache @render_to('reader/dashboard.xhtml') def index(request, **kwargs): if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']: username = request.subdomain if '.' 
@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
    if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']:
        username = request.subdomain
        if '.' in username:
            username = username.split('.')[0]
        user = User.objects.filter(username=username)
        if not user:
            user = User.objects.filter(username__iexact=username)
        if user:
            user = user[0]
        if not user:
            return HttpResponseRedirect('http://%s%s' % (
                Site.objects.get_current().domain,
                reverse('index')))
        return load_social_page(request, user_id=user.pk, username=request.subdomain, **kwargs)

    if request.user.is_anonymous():
        return welcome(request, **kwargs)
    else:
        return dashboard(request, **kwargs)

def dashboard(request, **kwargs):
    user = request.user
    feed_count = UserSubscription.objects.filter(user=request.user).count()
    recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
                                                       approved_date__lte=datetime.datetime.now()
                                                       ).select_related('feed')[:2]
    unmoderated_feeds = []
    if user.is_staff:
        unmoderated_feeds = RecommendedFeed.objects.filter(is_public=False,
                                                           declined_date__isnull=True
                                                           ).select_related('feed')[:2]
    statistics = MStatistics.all()
    social_profile = MSocialProfile.get_user(user.pk)
    custom_styling = MCustomStyling.get_user(user.pk)

    start_import_from_google_reader = request.session.get('import_from_google_reader', False)
    if start_import_from_google_reader:
        del request.session['import_from_google_reader']

    if not user.is_active:
        url = "https://%s%s" % (Site.objects.get_current().domain,
                                reverse('stripe-form'))
        return HttpResponseRedirect(url)

    logging.user(request, "~FBLoading dashboard")

    return {
        'user_profile' : user.profile,
        'feed_count' : feed_count,
        'custom_styling' : custom_styling,
        'account_images' : range(1, 4),
        'recommended_feeds' : recommended_feeds,
        'unmoderated_feeds' : unmoderated_feeds,
        'statistics' : statistics,
        'social_profile' : social_profile,
        'start_import_from_google_reader': start_import_from_google_reader,
        'debug' : settings.DEBUG,
    }, "reader/dashboard.xhtml"

def welcome(request, **kwargs):
    user = get_user(request)
    statistics = MStatistics.all()
    social_profile = MSocialProfile.get_user(user.pk)

    if request.method == "POST":
        if request.POST.get('submit', '').startswith('log'):
            login_form = LoginForm(request.POST, prefix='login')
            signup_form = SignupForm(prefix='signup')
        else:
            signup_form = SignupForm(request.POST, prefix='signup')
            return { "form": signup_form }, "accounts/signup.xhtml"
    else:
        login_form = LoginForm(prefix='login')
        signup_form = SignupForm(prefix='signup')

    logging.user(request, "~FBLoading welcome")

    return {
        'user_profile' : hasattr(user, 'profile') and user.profile,
        'login_form' : login_form,
        'signup_form' : signup_form,
        'statistics' : statistics,
        'social_profile' : social_profile,
        'post_request' : request.method == 'POST',
    }, "reader/welcome.xhtml"

@never_cache
def login(request):
    code = -1
    message = ""
    if request.method == "POST":
        form = LoginForm(request.POST, prefix='login')
        if form.is_valid():
            login_user(request, form.get_user())
            if request.POST.get('api'):
                logging.user(form.get_user(), "~FG~BB~SKiPhone Login~FW")
                code = 1
            else:
                logging.user(form.get_user(), "~FG~BBLogin~FW")
                return HttpResponseRedirect(reverse('index'))
        else:
            message = form.errors.items()[0][1][0]

    if request.POST.get('api'):
        return HttpResponse(json.encode(dict(code=code, message=message)), mimetype='application/json')
    else:
        return index(request)

@never_cache
@render_to('accounts/signup.html')
def signup(request):
    if request.method == "POST":
        signup_form = SignupForm(request.POST, prefix='signup')
        return { "form": signup_form }
        # form = SignupForm(prefix='signup', data=request.POST)
        # if form.is_valid():
        #     new_user = form.save()
        #     login_user(request, new_user)
        #     logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
        #     if not new_user.is_active:
        #         url = "https://%s%s" % (Site.objects.get_current().domain,
        #                                 reverse('stripe-form'))
        #         return HttpResponseRedirect(url)

    return index(request)

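# Note: dashboard() and welcome() return (context, template) pairs instead of
# HttpResponses; they are only reached through index(), whose @render_to decorator
# (imported from utils.view_functions) appears to render the chosen template with
# that context.
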
@never_cache
def logout(request):
    logging.user(request, "~FG~BBLogout~FW")
    logout_user(request)

    if request.GET.get('api'):
        return HttpResponse(json.encode(dict(code=1)), mimetype='application/json')
    else:
        return HttpResponseRedirect(reverse('index'))

def autologin(request, username, secret):
    next = request.GET.get('next', '')
    if not username or not secret:
        return HttpResponseForbidden()

    profile = Profile.objects.filter(user__username=username, secret_token=secret)
    if not profile:
        return HttpResponseForbidden()

    user = profile[0].user
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    login_user(request, user)
    logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (next if next else 'Homepage',))

    if next and not next.startswith('/'):
        next = '?next=' + next
        return HttpResponseRedirect(reverse('index') + next)
    elif next:
        return HttpResponseRedirect(next)
    else:
        return HttpResponseRedirect(reverse('index'))

@ratelimit(minutes=1, requests=60)
@never_cache
@json.json_view
def load_feeds(request):
    user = get_user(request)
    feeds = {}
    include_favicons = is_true(request.REQUEST.get('include_favicons', False))
    flat = is_true(request.REQUEST.get('flat', False))
    update_counts = is_true(request.REQUEST.get('update_counts', False))
    version = int(request.REQUEST.get('v', 1))

    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    if flat == 'false': flat = False

    if flat: return load_feeds_flat(request)

    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)

    user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
    notifications = MUserFeedNotification.feeds_for_user(user.pk)

    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    for sub in user_subs:
        pk = sub.feed_id
        if update_counts and sub.needs_unread_recalc:
            sub.calculate_feed_scores(silent=True)
        feeds[pk] = sub.canonical(include_favicon=include_favicons)

        if not sub.active: continue
        if pk in notifications:
            feeds[pk].update(notifications[pk])
        if not sub.feed.active and not sub.feed.has_feed_exception:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.active_subscribers <= 0:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.next_scheduled_update < day_ago:
            scheduled_feeds.append(sub.feed.pk)

    if len(scheduled_feeds) > 0 and request.user.is_authenticated():
        logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..."
                     % len(scheduled_feeds))
        ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))

    starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
    if not starred_count and len(starred_counts):
        starred_count = MStarredStory.objects(user_id=user.pk).count()

    saved_searches = MSavedSearch.user_searches(user.pk)

    social_params = {
        'user_id': user.pk,
        'include_favicon': include_favicons,
        'update_counts': update_counts,
    }
    social_feeds = MSocialSubscription.feeds(**social_params)
    social_profile = MSocialProfile.profile(user.pk)
    social_services = MSocialServices.profile(user.pk)

    categories = None
    if not user_subs:
        categories = MCategory.serialize()

    logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
        len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))

    data = {
        'feeds': feeds.values() if version == 2 else feeds,
        'social_feeds': social_feeds,
        'social_profile': social_profile,
        'social_services': social_services,
        'user_profile': user.profile,
        "is_staff": user.is_staff,
        'user_id': user.pk,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
        'starred_counts': starred_counts,
        'saved_searches': saved_searches,
        'categories': categories
    }
    return data

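# Illustrative client call for load_feeds() above (a sketch only: the exact URL is
# defined in urls.py, not in this file, and the host and session cookie handling
# shown here are assumptions):
#
#     import requests
#     resp = requests.get("https://newsblur.example.com/reader/feeds",
#                         params={"include_favicons": "true", "update_counts": "true", "v": 2},
#                         cookies=session_cookies)
#     payload = resp.json()
#     feeds = payload["feeds"]      # a list when v=2, otherwise a dict keyed by feed id
#     folders = payload["folders"]  # nested folder structure decoded from the user's folders JSON
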
@json.json_view
def load_feed_favicons(request):
    user = get_user(request)
    feed_ids = request.REQUEST.getlist('feed_ids') or request.REQUEST.getlist('feed_ids[]')

    if not feed_ids:
        user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
        feed_ids = [sub['feed__pk'] for sub in user_subs.values('feed__pk')]

    feed_icons = dict([(i.feed_id, i.data) for i in MFeedIcon.objects(feed_id__in=feed_ids)])

    return feed_icons

def load_feeds_flat(request):
    user = request.user
    include_favicons = is_true(request.REQUEST.get('include_favicons', False))
    update_counts = is_true(request.REQUEST.get('update_counts', True))
    include_inactive = is_true(request.REQUEST.get('include_inactive', False))
    background_ios = is_true(request.REQUEST.get('background_ios', False))

    feeds = {}
    inactive_feeds = {}
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    iphone_version = "2.1" # Preserved forever. Don't change.
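    # These pins are echoed back in the response payload below; presumably the iOS
    # client compares them against its own build to decide whether an upgrade is available.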
latest_ios_build = "52" latest_ios_version = "5.0.0b2" if include_favicons == 'false': include_favicons = False if update_counts == 'false': update_counts = False if not user.is_authenticated(): return HttpResponseForbidden() try: folders = UserSubscriptionFolders.objects.get(user=user) except UserSubscriptionFolders.DoesNotExist: folders = [] user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True) notifications = MUserFeedNotification.feeds_for_user(user.pk) if not user_subs and folders: folders.auto_activate() user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True) if include_inactive: inactive_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=False) for sub in user_subs: pk = sub.feed_id if update_counts and sub.needs_unread_recalc: sub.calculate_feed_scores(silent=True) feeds[pk] = sub.canonical(include_favicon=include_favicons) if not sub.feed.active and not sub.feed.has_feed_exception: scheduled_feeds.append(sub.feed.pk) elif sub.feed.active_subscribers <= 0: scheduled_feeds.append(sub.feed.pk) elif sub.feed.next_scheduled_update < day_ago: scheduled_feeds.append(sub.feed.pk) if pk in notifications: feeds[pk].update(notifications[pk]) if include_inactive: for sub in inactive_subs: inactive_feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons) if len(scheduled_feeds) > 0 and request.user.is_authenticated(): logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." % len(scheduled_feeds)) ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk)) flat_folders = [] flat_folders_with_inactive = [] if folders: flat_folders = folders.flatten_folders(feeds=feeds) flat_folders_with_inactive = folders.flatten_folders(feeds=feeds, inactive_feeds=inactive_feeds) social_params = { 'user_id': user.pk, 'include_favicon': include_favicons, 'update_counts': update_counts, } social_feeds = MSocialSubscription.feeds(**social_params) social_profile = MSocialProfile.profile(user.pk) social_services = MSocialServices.profile(user.pk) starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True) if not starred_count and len(starred_counts): starred_count = MStarredStory.objects(user_id=user.pk).count() categories = None if not user_subs: categories = MCategory.serialize() logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB/~FR%s~FB feeds/socials/inactive ~FMflat~FB%s%s" % ( len(feeds.keys()), len(social_feeds), len(inactive_feeds), '. ~FCUpdating counts.' 
if update_counts else '', ' ~BB(background fetch)' if background_ios else '')) data = { "flat_folders": flat_folders, "flat_folders_with_inactive": flat_folders_with_inactive, "feeds": feeds if not include_inactive else {"0": "Don't include `include_inactive=true` if you want active feeds."}, "inactive_feeds": inactive_feeds if include_inactive else {"0": "Include `include_inactive=true`"}, "social_feeds": social_feeds, "social_profile": social_profile, "social_services": social_services, "user": user.username, "user_id": user.pk, "is_staff": user.is_staff, "user_profile": user.profile, "iphone_version": iphone_version, "latest_ios_build": latest_ios_build, "latest_ios_version": latest_ios_version, "categories": categories, 'starred_count': starred_count, 'starred_counts': starred_counts, 'share_ext_token': user.profile.secret_token, } return data @ratelimit(minutes=1, requests=30) @never_cache @json.json_view def refresh_feeds(request): start = datetime.datetime.now() start_time = time.time() user = get_user(request) feed_ids = request.REQUEST.getlist('feed_id') or request.REQUEST.getlist('feed_id[]') check_fetch_status = request.REQUEST.get('check_fetch_status') favicons_fetching = request.REQUEST.getlist('favicons_fetching') or request.REQUEST.getlist('favicons_fetching[]') social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id] feed_ids = list(set(feed_ids) - set(social_feed_ids)) feeds = {} if feed_ids or (not social_feed_ids and not feed_ids): feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, check_fetch_status=check_fetch_status) checkpoint1 = datetime.datetime.now() social_feeds = {} if social_feed_ids or (not social_feed_ids and not feed_ids): social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids) checkpoint2 = datetime.datetime.now() favicons_fetching = [int(f) for f in favicons_fetching if f] feed_icons = {} if favicons_fetching: feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)]) for feed_id, feed in feeds.items(): if feed_id in favicons_fetching and feed_id in feed_icons: feeds[feed_id]['favicon'] = feed_icons[feed_id].data feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching') user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed') sub_feed_ids = [s.feed_id for s in user_subs] if favicons_fetching: moved_feed_ids = [f for f in favicons_fetching if f not in sub_feed_ids] for moved_feed_id in moved_feed_ids: duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=moved_feed_id) if duplicate_feeds and duplicate_feeds[0].feed.pk in feeds: feeds[moved_feed_id] = feeds[duplicate_feeds[0].feed_id] feeds[moved_feed_id]['dupe_feed_id'] = duplicate_feeds[0].feed_id if check_fetch_status: missing_feed_ids = list(set(feed_ids) - set(sub_feed_ids)) if missing_feed_ids: duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id__in=missing_feed_ids) for duplicate_feed in duplicate_feeds: feeds[duplicate_feed.duplicate_feed_id] = {'id': duplicate_feed.feed_id} interactions_count = MInteraction.user_unread_count(user.pk) if True or settings.DEBUG or check_fetch_status: end = datetime.datetime.now() extra_fetch = "" if check_fetch_status or favicons_fetching: extra_fetch = "(%s/%s)" % (check_fetch_status, len(favicons_fetching)) logging.user(request, "~FBRefreshing %s+%s feeds %s (%.4s/%.4s/%.4s)" % ( len(feeds.keys()), len(social_feeds.keys()), 
extra_fetch, (checkpoint1-start).total_seconds(), (checkpoint2-start).total_seconds(), (end-start).total_seconds(), )) MAnalyticsLoader.add(page_load=time.time()-start_time) return { 'feeds': feeds, 'social_feeds': social_feeds, 'interactions_count': interactions_count, } @json.json_view def interactions_count(request): user = get_user(request) interactions_count = MInteraction.user_unread_count(user.pk) return { 'interactions_count': interactions_count, } @never_cache @ajax_login_required @json.json_view def feed_unread_count(request): start = time.time() user = request.user feed_ids = request.REQUEST.getlist('feed_id') or request.REQUEST.getlist('feed_id[]') force = request.REQUEST.get('force', False) social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id] feed_ids = list(set(feed_ids) - set(social_feed_ids)) feeds = {} if feed_ids: feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, force=force) social_feeds = {} if social_feed_ids: social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids) if len(feed_ids) == 1: if settings.DEBUG: feed_title = Feed.get_by_id(feed_ids[0]).feed_title else: feed_title = feed_ids[0] elif len(social_feed_ids) == 1: feed_title = MSocialProfile.objects.get(user_id=social_feed_ids[0].replace('social:', '')).username else: feed_title = "%s feeds" % (len(feeds) + len(social_feeds)) logging.user(request, "~FBUpdating unread count on: %s" % feed_title) MAnalyticsLoader.add(page_load=time.time()-start) return {'feeds': feeds, 'social_feeds': social_feeds} def refresh_feed(request, feed_id): start = time.time() user = get_user(request) feed = get_object_or_404(Feed, pk=feed_id) feed = feed.update(force=True, compute_scores=False) usersub = UserSubscription.objects.get(user=user, feed=feed) usersub.calculate_feed_scores(silent=False) logging.user(request, "~FBRefreshing feed: %s" % feed) MAnalyticsLoader.add(page_load=time.time()-start) return load_single_feed(request, feed_id) @never_cache @json.json_view def load_single_feed(request, feed_id): start = time.time() user = get_user(request) # offset = int(request.REQUEST.get('offset', 0)) # limit = int(request.REQUEST.get('limit', 6)) limit = 6 page = int(request.REQUEST.get('page', 1)) delay = int(request.REQUEST.get('delay', 0)) offset = limit * (page-1) order = request.REQUEST.get('order', 'newest') read_filter = request.REQUEST.get('read_filter', 'all') query = request.REQUEST.get('query', '').strip() include_story_content = is_true(request.REQUEST.get('include_story_content', True)) include_hidden = is_true(request.REQUEST.get('include_hidden', False)) include_feeds = is_true(request.REQUEST.get('include_feeds', False)) message = None user_search = None dupe_feed_id = None user_profiles = [] now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone) if not feed_id: raise Http404 feed_address = request.REQUEST.get('feed_address') feed = Feed.get_by_id(feed_id, feed_address=feed_address) if not feed: raise Http404 try: usersub = UserSubscription.objects.get(user=user, feed=feed) except UserSubscription.DoesNotExist: usersub = None if feed.is_newsletter and not usersub: # User must be subscribed to a newsletter in order to read it raise Http404 if page > 200: logging.user(request, "~BR~FK~SBOver page 200 on single feed: %s" % page) raise Http404 if query: if user.profile.is_premium: user_search = MUserSearch.get_user(user.pk) user_search.touch_search_date() stories = feed.find_stories(query, order=order, 
offset=offset, limit=limit) else: stories = [] message = "You must be a premium subscriber to search." elif read_filter == 'starred': mstories = MStarredStory.objects( user_id=user.pk, story_feed_id=feed_id ).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit] stories = Feed.format_stories(mstories) elif usersub and (read_filter == 'unread' or order == 'oldest'): stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit, default_cutoff_date=user.profile.unread_cutoff) else: stories = feed.get_stories(offset, limit) checkpoint1 = time.time() try: stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk) except redis.ConnectionError: logging.user(request, "~BR~FK~SBRedis is unavailable for shared stories.") checkpoint2 = time.time() # Get intelligence classifier for user if usersub and usersub.is_trained: classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id, social_user_id=0)) classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id)) classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id)) classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id)) else: classifier_feeds = [] classifier_authors = [] classifier_titles = [] classifier_tags = [] classifiers = get_classifiers_for_user(user, feed_id=feed_id, classifier_feeds=classifier_feeds, classifier_authors=classifier_authors, classifier_titles=classifier_titles, classifier_tags=classifier_tags) checkpoint3 = time.time() unread_story_hashes = [] if stories: if (read_filter == 'all' or query) and usersub: unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread', feed_ids=[usersub.feed_id], usersubs=[usersub], group_by_feed=False, cutoff_date=user.profile.unread_cutoff) story_hashes = [story['story_hash'] for story in stories if story['story_hash']] starred_stories = MStarredStory.objects(user_id=user.pk, story_feed_id=feed.pk, story_hash__in=story_hashes)\ .hint([('user_id', 1), ('story_hash', 1)])\ .only('story_hash', 'starred_date', 'user_tags') shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes) shared_stories = [] if shared_story_hashes: shared_stories = MSharedStory.objects(user_id=user.pk, story_hash__in=shared_story_hashes)\ .hint([('story_hash', 1)])\ .only('story_hash', 'shared_date', 'comments') starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date, user_tags=story.user_tags)) for story in starred_stories]) shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date, comments=story.comments)) for story in shared_stories]) checkpoint4 = time.time() for story in stories: if not include_story_content: del story['story_content'] story_date = localtime_for_timezone(story['story_date'], user.profile.timezone) nowtz = localtime_for_timezone(now, user.profile.timezone) story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz) story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz) if usersub: story['read_status'] = 1 if story['story_date'] < user.profile.unread_cutoff: story['read_status'] = 1 elif (read_filter == 'all' or query) and usersub: story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0 elif read_filter == 'unread' and usersub: story['read_status'] = 0 if story['story_hash'] in starred_stories: story['starred'] = True starred_date = 
localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'], user.profile.timezone) story['starred_date'] = format_story_link_date__long(starred_date, now) story['starred_timestamp'] = starred_date.strftime('%s') story['user_tags'] = starred_stories[story['story_hash']]['user_tags'] if story['story_hash'] in shared_stories: story['shared'] = True shared_date = localtime_for_timezone(shared_stories[story['story_hash']]['shared_date'], user.profile.timezone) story['shared_date'] = format_story_link_date__long(shared_date, now) story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments']) else: story['read_status'] = 1 story['intelligence'] = { 'feed': apply_classifier_feeds(classifier_feeds, feed), 'author': apply_classifier_authors(classifier_authors, story), 'tags': apply_classifier_tags(classifier_tags, story), 'title': apply_classifier_titles(classifier_titles, story), } story['score'] = UserSubscription.score_story(story['intelligence']) # Intelligence feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else [] feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else [] if include_feeds: feeds = Feed.objects.filter(pk__in=set([story['story_feed_id'] for story in stories])) feeds = [f.canonical(include_favicon=False) for f in feeds] if usersub: usersub.feed_opens += 1 usersub.needs_unread_recalc = True usersub.save(update_fields=['feed_opens', 'needs_unread_recalc']) diff1 = checkpoint1-start diff2 = checkpoint2-start diff3 = checkpoint3-start diff4 = checkpoint4-start timediff = time.time()-start last_update = relative_timesince(feed.last_update) time_breakdown = "" if timediff > 1 or settings.DEBUG: time_breakdown = "~SN~FR(~SB%.4s/%.4s/%.4s/%.4s~SN)" % ( diff1, diff2, diff3, diff4) search_log = "~SN~FG(~SB%s~SN) " % query if query else "" logging.user(request, "~FYLoading feed: ~SB%s%s (%s/%s) %s%s" % ( feed.feed_title[:22], ('~SN/p%s' % page) if page > 1 else '', order, read_filter, search_log, time_breakdown)) MAnalyticsLoader.add(page_load=timediff) if not include_hidden: hidden_stories_removed = 0 new_stories = [] for story in stories: if story['score'] >= 0: new_stories.append(story) else: hidden_stories_removed += 1 stories = new_stories data = dict(stories=stories, user_profiles=user_profiles, feed_tags=feed_tags, feed_authors=feed_authors, classifiers=classifiers, updated=last_update, user_search=user_search, feed_id=feed.pk, elapsed_time=round(float(timediff), 2), message=message) if include_feeds: data['feeds'] = feeds if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id if not usersub: data.update(feed.canonical()) # if not usersub and feed.num_subscribers <= 1: # data = dict(code=-1, message="You must be subscribed to this feed.") if delay and user.is_staff: # import random # time.sleep(random.randint(2, 7) / 10.0) # time.sleep(random.randint(1, 10)) time.sleep(delay) # if page == 2: # assert False return data def load_feed_page(request, feed_id): if not feed_id: raise Http404 feed = Feed.get_by_id(feed_id) if feed and feed.has_page and not feed.has_page_exception: if settings.BACKED_BY_AWS.get('pages_on_node'): url = "http://%s/original_page/%s" % ( settings.ORIGINAL_PAGE_SERVER, feed.pk, ) try: page_response = requests.get(url) except requests.ConnectionError: page_response = None if page_response and page_response.status_code == 200: response = HttpResponse(page_response.content, mimetype="text/html; 
charset=utf-8") response['Content-Encoding'] = 'gzip' response['Last-Modified'] = page_response.headers.get('Last-modified') response['Etag'] = page_response.headers.get('Etag') response['Content-Length'] = str(len(page_response.content)) logging.user(request, "~FYLoading original page (%s), proxied from node: ~SB%s bytes" % (feed_id, len(page_response.content))) return response if settings.BACKED_BY_AWS['pages_on_s3'] and feed.s3_page: if settings.PROXY_S3_PAGES: key = settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME).get_key(feed.s3_pages_key) if key: compressed_data = key.get_contents_as_string() response = HttpResponse(compressed_data, mimetype="text/html; charset=utf-8") response['Content-Encoding'] = 'gzip' logging.user(request, "~FYLoading original page, proxied: ~SB%s bytes" % (len(compressed_data))) return response else: logging.user(request, "~FYLoading original page, non-proxied") return HttpResponseRedirect('//%s/%s' % (settings.S3_PAGES_BUCKET_NAME, feed.s3_pages_key)) data = MFeedPage.get_data(feed_id=feed_id) if not data or not feed or not feed.has_page or feed.has_page_exception: logging.user(request, "~FYLoading original page, ~FRmissing") return render(request, 'static/404_original_page.xhtml', {}, content_type='text/html', status=404) logging.user(request, "~FYLoading original page, from the db") return HttpResponse(data, mimetype="text/html; charset=utf-8") @json.json_view def load_starred_stories(request): user = get_user(request) offset = int(request.REQUEST.get('offset', 0)) limit = int(request.REQUEST.get('limit', 10)) page = int(request.REQUEST.get('page', 0)) query = request.REQUEST.get('query', '').strip() order = request.REQUEST.get('order', 'newest') tag = request.REQUEST.get('tag') story_hashes = request.REQUEST.getlist('h') or request.REQUEST.getlist('h[]') story_hashes = story_hashes[:100] version = int(request.REQUEST.get('v', 1)) now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone) message = None order_by = '-' if order == "newest" else "" if page: offset = limit * (page - 1) if query: # results = SearchStarredStory.query(user.pk, query) # story_ids = [result.db_id for result in results] if user.profile.is_premium: stories = MStarredStory.find_stories(query, user.pk, tag=tag, offset=offset, limit=limit, order=order) else: stories = [] message = "You must be a premium subscriber to search." elif tag: if user.profile.is_premium: mstories = MStarredStory.objects( user_id=user.pk, user_tags__contains=tag ).order_by('%sstarred_date' % order_by)[offset:offset+limit] stories = Feed.format_stories(mstories) else: stories = [] message = "You must be a premium subscriber to read saved stories by tag." 
elif story_hashes: limit = 100 mstories = MStarredStory.objects( user_id=user.pk, story_hash__in=story_hashes ).order_by('%sstarred_date' % order_by)[offset:offset+limit] stories = Feed.format_stories(mstories) else: mstories = MStarredStory.objects( user_id=user.pk ).order_by('%sstarred_date' % order_by)[offset:offset+limit] stories = Feed.format_stories(mstories) stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True) story_hashes = [story['story_hash'] for story in stories] story_feed_ids = list(set(s['story_feed_id'] for s in stories)) usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk') usersub_ids = [us['feed__pk'] for us in usersub_ids] unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids))) unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids) unsub_feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in unsub_feeds) for story in stories: if story['story_feed_id'] in unsub_feeds: continue duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=story['story_feed_id']) if not duplicate_feed: continue feed_id = duplicate_feed[0].feed_id try: saved_story = MStarredStory.objects.get(user_id=user.pk, story_hash=story['story_hash']) saved_story.feed_id = feed_id _, story_hash = MStory.split_story_hash(story['story_hash']) saved_story.story_hash = "%s:%s" % (feed_id, story_hash) saved_story.story_feed_id = feed_id story['story_hash'] = saved_story.story_hash story['story_feed_id'] = saved_story.story_feed_id saved_story.save() logging.user(request, "~FCSaving new feed for starred story: ~SB%s -> %s" % (story['story_hash'], feed_id)) except (MStarredStory.DoesNotExist): logging.user(request, "~FCCan't find feed for starred story: ~SB%s" % (story['story_hash'])) continue shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes) shared_stories = [] if shared_story_hashes: shared_stories = MSharedStory.objects(user_id=user.pk, story_hash__in=shared_story_hashes)\ .hint([('story_hash', 1)])\ .only('story_hash', 'shared_date', 'comments') shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date, comments=story.comments)) for story in shared_stories]) nowtz = localtime_for_timezone(now, user.profile.timezone) for story in stories: story_date = localtime_for_timezone(story['story_date'], user.profile.timezone) story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz) story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz) starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone) story['starred_date'] = format_story_link_date__long(starred_date, nowtz) story['starred_timestamp'] = starred_date.strftime('%s') story['read_status'] = 1 story['starred'] = True story['intelligence'] = { 'feed': 1, 'author': 0, 'tags': 0, 'title': 0, } if story['story_hash'] in shared_stories: story['shared'] = True story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments']) search_log = "~SN~FG(~SB%s~SN)" % query if query else "" logging.user(request, "~FCLoading starred stories: ~SB%s stories %s" % (len(stories), search_log)) return { "stories": stories, "user_profiles": user_profiles, 'feeds': unsub_feeds.values() if version == 2 else unsub_feeds, "message": message, } @json.json_view def starred_story_hashes(request): user = get_user(request) include_timestamps = is_true(request.REQUEST.get('include_timestamps', False)) mstories 
= MStarredStory.objects( user_id=user.pk ).only('story_hash', 'starred_date').order_by('-starred_date') if include_timestamps: story_hashes = [(s.story_hash, s.starred_date.strftime("%s")) for s in mstories] else: story_hashes = [s.story_hash for s in mstories] logging.user(request, "~FYLoading ~FCstarred story hashes~FY: %s story hashes" % (len(story_hashes))) return dict(starred_story_hashes=story_hashes) def starred_stories_rss_feed(request, user_id, secret_token, tag_slug): try: user = User.objects.get(pk=user_id) except User.DoesNotExist: raise Http404 try: tag_counts = MStarredStoryCounts.objects.get(user_id=user_id, slug=tag_slug) except MStarredStoryCounts.MultipleObjectsReturned: tag_counts = MStarredStoryCounts.objects(user_id=user_id, slug=tag_slug).first() except MStarredStoryCounts.DoesNotExist: raise Http404 data = {} data['title'] = "Saved Stories - %s" % tag_counts.tag data['link'] = "%s%s" % ( settings.NEWSBLUR_URL, reverse('saved-stories-tag', kwargs=dict(tag_name=tag_slug))) data['description'] = "Stories saved by %s on NewsBlur with the tag \"%s\"." % (user.username, tag_counts.tag) data['lastBuildDate'] = datetime.datetime.utcnow() data['generator'] = 'NewsBlur - %s' % settings.NEWSBLUR_URL data['docs'] = None data['author_name'] = user.username data['feed_url'] = "%s%s" % ( settings.NEWSBLUR_URL, reverse('starred-stories-rss-feed', kwargs=dict(user_id=user_id, secret_token=secret_token, tag_slug=tag_slug)), ) rss = feedgenerator.Atom1Feed(**data) if not tag_counts.tag: starred_stories = MStarredStory.objects( user_id=user.pk ).order_by('-starred_date').limit(25) else: starred_stories = MStarredStory.objects( user_id=user.pk, user_tags__contains=tag_counts.tag ).order_by('-starred_date').limit(25) for starred_story in starred_stories: story_data = { 'title': starred_story.story_title, 'link': starred_story.story_permalink, 'description': (starred_story.story_content_z and zlib.decompress(starred_story.story_content_z)), 'author_name': starred_story.story_author_name, 'categories': starred_story.story_tags, 'unique_id': starred_story.story_guid, 'pubdate': starred_story.starred_date, } rss.add_item(**story_data) logging.user(request, "~FBGenerating ~SB%s~SN's saved story RSS feed (%s, %s stories): ~FM%s" % ( user.username, tag_counts.tag, tag_counts.count, request.META.get('HTTP_USER_AGENT', "")[:24] )) return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml') def folder_rss_feed(request, user_id, secret_token, unread_filter, folder_slug): domain = Site.objects.get_current().domain try: user = User.objects.get(pk=user_id) except User.DoesNotExist: raise Http404 user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=user) feed_ids, folder_title = user_sub_folders.feed_ids_under_folder_slug(folder_slug) usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids) if feed_ids and user.profile.is_premium: params = { "user_id": user.pk, "feed_ids": feed_ids, "offset": 0, "limit": 20, "order": 'newest', "read_filter": 'all', "cache_prefix": "RSS:" } story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params) else: story_hashes = [] mstories = MStory.objects(story_hash__in=story_hashes).order_by('-story_date') stories = Feed.format_stories(mstories) filtered_stories = [] found_feed_ids = list(set([story['story_feed_id'] for story in stories])) trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained] found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids)) if 
found_trained_feed_ids: classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids, social_user_id=0)) classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) else: classifier_feeds = [] classifier_authors = [] classifier_titles = [] classifier_tags = [] sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids, classifier_feeds=classifier_feeds, classifier_authors=classifier_authors, classifier_titles=classifier_titles, classifier_tags=classifier_tags) for story in stories: story['intelligence'] = { 'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']), 'author': apply_classifier_authors(classifier_authors, story), 'tags': apply_classifier_tags(classifier_tags, story), 'title': apply_classifier_titles(classifier_titles, story), } story['score'] = UserSubscription.score_story(story['intelligence']) if unread_filter == 'focus' and story['score'] >= 1: filtered_stories.append(story) elif unread_filter == 'unread' and story['score'] >= 0: filtered_stories.append(story) stories = filtered_stories data = {} data['title'] = "%s from %s (%s sites)" % (folder_title, user.username, len(feed_ids)) data['link'] = "https://%s%s" % ( domain, reverse('folder', kwargs=dict(folder_name=folder_title))) data['description'] = "Unread stories in %s on NewsBlur. From %s's account and contains %s sites." % ( folder_title, user.username, len(feed_ids)) data['lastBuildDate'] = datetime.datetime.utcnow() data['generator'] = 'NewsBlur - %s' % settings.NEWSBLUR_URL data['docs'] = None data['author_name'] = user.username data['feed_url'] = "https://%s%s" % ( domain, reverse('folder-rss-feed', kwargs=dict(user_id=user_id, secret_token=secret_token, unread_filter=unread_filter, folder_slug=folder_slug)), ) rss = feedgenerator.Atom1Feed(**data) for story in stories: feed = Feed.get_by_id(story['story_feed_id']) story_content = """%s<br><br><img src="//%s/rss_feeds/icon/%s" width="16" height="16"> %s""" % ( smart_unicode(story['story_content']), Site.objects.get_current().domain, story['story_feed_id'], feed.feed_title if feed else "" ) story_data = { 'title': "%s%s" % (("%s: " % feed.feed_title) if feed else "", story['story_title']), 'link': story['story_permalink'], 'description': story_content, 'categories': story['story_tags'], 'unique_id': 'https://%s/site/%s/%s/' % (domain, story['story_feed_id'], story['guid_hash']), 'pubdate': localtime_for_timezone(story['story_date'], user.profile.timezone), } if story['story_authors']: story_data['author_name'] = story['story_authors'] rss.add_item(**story_data) if not user.profile.is_premium: story_data = { 'title': "You must have a premium account on NewsBlur to have RSS feeds for folders.", 'link': "https://%s" % domain, 'description': "You must have a premium account on NewsBlur to have RSS feeds for folders.", 'unique_id': "https://%s/premium_only" % domain, 'pubdate': localtime_for_timezone(datetime.datetime.now(), user.profile.timezone), } rss.add_item(**story_data) logging.user(request, "~FBGenerating ~SB%s~SN's folder RSS feed (%s, %s stories): ~FM%s" % ( user.username, folder_title, len(stories), request.META.get('HTTP_USER_AGENT', "")[:24] )) return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml') @json.json_view def 
load_read_stories(request): user = get_user(request) offset = int(request.REQUEST.get('offset', 0)) limit = int(request.REQUEST.get('limit', 10)) page = int(request.REQUEST.get('page', 0)) order = request.REQUEST.get('order', 'newest') query = request.REQUEST.get('query', '').strip() now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone) message = None if page: offset = limit * (page - 1) if query: stories = [] message = "Not implemented yet." # if user.profile.is_premium: # stories = MStarredStory.find_stories(query, user.pk, offset=offset, limit=limit) # else: # stories = [] # message = "You must be a premium subscriber to search." else: story_hashes = RUserStory.get_read_stories(user.pk, offset=offset, limit=limit, order=order) mstories = MStory.objects(story_hash__in=story_hashes) stories = Feed.format_stories(mstories) stories = sorted(stories, key=lambda story: story_hashes.index(story['story_hash']), reverse=bool(order=="oldest")) stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True) story_hashes = [story['story_hash'] for story in stories] story_feed_ids = list(set(s['story_feed_id'] for s in stories)) usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk') usersub_ids = [us['feed__pk'] for us in usersub_ids] unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids))) unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids) unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds] shared_stories = MSharedStory.objects(user_id=user.pk, story_hash__in=story_hashes)\ .hint([('story_hash', 1)])\ .only('story_hash', 'shared_date', 'comments') shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date, comments=story.comments)) for story in shared_stories]) starred_stories = MStarredStory.objects(user_id=user.pk, story_hash__in=story_hashes)\ .hint([('user_id', 1), ('story_hash', 1)])\ .only('story_hash', 'starred_date') starred_stories = dict([(story.story_hash, story.starred_date) for story in starred_stories]) nowtz = localtime_for_timezone(now, user.profile.timezone) for story in stories: story_date = localtime_for_timezone(story['story_date'], user.profile.timezone) story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz) story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz) story['read_status'] = 1 story['intelligence'] = { 'feed': 1, 'author': 0, 'tags': 0, 'title': 0, } if story['story_hash'] in starred_stories: story['starred'] = True starred_date = localtime_for_timezone(starred_stories[story['story_hash']], user.profile.timezone) story['starred_date'] = format_story_link_date__long(starred_date, now) story['starred_timestamp'] = starred_date.strftime('%s') if story['story_hash'] in shared_stories: story['shared'] = True story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments']) search_log = "~SN~FG(~SB%s~SN)" % query if query else "" logging.user(request, "~FCLoading read stories: ~SB%s stories %s" % (len(stories), search_log)) return { "stories": stories, "user_profiles": user_profiles, "feeds": unsub_feeds, "message": message, } @json.json_view def load_river_stories__redis(request): limit = int(request.REQUEST.get('limit', 12)) start = time.time() user = get_user(request) message = None feed_ids = request.REQUEST.getlist('feeds') or request.REQUEST.getlist('feeds[]') feed_ids = [int(feed_id) for feed_id in feed_ids if feed_id] 
if not feed_ids: feed_ids = request.REQUEST.getlist('f') or request.REQUEST.getlist('f[]') feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('f') if feed_id] story_hashes = request.REQUEST.getlist('h') or request.REQUEST.getlist('h[]') story_hashes = story_hashes[:100] original_feed_ids = list(feed_ids) page = int(request.REQUEST.get('page', 1)) order = request.REQUEST.get('order', 'newest') read_filter = request.REQUEST.get('read_filter', 'unread') query = request.REQUEST.get('query', '').strip() include_hidden = is_true(request.REQUEST.get('include_hidden', False)) include_feeds = is_true(request.REQUEST.get('include_feeds', False)) initial_dashboard = is_true(request.REQUEST.get('initial_dashboard', False)) infrequent = is_true(request.REQUEST.get('infrequent', False)) if infrequent: infrequent = request.REQUEST.get('infrequent') now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone) usersubs = [] code = 1 user_search = None offset = (page-1) * limit story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-') if infrequent: feed_ids = Feed.low_volume_feeds(feed_ids, stories_per_month=infrequent) if story_hashes: unread_feed_story_hashes = None read_filter = 'all' mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order) stories = Feed.format_stories(mstories) elif query: if user.profile.is_premium: user_search = MUserSearch.get_user(user.pk) user_search.touch_search_date() usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids, read_filter='all') feed_ids = [sub.feed_id for sub in usersubs] if infrequent: feed_ids = Feed.low_volume_feeds(feed_ids, stories_per_month=infrequent) stories = Feed.find_feed_stories(feed_ids, query, order=order, offset=offset, limit=limit) mstories = stories unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids, read_filter="unread", order=order, group_by_feed=False, cutoff_date=user.profile.unread_cutoff) else: stories = [] mstories = [] message = "You must be a premium subscriber to search." 
elif read_filter == 'starred': mstories = MStarredStory.objects( user_id=user.pk, story_feed_id__in=feed_ids ).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit] stories = Feed.format_stories(mstories) else: usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids, read_filter=read_filter) all_feed_ids = [f for f in feed_ids] feed_ids = [sub.feed_id for sub in usersubs] if infrequent: feed_ids = Feed.low_volume_feeds(feed_ids, stories_per_month=infrequent) if feed_ids: params = { "user_id": user.pk, "feed_ids": feed_ids, "all_feed_ids": all_feed_ids, "offset": offset, "limit": limit, "order": order, "read_filter": read_filter, "usersubs": usersubs, "cutoff_date": user.profile.unread_cutoff, "cache_prefix": "dashboard:" if initial_dashboard else "", } story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params) else: story_hashes = [] unread_feed_story_hashes = [] mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order) stories = Feed.format_stories(mstories) found_feed_ids = list(set([story['story_feed_id'] for story in stories])) stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk) if not usersubs: usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=found_feed_ids, read_filter=read_filter) trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained] found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids)) # Find starred stories if found_feed_ids: if read_filter == 'starred': starred_stories = mstories else: story_hashes = [s['story_hash'] for s in stories] starred_stories = MStarredStory.objects( user_id=user.pk, story_hash__in=story_hashes ).only('story_hash', 'starred_date', 'user_tags') starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date, user_tags=story.user_tags)) for story in starred_stories]) else: starred_stories = {} # Intelligence classifiers for all feeds involved if found_trained_feed_ids: classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids, social_user_id=0)) classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_trained_feed_ids)) else: classifier_feeds = [] classifier_authors = [] classifier_titles = [] classifier_tags = [] classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids, classifier_feeds=classifier_feeds, classifier_authors=classifier_authors, classifier_titles=classifier_titles, classifier_tags=classifier_tags) # Just need to format stories nowtz = localtime_for_timezone(now, user.profile.timezone) for story in stories: if read_filter == 'starred': story['read_status'] = 1 else: story['read_status'] = 0 if read_filter == 'all' or query: if (unread_feed_story_hashes is not None and story['story_hash'] not in unread_feed_story_hashes): story['read_status'] = 1 story_date = localtime_for_timezone(story['story_date'], user.profile.timezone) story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz) story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz) if story['story_hash'] in starred_stories: story['starred'] = True starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'], user.profile.timezone) 
story['starred_date'] = format_story_link_date__long(starred_date, now) story['starred_timestamp'] = starred_date.strftime('%s') story['user_tags'] = starred_stories[story['story_hash']]['user_tags'] story['intelligence'] = { 'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']), 'author': apply_classifier_authors(classifier_authors, story), 'tags': apply_classifier_tags(classifier_tags, story), 'title': apply_classifier_titles(classifier_titles, story), } story['score'] = UserSubscription.score_story(story['intelligence']) if include_feeds: feeds = Feed.objects.filter(pk__in=set([story['story_feed_id'] for story in stories])) feeds = [feed.canonical(include_favicon=False) for feed in feeds] if not user.profile.is_premium and not include_feeds: message = "The full River of News is a premium feature." code = 0 # if page > 1: # stories = [] # else: # stories = stories[:5] if not include_hidden: hidden_stories_removed = 0 new_stories = [] for story in stories: if story['score'] >= 0: new_stories.append(story) else: hidden_stories_removed += 1 stories = new_stories # Clean stories to remove potentially old stories on dashboard if initial_dashboard: new_stories = [] now = datetime.datetime.utcnow() hour = now + datetime.timedelta(hours=1) month_ago = now - datetime.timedelta(days=settings.DAYS_OF_UNREAD) for story in stories: if story['story_date'] >= month_ago and story['story_date'] < hour: new_stories.append(story) stories = new_stories # if page >= 1: # import random # time.sleep(random.randint(3, 6)) diff = time.time() - start timediff = round(float(diff), 2) logging.user(request, "~FYLoading ~FC%sriver stories~FY: ~SBp%s~SN (%s/%s " "stories, ~SN%s/%s/%s feeds, %s/%s)" % ("~FB~SBinfrequent~SN~FC " if infrequent else "", page, len(stories), len(mstories), len(found_feed_ids), len(feed_ids), len(original_feed_ids), order, read_filter)) MAnalyticsLoader.add(page_load=diff) data = dict(code=code, message=message, stories=stories, classifiers=classifiers, elapsed_time=timediff, user_search=user_search, user_profiles=user_profiles) if include_feeds: data['feeds'] = feeds if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed return data @json.json_view def complete_river(request): user = get_user(request) feed_ids = request.POST.getlist('feeds') or request.POST.getlist('feeds[]') feed_ids = [int(feed_id) for feed_id in feed_ids if feed_id] page = int(request.POST.get('page', 1)) read_filter = request.POST.get('read_filter', 'unread') stories_truncated = 0 usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids, read_filter=read_filter) feed_ids = [sub.feed_id for sub in usersubs] if feed_ids: stories_truncated = UserSubscription.truncate_river(user.pk, feed_ids, read_filter, cache_prefix="dashboard:") logging.user(request, "~FC~BBRiver complete on page ~SB%s~SN, truncating ~SB%s~SN stories from ~SB%s~SN feeds" % (page, stories_truncated, len(feed_ids))) return dict(code=1, message="Truncated %s stories from %s" % (stories_truncated, len(feed_ids))) @json.json_view def unread_story_hashes__old(request): user = get_user(request) feed_ids = request.REQUEST.getlist('feed_id') or request.REQUEST.getlist('feed_id[]') feed_ids = [int(feed_id) for feed_id in feed_ids if feed_id] include_timestamps = is_true(request.REQUEST.get('include_timestamps', False)) usersubs = {} if not feed_ids: usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) | Q(unread_count_positive__gt=0), user=user, active=True) feed_ids = [sub.feed_id for sub 
in usersubs] else: usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) | Q(unread_count_positive__gt=0), user=user, active=True, feed__in=feed_ids) unread_feed_story_hashes = {} story_hash_count = 0 usersubs = dict((sub.feed_id, sub) for sub in usersubs) for feed_id in feed_ids: if feed_id in usersubs: us = usersubs[feed_id] else: continue if not us.unread_count_neutral and not us.unread_count_positive: continue unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500, withscores=include_timestamps, hashes_only=True, default_cutoff_date=user.profile.unread_cutoff) story_hash_count += len(unread_feed_story_hashes[feed_id]) logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" % (len(feed_ids), len(story_hash_count))) return dict(unread_feed_story_hashes=unread_feed_story_hashes) @json.json_view def unread_story_hashes(request): user = get_user(request) feed_ids = request.REQUEST.getlist('feed_id') or request.REQUEST.getlist('feed_id[]') feed_ids = [int(feed_id) for feed_id in feed_ids if feed_id] include_timestamps = is_true(request.REQUEST.get('include_timestamps', False)) order = request.REQUEST.get('order', 'newest') read_filter = request.REQUEST.get('read_filter', 'unread') story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids, order=order, read_filter=read_filter, include_timestamps=include_timestamps, cutoff_date=user.profile.unread_cutoff) logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" % (len(feed_ids), len(story_hashes))) return dict(unread_feed_story_hashes=story_hashes) @ajax_login_required @json.json_view def mark_all_as_read(request): code = 1 try: days = int(request.REQUEST.get('days', 0)) except ValueError: return dict(code=-1, message="Days parameter must be an integer, not: %s" % request.REQUEST.get('days')) read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days) feeds = UserSubscription.objects.filter(user=request.user) infrequent = is_true(request.REQUEST.get('infrequent', False)) if infrequent: infrequent = request.REQUEST.get('infrequent') feed_ids = Feed.low_volume_feeds([usersub.feed.pk for usersub in feeds], stories_per_month=infrequent) feeds = UserSubscription.objects.filter(user=request.user, feed_id__in=feed_ids) socialsubs = MSocialSubscription.objects.filter(user_id=request.user.pk) for subtype in [feeds, socialsubs]: for sub in subtype: if days == 0: sub.mark_feed_read() else: if sub.mark_read_date < read_date: sub.needs_unread_recalc = True sub.mark_read_date = read_date sub.save() r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') logging.user(request, "~FMMarking %s as read: ~SB%s days" % (("all" if not infrequent else "infrequent stories"), days,)) return dict(code=code) @ajax_login_required @json.json_view def mark_story_as_read(request): story_ids = request.REQUEST.getlist('story_id') or request.REQUEST.getlist('story_id[]') try: feed_id = int(get_argument_or_404(request, 'feed_id')) except ValueError: return dict(code=-1, errors=["You must pass a valid feed_id: %s" % request.REQUEST.get('feed_id')]) try: usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id) except Feed.DoesNotExist: duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id) if duplicate_feed: feed_id = duplicate_feed[0].feed_id try: usersub = UserSubscription.objects.get(user=request.user, 
feed=duplicate_feed[0].feed) except (Feed.DoesNotExist): return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id]) else: return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id]) except UserSubscription.DoesNotExist: usersub = None if usersub: data = usersub.mark_story_ids_as_read(story_ids, request=request) else: data = dict(code=-1, errors=["User is not subscribed to this feed."]) return data @ajax_login_required @json.json_view def mark_story_hashes_as_read(request): retrying_failed = is_true(request.POST.get('retrying_failed', False)) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) try: story_hashes = request.REQUEST.getlist('story_hash') or request.REQUEST.getlist('story_hash[]') except UnreadablePostError: return dict(code=-1, message="Missing `story_hash` list parameter.") feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes, username=request.user.username) if friend_ids: socialsubs = MSocialSubscription.objects.filter( user_id=request.user.pk, subscription_user_id__in=friend_ids) for socialsub in socialsubs: if not socialsub.needs_unread_recalc: socialsub.needs_unread_recalc = True socialsub.save() r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id) # Also count on original subscription for feed_id in feed_ids: usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id) if usersubs: usersub = usersubs[0] usersub.last_read_date = datetime.datetime.now() if not usersub.needs_unread_recalc: usersub.needs_unread_recalc = True usersub.save(update_fields=['needs_unread_recalc', 'last_read_date']) else: usersub.save(update_fields=['last_read_date']) r.publish(request.user.username, 'feed:%s' % feed_id) hash_count = len(story_hashes) logging.user(request, "~FYRead %s %s in feed/socialsubs: %s/%s: %s %s" % ( hash_count, 'story' if hash_count == 1 else 'stories', feed_ids, friend_ids, story_hashes, '(retrying failed)' if retrying_failed else '')) return dict(code=1, story_hashes=story_hashes, feed_ids=feed_ids, friend_user_ids=friend_ids) @ajax_login_required @json.json_view def mark_feed_stories_as_read(request): r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) feeds_stories = request.REQUEST.get('feeds_stories', "{}") feeds_stories = json.decode(feeds_stories) data = { 'code': -1, 'message': 'Nothing was marked as read' } for feed_id, story_ids in feeds_stories.items(): try: feed_id = int(feed_id) except ValueError: continue try: usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id) data = usersub.mark_story_ids_as_read(story_ids, request=request) except UserSubscription.DoesNotExist: return dict(code=-1, error="You are not subscribed to this feed_id: %d" % feed_id) except Feed.DoesNotExist: duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id) try: if not duplicate_feed: raise Feed.DoesNotExist usersub = UserSubscription.objects.get(user=request.user, feed=duplicate_feed[0].feed) data = usersub.mark_story_ids_as_read(story_ids, request=request) except (UserSubscription.DoesNotExist, Feed.DoesNotExist): return dict(code=-1, error="No feed exists for feed_id: %d" % feed_id) r.publish(request.user.username, 'feed:%s' % feed_id) return data @ajax_login_required @json.json_view def mark_social_stories_as_read(request): code = 1 errors = [] data = {} r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) users_feeds_stories = request.REQUEST.get('users_feeds_stories', "{}") users_feeds_stories = 
json.decode(users_feeds_stories) for social_user_id, feeds in users_feeds_stories.items(): for feed_id, story_ids in feeds.items(): feed_id = int(feed_id) try: socialsub = MSocialSubscription.objects.get(user_id=request.user.pk, subscription_user_id=social_user_id) data = socialsub.mark_story_ids_as_read(story_ids, feed_id, request=request) except OperationError, e: code = -1 errors.append("Already read story: %s" % e) except MSocialSubscription.DoesNotExist: MSocialSubscription.mark_unsub_story_ids_as_read(request.user.pk, social_user_id, story_ids, feed_id, request=request) except Feed.DoesNotExist: duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id) if duplicate_feed: try: socialsub = MSocialSubscription.objects.get(user_id=request.user.pk, subscription_user_id=social_user_id) data = socialsub.mark_story_ids_as_read(story_ids, duplicate_feed[0].feed.pk, request=request) except (UserSubscription.DoesNotExist, Feed.DoesNotExist): code = -1 errors.append("No feed exists for feed_id %d." % feed_id) else: continue r.publish(request.user.username, 'feed:%s' % feed_id) r.publish(request.user.username, 'social:%s' % social_user_id) data.update(code=code, errors=errors) return data @required_params('story_id', feed_id=int) @ajax_login_required @json.json_view def mark_story_as_unread(request): story_id = request.REQUEST.get('story_id', None) feed_id = int(request.REQUEST.get('feed_id', 0)) try: usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id) feed = usersub.feed except UserSubscription.DoesNotExist: usersub = None feed = Feed.get_by_id(feed_id) if usersub and not usersub.needs_unread_recalc: usersub.needs_unread_recalc = True usersub.save(update_fields=['needs_unread_recalc']) data = dict(code=0, payload=dict(story_id=story_id)) story, found_original = MStory.find_story(feed_id, story_id) if not story: logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed)) return dict(code=-1, message="Story not found.") if usersub: data = usersub.invert_read_stories_after_unread_story(story, request) message = RUserStory.story_can_be_marked_read_by_user(story, request.user) if message: data['code'] = -1 data['message'] = message return data social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk, story_feed_id=feed_id, story_guid_hash=story.guid_hash) dirty_count = social_subs and social_subs.count() dirty_count = ("(%s social_subs)" % dirty_count) if dirty_count else "" RUserStory.mark_story_hash_unread(request.user, story_hash=story.story_hash) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'feed:%s' % feed_id) logging.user(request, "~FY~SBUnread~SN story in feed: %s %s" % (feed, dirty_count)) return data @ajax_login_required @json.json_view @required_params('story_hash') def mark_story_hash_as_unread(request): r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) story_hash = request.REQUEST.get('story_hash') feed_id, _ = MStory.split_story_hash(story_hash) story, _ = MStory.find_story(feed_id, story_hash) if not story: data = dict(code=-1, message="That story has been removed from the feed, no need to mark it unread.") return data message = RUserStory.story_can_be_marked_read_by_user(story, request.user) if message: data = dict(code=-1, message=message) return data # Also count on original subscription usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id) if usersubs: usersub = usersubs[0] if not 
usersub.needs_unread_recalc: usersub.needs_unread_recalc = True usersub.save(update_fields=['needs_unread_recalc']) data = usersub.invert_read_stories_after_unread_story(story, request) r.publish(request.user.username, 'feed:%s' % feed_id) feed_id, friend_ids = RUserStory.mark_story_hash_unread(request.user, story_hash) if friend_ids: socialsubs = MSocialSubscription.objects.filter( user_id=request.user.pk, subscription_user_id__in=friend_ids) for socialsub in socialsubs: if not socialsub.needs_unread_recalc: socialsub.needs_unread_recalc = True socialsub.save() r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id) logging.user(request, "~FYUnread story in feed/socialsubs: %s/%s" % (feed_id, friend_ids)) return dict(code=1, story_hash=story_hash, feed_id=feed_id, friend_user_ids=friend_ids) @ajax_login_required @json.json_view def mark_feed_as_read(request): r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) feed_ids = request.POST.getlist('feed_id') or request.POST.getlist('feed_id[]') cutoff_timestamp = int(request.REQUEST.get('cutoff_timestamp', 0)) direction = request.REQUEST.get('direction', 'older') infrequent = is_true(request.REQUEST.get('infrequent', False)) if infrequent: infrequent = request.REQUEST.get('infrequent') multiple = len(feed_ids) > 1 code = 1 errors = [] cutoff_date = datetime.datetime.fromtimestamp(cutoff_timestamp) if cutoff_timestamp else None if infrequent: feed_ids = Feed.low_volume_feeds(feed_ids, stories_per_month=infrequent) feed_ids = [unicode(f) for f in feed_ids] # This method expects strings if cutoff_date: logging.user(request, "~FMMark %s feeds read, %s - cutoff: %s/%s" % (len(feed_ids), direction, cutoff_timestamp, cutoff_date)) for feed_id in feed_ids: if 'social:' in feed_id: user_id = int(feed_id.replace('social:', '')) try: sub = MSocialSubscription.objects.get(user_id=request.user.pk, subscription_user_id=user_id) except MSocialSubscription.DoesNotExist: logging.user(request, "~FRCouldn't find socialsub: %s" % user_id) continue if not multiple: sub_user = User.objects.get(pk=sub.subscription_user_id) logging.user(request, "~FMMarking social feed as read: ~SB%s" % (sub_user.username,)) else: try: feed = Feed.objects.get(id=feed_id) sub = UserSubscription.objects.get(feed=feed, user=request.user) if not multiple: logging.user(request, "~FMMarking feed as read: ~SB%s" % (feed,)) except (Feed.DoesNotExist, UserSubscription.DoesNotExist), e: errors.append("User not subscribed: %s" % e) continue except (ValueError), e: errors.append("Invalid feed_id: %s" % e) continue if not sub: errors.append("User not subscribed: %s" % feed_id) continue try: if direction == "older": marked_read = sub.mark_feed_read(cutoff_date=cutoff_date) else: marked_read = sub.mark_newer_stories_read(cutoff_date=cutoff_date) if marked_read and not multiple: r.publish(request.user.username, 'feed:%s' % feed_id) except IntegrityError, e: errors.append("Could not mark feed as read: %s" % e) code = -1 if multiple: logging.user(request, "~FMMarking ~SB%s~SN feeds as read" % len(feed_ids)) r.publish(request.user.username, 'refresh:%s' % ','.join(feed_ids)) if errors: logging.user(request, "~FMMarking read had errors: ~FR%s" % errors) return dict(code=code, errors=errors, cutoff_date=cutoff_date, direction=direction) def _parse_user_info(user): return { 'user_info': { 'is_anonymous': json.encode(user.is_anonymous()), 'is_authenticated': json.encode(user.is_authenticated()), 'username': json.encode(user.username if user.is_authenticated() else 'Anonymous') 
} } @ajax_login_required @json.json_view def add_url(request): code = 0 url = request.POST['url'] folder = request.POST.get('folder', '') new_folder = request.POST.get('new_folder') auto_active = is_true(request.POST.get('auto_active', 1)) skip_fetch = is_true(request.POST.get('skip_fetch', False)) feed = None if not url: code = -1 message = 'Enter in the website address or the feed URL.' elif any([(banned_url in url) for banned_url in BANNED_URLS]): code = -1 message = "The publisher of this website has banned NewsBlur." elif re.match('(https?://)?twitter.com/\w+/?$', url): if not request.user.profile.is_premium: message = "You must be a premium subscriber to add Twitter feeds." code = -1 else: # Check if Twitter API is active for user ss = MSocialServices.get_user(request.user.pk) try: if not ss.twitter_uid: raise tweepy.TweepError("No API token") ss.twitter_api().me() except tweepy.TweepError: code = -1 message = "Your Twitter connection isn't setup. Go to Manage - Friends/Followers and reconnect Twitter." if code == -1: return dict(code=code, message=message) if new_folder: usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user) usf.add_folder(folder, new_folder) folder = new_folder code, message, us = UserSubscription.add_subscription(user=request.user, feed_address=url, folder=folder, auto_active=auto_active, skip_fetch=skip_fetch) feed = us and us.feed if feed: r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:%s' % feed.pk) MUserSearch.schedule_index_feeds_for_search(feed.pk, request.user.pk) return dict(code=code, message=message, feed=feed) @ajax_login_required @json.json_view def add_folder(request): folder = request.POST['folder'] parent_folder = request.POST.get('parent_folder', '') folders = None logging.user(request, "~FRAdding Folder: ~SB%s (in %s)" % (folder, parent_folder)) if folder: code = 1 message = "" user_sub_folders_object, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user) user_sub_folders_object.add_folder(parent_folder, folder) folders = json.decode(user_sub_folders_object.folders) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') else: code = -1 message = "Gotta write in a folder name." return dict(code=code, message=message, folders=folders) @ajax_login_required @json.json_view def delete_feed(request): feed_id = int(request.POST['feed_id']) in_folder = request.POST.get('in_folder', None) if not in_folder or in_folder == ' ': in_folder = "" user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders.delete_feed(feed_id, in_folder) feed = Feed.objects.filter(pk=feed_id) if feed: feed[0].count_subscribers() r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, message="Removed %s from '%s'." % (feed, in_folder)) @ajax_login_required @json.json_view def delete_feed_by_url(request): message = "" code = 0 url = request.POST['url'] in_folder = request.POST.get('in_folder', '') if in_folder == ' ': in_folder = "" logging.user(request.user, "~FBFinding feed (delete_feed_by_url): %s" % url) feed = Feed.get_feed_from_url(url, create=False) if feed: user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders.delete_feed(feed.pk, in_folder) code = 1 feed = Feed.objects.filter(pk=feed.pk) if feed: feed[0].count_subscribers() else: code = -1 message = "URL not found." 
return dict(code=code, message=message) @ajax_login_required @json.json_view def delete_folder(request): folder_to_delete = request.POST.get('folder_name') or request.POST.get('folder_to_delete') in_folder = request.POST.get('in_folder', None) feed_ids_in_folder = request.REQUEST.getlist('feed_id') or request.REQUEST.getlist('feed_id[]') feed_ids_in_folder = [int(f) for f in feed_ids_in_folder if f] request.user.profile.send_opml_export_email(reason="You have deleted an entire folder of feeds, so here's a backup of all of your subscriptions just in case.") # Works piss poor with duplicate folder titles, if they are both in the same folder. # Deletes all, but only in the same folder parent. But nobody should be doing that, right? user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders.delete_folder(folder_to_delete, in_folder, feed_ids_in_folder) folders = json.decode(user_sub_folders.folders) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=folders) @required_params('feeds_by_folder') @ajax_login_required @json.json_view def delete_feeds_by_folder(request): feeds_by_folder = json.decode(request.POST['feeds_by_folder']) request.user.profile.send_opml_export_email(reason="You have deleted a number of feeds at once, so here's a backup of all of your subscriptions just in case.") # Works piss poor with duplicate folder titles, if they are both in the same folder. # Deletes all, but only in the same folder parent. But nobody should be doing that, right? user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders.delete_feeds_by_folder(feeds_by_folder) folders = json.decode(user_sub_folders.folders) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=folders) @ajax_login_required @json.json_view def rename_feed(request): feed = get_object_or_404(Feed, pk=int(request.POST['feed_id'])) user_sub = UserSubscription.objects.get(user=request.user, feed=feed) feed_title = request.POST['feed_title'] logging.user(request, "~FRRenaming feed '~SB%s~SN' to: ~SB%s" % ( feed.feed_title, feed_title)) user_sub.user_title = feed_title user_sub.save() return dict(code=1) @ajax_login_required @json.json_view def rename_folder(request): folder_to_rename = request.POST.get('folder_name') or request.POST.get('folder_to_rename') new_folder_name = request.POST['new_folder_name'] in_folder = request.POST.get('in_folder', '') if 'Top Level' in in_folder: in_folder = '' code = 0 # Works piss poor with duplicate folder titles, if they are both in the same folder. # renames all, but only in the same folder parent. But nobody should be doing that, right? 
if folder_to_rename and new_folder_name: user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders.rename_folder(folder_to_rename, new_folder_name, in_folder) code = 1 else: code = -1 return dict(code=code) @ajax_login_required @json.json_view def move_feed_to_folders(request): feed_id = int(request.POST['feed_id']) in_folders = request.POST.getlist('in_folders', '') or request.POST.getlist('in_folders[]', '') to_folders = request.POST.getlist('to_folders', '') or request.POST.getlist('to_folders[]', '') user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders = user_sub_folders.move_feed_to_folders(feed_id, in_folders=in_folders, to_folders=to_folders) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=json.decode(user_sub_folders.folders)) @ajax_login_required @json.json_view def move_feed_to_folder(request): feed_id = int(request.POST['feed_id']) in_folder = request.POST.get('in_folder', '') to_folder = request.POST.get('to_folder', '') user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders = user_sub_folders.move_feed_to_folder(feed_id, in_folder=in_folder, to_folder=to_folder) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=json.decode(user_sub_folders.folders)) @ajax_login_required @json.json_view def move_folder_to_folder(request): folder_name = request.POST['folder_name'] in_folder = request.POST.get('in_folder', '') to_folder = request.POST.get('to_folder', '') user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) user_sub_folders = user_sub_folders.move_folder_to_folder(folder_name, in_folder=in_folder, to_folder=to_folder) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=json.decode(user_sub_folders.folders)) @required_params('feeds_by_folder', 'to_folder') @ajax_login_required @json.json_view def move_feeds_by_folder_to_folder(request): feeds_by_folder = json.decode(request.POST['feeds_by_folder']) to_folder = request.POST['to_folder'] new_folder = request.POST.get('new_folder', None) request.user.profile.send_opml_export_email(reason="You have moved a number of feeds at once, so here's a backup of all of your subscriptions just in case.") user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user) if new_folder: user_sub_folders.add_folder(to_folder, new_folder) to_folder = new_folder user_sub_folders = user_sub_folders.move_feeds_by_folder_to_folder(feeds_by_folder, to_folder) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') return dict(code=1, folders=json.decode(user_sub_folders.folders)) @login_required def add_feature(request): if not request.user.is_staff: return HttpResponseForbidden() code = -1 form = FeatureForm(request.POST) if form.is_valid(): form.save() code = 1 return HttpResponseRedirect(reverse('index')) return dict(code=code) @json.json_view def load_features(request): user = get_user(request) page = max(int(request.REQUEST.get('page', 0)), 0) if page > 1: logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1)) features = Feature.objects.all()[page*3:(page+1)*3+1].values() features = [{ 'description': f['description'], 'date': localtime_for_timezone(f['date'], 
user.profile.timezone).strftime("%b %d, %Y") } for f in features] return features @ajax_login_required @json.json_view def save_feed_order(request): folders = request.POST.get('folders') if folders: # Test that folders can be JSON decoded folders_list = json.decode(folders) assert folders_list is not None logging.user(request, "~FBFeed re-ordering: ~SB%s folders/feeds" % (len(folders_list))) user_sub_folders = UserSubscriptionFolders.objects.get(user=request.user) user_sub_folders.folders = folders user_sub_folders.save() return {} @json.json_view def feeds_trainer(request): classifiers = [] feed_id = request.REQUEST.get('feed_id') user = get_user(request) usersubs = UserSubscription.objects.filter(user=user, active=True) if feed_id: feed = get_object_or_404(Feed, pk=feed_id) usersubs = usersubs.filter(feed=feed) usersubs = usersubs.select_related('feed').order_by('-feed__stories_last_month') for us in usersubs: if (not us.is_trained and us.feed.stories_last_month > 0) or feed_id: classifier = dict() classifier['classifiers'] = get_classifiers_for_user(user, feed_id=us.feed.pk) classifier['feed_id'] = us.feed_id classifier['stories_last_month'] = us.feed.stories_last_month classifier['num_subscribers'] = us.feed.num_subscribers classifier['feed_tags'] = json.decode(us.feed.data.popular_tags) if us.feed.data.popular_tags else [] classifier['feed_authors'] = json.decode(us.feed.data.popular_authors) if us.feed.data.popular_authors else [] classifiers.append(classifier) user.profile.has_trained_intelligence = True user.profile.save() logging.user(user, "~FGLoading Trainer: ~SB%s feeds" % (len(classifiers))) return classifiers @ajax_login_required @json.json_view def save_feed_chooser(request): is_premium = request.user.profile.is_premium approved_feeds = request.POST.getlist('approved_feeds') or request.POST.getlist('approved_feeds[]') approved_feeds = [int(feed_id) for feed_id in approved_feeds if feed_id] approve_all = False if not is_premium: approved_feeds = approved_feeds[:64] elif is_premium and not approved_feeds: approve_all = True activated = 0 usersubs = UserSubscription.objects.filter(user=request.user) for sub in usersubs: try: if sub.feed_id in approved_feeds or approve_all: activated += 1 if not sub.active: sub.active = True sub.save() if sub.feed.active_subscribers <= 0: sub.feed.count_subscribers() elif sub.active: sub.active = False sub.save() except Feed.DoesNotExist: pass UserSubscription.queue_new_feeds(request.user) UserSubscription.refresh_stale_feeds(request.user, exclude_new=True) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'reload:feeds') logging.user(request, "~BB~FW~SBFeed chooser: ~FC%s~SN/~SB%s" % ( activated, usersubs.count() )) return {'activated': activated} @ajax_login_required def retrain_all_sites(request): for sub in UserSubscription.objects.filter(user=request.user): sub.is_trained = False sub.save() return feeds_trainer(request) @login_required def activate_premium_account(request): try: usersubs = UserSubscription.objects.select_related('feed').filter(user=request.user) for sub in usersubs: sub.active = True sub.save() if sub.feed.premium_subscribers <= 0: sub.feed.count_subscribers() sub.feed.schedule_feed_fetch_immediately() except Exception, e: subject = "Premium activation failed" message = "%s -- %s\n\n%s" % (request.user, usersubs, e) mail_admins(subject, message, fail_silently=True) request.user.profile.is_premium = True request.user.profile.save() return HttpResponseRedirect(reverse('index')) 
@login_required def login_as(request): if not request.user.is_staff: logging.user(request, "~SKNON-STAFF LOGGING IN AS ANOTHER USER!") assert False return HttpResponseForbidden() username = request.GET['user'] user = get_object_or_404(User, username__iexact=username) user.backend = settings.AUTHENTICATION_BACKENDS[0] login_user(request, user) return HttpResponseRedirect(reverse('index')) def iframe_buster(request): logging.user(request, "~FB~SBiFrame bust!") return HttpResponse(status=204) @required_params('story_id', feed_id=int) @ajax_login_required @json.json_view def mark_story_as_starred(request): return _mark_story_as_starred(request) @required_params('story_hash') @ajax_login_required @json.json_view def mark_story_hash_as_starred(request): return _mark_story_as_starred(request) def _mark_story_as_starred(request): code = 1 feed_id = int(request.REQUEST.get('feed_id', 0)) story_id = request.REQUEST.get('story_id', None) story_hash = request.REQUEST.get('story_hash', None) user_tags = request.REQUEST.getlist('user_tags') or request.REQUEST.getlist('user_tags[]') message = "" if story_hash: story, _ = MStory.find_story(story_hash=story_hash) feed_id = story and story.story_feed_id else: story, _ = MStory.find_story(story_feed_id=feed_id, story_id=story_id) if not story: return {'code': -1, 'message': "Could not find story to save."} story_db = dict([(k, v) for k, v in story._data.items() if k is not None and v is not None]) story_db.pop('user_id', None) story_db.pop('starred_date', None) story_db.pop('id', None) story_db.pop('user_tags', None) now = datetime.datetime.now() story_values = dict(starred_date=now, user_tags=user_tags, **story_db) params = dict(story_guid=story.story_guid, user_id=request.user.pk) starred_story = MStarredStory.objects(**params).limit(1) created = False removed_user_tags = [] if not starred_story: params.update(story_values) if params.has_key('story_latest_content_z'): params.pop('story_latest_content_z') try: starred_story = MStarredStory.objects.create(**params) except OperationError, e: logging.user(request, "~FCStarring ~FRfailed~FC: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], e)) return {'code': -1, 'message': "Could not save story due to: %s" % e} created = True MActivity.new_starred_story(user_id=request.user.pk, story_title=story.story_title, story_feed_id=feed_id, story_id=starred_story.story_guid) new_user_tags = user_tags MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=1) else: starred_story = starred_story[0] new_user_tags = list(set(user_tags) - set(starred_story.user_tags or [])) removed_user_tags = list(set(starred_story.user_tags or []) - set(user_tags)) starred_story.user_tags = user_tags starred_story.save() for tag in new_user_tags: MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=1) for tag in removed_user_tags: MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1) if random.random() < 0.01: MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk) MStarredStoryCounts.count_for_user(request.user.pk, total_only=True) starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True) if not starred_count and len(starred_counts): starred_count = MStarredStory.objects(user_id=request.user.pk).count() r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'story:starred:%s' % story.story_hash) if created: logging.user(request, "~FCStarring: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], 
starred_story.user_tags)) else: logging.user(request, "~FCUpdating starred:~SN~FC ~SB%s~SN (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags)) return {'code': code, 'message': message, 'starred_count': starred_count, 'starred_counts': starred_counts} @required_params('story_id') @ajax_login_required @json.json_view def mark_story_as_unstarred(request): return _mark_story_as_unstarred(request) @required_params('story_hash') @ajax_login_required @json.json_view def mark_story_hash_as_unstarred(request): return _mark_story_as_unstarred(request) def _mark_story_as_unstarred(request): code = 1 story_id = request.POST.get('story_id', None) story_hash = request.REQUEST.get('story_hash', None) starred_counts = None starred_story = None if story_id: starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id) if not story_id or not starred_story: starred_story = MStarredStory.objects(user_id=request.user.pk, story_hash=story_hash or story_id) if starred_story: starred_story = starred_story[0] logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story.story_title[:50])) user_tags = starred_story.user_tags feed_id = starred_story.story_feed_id MActivity.remove_starred_story(user_id=request.user.pk, story_feed_id=starred_story.story_feed_id, story_id=starred_story.story_guid) starred_story.user_id = 0 try: starred_story.save() except NotUniqueError: starred_story.delete() MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=-1) for tag in user_tags: try: MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1) except MStarredStoryCounts.DoesNotExist: pass MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk) MStarredStoryCounts.count_for_user(request.user.pk, total_only=True) starred_counts = MStarredStoryCounts.user_counts(request.user.pk) r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL) r.publish(request.user.username, 'story:unstarred:%s' % starred_story.story_hash) else: code = -1 return {'code': code, 'starred_counts': starred_counts} @ajax_login_required @json.json_view def starred_counts(request): starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True) logging.user(request, "~FCRequesting starred counts: ~SB%s stories (%s tags)" % (starred_count, len([s for s in starred_counts if s['tag']]))) return {'starred_count': starred_count, 'starred_counts': starred_counts} @ajax_login_required @json.json_view def send_story_email(request): code = 1 message = 'OK' user = get_user(request) story_id = request.POST['story_id'] feed_id = request.POST['feed_id'] to_addresses = request.POST.get('to', '').replace(',', ' ').replace(' ', ' ').strip().split(' ') from_name = request.POST['from_name'] from_email = request.POST['from_email'] email_cc = is_true(request.POST.get('email_cc', 'true')) comments = request.POST['comments'] comments = comments[:2048] # Separated due to PyLint from_address = '[email protected]' share_user_profile = MSocialProfile.get_user(request.user.pk) quota = 32 if user.profile.is_premium else 1 if share_user_profile.over_story_email_quota(quota=quota): code = -1 if user.profile.is_premium: message = 'You can only send %s stories per day by email.' % quota else: message = 'Upgrade to a premium subscription to send more than one story per day by email.' 
logging.user(request, '~BRNOT ~BMSharing story by email to %s recipient, over quota: %s/%s' % (len(to_addresses), story_id, feed_id)) elif not to_addresses: code = -1 message = 'Please provide at least one email address.' elif not all(email_re.match(to_address) for to_address in to_addresses if to_addresses): code = -1 message = 'You need to send the email to a valid email address.' elif not email_re.match(from_email): code = -1 message = 'You need to provide your email address.' elif not from_name: code = -1 message = 'You need to provide your name.' else: story, _ = MStory.find_story(feed_id, story_id) story = Feed.format_story(story, feed_id, text=True) feed = Feed.get_by_id(story['story_feed_id']) params = { "to_addresses": to_addresses, "from_name": from_name, "from_email": from_email, "email_cc": email_cc, "comments": comments, "from_address": from_address, "story": story, "feed": feed, "share_user_profile": share_user_profile, } text = render_to_string('mail/email_story.txt', params) html = render_to_string('mail/email_story.xhtml', params) subject = '%s' % (story['story_title']) cc = None if email_cc: cc = ['%s <%s>' % (from_name, from_email)] subject = subject.replace('\n', ' ') msg = EmailMultiAlternatives(subject, text, from_email='NewsBlur <%s>' % from_address, to=to_addresses, cc=cc, headers={'Reply-To': "%s <%s>" % (from_name, from_email)}) msg.attach_alternative(html, "text/html") try: msg.send() except boto.ses.connection.BotoServerError, e: code = -1 message = "Email error: %s" % str(e) share_user_profile.save_sent_email() logging.user(request, '~BMSharing story by email to %s recipient%s: ~FY~SB%s~SN~BM~FY/~SB%s' % (len(to_addresses), '' if len(to_addresses) == 1 else 's', story['story_title'][:50], feed and feed.feed_title[:50])) return {'code': code, 'message': message} @json.json_view def load_tutorial(request): if request.REQUEST.get('finished'): logging.user(request, '~BY~FW~SBFinishing Tutorial') return {} else: newsblur_feed = Feed.objects.filter(feed_address__icontains='blog.newsblur.com').order_by('-pk')[0] logging.user(request, '~BY~FW~SBLoading Tutorial') return { 'newsblur_feed': newsblur_feed.canonical() } @required_params('query', 'feed_id') @json.json_view def save_search(request): feed_id = request.POST['feed_id'] query = request.POST['query'] MSavedSearch.save_search(user_id=request.user.pk, feed_id=feed_id, query=query) saved_searches = MSavedSearch.user_searches(request.user.pk) return { 'saved_searches': saved_searches, } @required_params('query', 'feed_id') @json.json_view def delete_search(request): feed_id = request.POST['feed_id'] query = request.POST['query'] MSavedSearch.delete_search(user_id=request.user.pk, feed_id=feed_id, query=query) saved_searches = MSavedSearch.user_searches(request.user.pk) return { 'saved_searches': saved_searches, }
{ "content_hash": "4df63582333b36fa5a1b8cd58e2cfb65", "timestamp": "", "source": "github", "line_count": 2624, "max_line_length": 163, "avg_line_length": 43.9828506097561, "alnum_prop": 0.5980885704135654, "repo_name": "AlphaCluster/NewsBlur", "id": "eb4afe4ed0fa0b12b8eb88639351b42a85d41bcc", "size": "115411", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/reader/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "454" }, { "name": "CSS", "bytes": "720684" }, { "name": "CoffeeScript", "bytes": "9696" }, { "name": "Dockerfile", "bytes": "1331" }, { "name": "HTML", "bytes": "492242" }, { "name": "Java", "bytes": "955691" }, { "name": "JavaScript", "bytes": "1680848" }, { "name": "Objective-C", "bytes": "2591129" }, { "name": "Perl", "bytes": "55598" }, { "name": "Python", "bytes": "2741187" }, { "name": "R", "bytes": "527" }, { "name": "Ruby", "bytes": "870" }, { "name": "Shell", "bytes": "40999" }, { "name": "Swift", "bytes": "3520" } ], "symlink_target": "" }
from channels import Group
from channels.sessions import channel_session
from django.contrib.auth.models import User
from authentification.models import Document, Sentence, Word, Locuteur
from backend.models import Historique
import json, re, math

user_room_dict = {}

# Connected to websocket.connect
@channel_session
def ws_connect(message):
    # accept the connection
    message.reply_channel.send({"accept": True})
    # get the name of the group in question (example: "edit19" -> editing the document with id 19)
    room = message.content['path'].split('/', 2 )[2]
    user = message.content['path'].split('/', 2 )[1] # id of the current user
    document = int(re.search(r'\d+$', room).group()) # id of the document

    if room in user_room_dict:
        user_room_dict[room].append(user)
    else:
        user_room_dict[room] = []
        user_room_dict[room].append(user)

    # store the group, the user and the document in the session and add the user to the group
    message.channel_session['room'] = room
    message.channel_session['user'] = user
    message.channel_session['document'] = document
    Group(room).add(message.reply_channel)

    # server side, assign each sentence to the right corrector
    # remove duplicates, because a user can be connected to the same document through several windows
    #user_without_doublon = []
    #for current_user in user_room_dict[message.channel_session['room']]:
        #if not current_user in user_without_doublon:
            #user_without_doublon.append(current_user)

    # get the document and the number of its sentences
    #sentences = Sentence.objects.filter(document__in = [Document.objects.get(id = int(message.channel_session['document']))]).order_by('start_time')
    #nb_sentences_affectee = math.floor(len(sentences) / len(user_without_doublon))
    #reste = len(sentences) % len(user_without_doublon)
    #for j, current_user in enumerate(user_without_doublon):
        #i = 0
        #while i < nb_sentences_affectee:
            #current = nb_sentences_affectee * j + i
            #sentence = sentences[current]
            #sentence.current_correcteur = User.objects.get(id = int(current_user))
            #sentence.save()
            #i += 1
        #if len(user_without_doublon) - 1 == j:
            #i = 1
            #while i <= reste:
                #sentence = sentences[len(sentences) - i]
                #sentence.current_correcteur = User.objects.get(id = int(current_user))
                #sentence.save()
                #i += 1

    # send back to the user's group the list of available users
    Group(room).send({
        "text": json.dumps({
            'type' : 'connect',
            'repartition' : user_room_dict[room]
        }),
    })

# Connected to websocket.receive
@channel_session
def ws_message(message):
    json_object = json.loads(message.content['text'])
    #json_object['type'] -> type of operation (validate, invalidate, delete, ..., the sentence)
    #json_object['value'] -> new value of the sentence
    #json_object['time_start'] -> new start time
    #json_object['time_end'] -> new end time
    #json_object['user_id'] -> id of the user making the change
    #json_object['sentence_id'] -> id of the sentence to modify
    # (see the example payload sketch at the end of this module)

    # check whether the user modifying a sentence is allowed to do so
    #if sentence.current_correcteur.id != json_object['user_id']:
        #return None

    if json_object['type'] != 'insert' and json_object['type'] != 'split':
        sentence = Sentence.objects.get(id = int(json_object['sentence_id']))

    historique = Historique()
    historique.user = User.objects.get(id = int(message.channel_session['user']))
    historique.document = Document.objects.get(id = int(message.channel_session['document']))
    historique.action = message.content['text']
    historique.save()

    # inaudible sentence
    if json_object['type'] == 'inaudible':
        sentence.inaudible = True
        sentence.save()

    # speaker change
    if json_object['type'] == 'locuteur':
        sentence.locuteur = Locuteur.objects.get(id = int(json_object['locuteur_id']))
        sentence.save()

    # checkbox: validate sentence
    if json_object['type'] == 'valid':
        sentence.valeur = json_object['value']
        sentence.start_time = json_object['time_start']
        sentence.end_time = json_object['time_end']
        sentence.validated = True
        sentence.save()

    # checkbox: invalidate sentence
    if json_object['type'] == 'invalid':
        sentence.validated = False
        sentence.save()

    # delete a sentence
    if json_object['type'] == 'delete':
        sentence.delete()

    # insert a sentence
    if json_object['type'] == 'insert':
        document_id = int(re.search(r'\d+$', message.channel_session['room']).group())
        new_sentence = Sentence()
        new_sentence.valeur = ''
        new_sentence.start_time = json_object['time_start']
        new_sentence.end_time = json_object['time_end']
        new_sentence.validated = False
        new_sentence.document = Document.objects.get(id = int(document_id))
        #new_sentence.locuteur = Locuteur.objects.filter(document__in = [Document.objects.get(id = int(document_id))])[0]
        new_sentence.save()
        # get the new id of the sentence so it can be kept up to date
        json_object['sentence_id'] = new_sentence.id

    # textarea onChange
    if json_object['type'] == 'edit_text':
        sentence.valeur = json_object['value']
        sentence.start_time = json_object['time_start']
        sentence.end_time = json_object['time_end']
        sentence.save()

    # update the start/end times
    if json_object['type'] == 'maj_time':
        sentence.start_time = json_object['time_start']
        sentence.end_time = json_object['time_end']
        sentence.save()

    # split a sentence in two
    if json_object['type'] == 'split':
        document_id = int(re.search(r'\d+$', message.channel_session['room']).group())
        sentence1 = Sentence.objects.get(id = int(json_object['sentence_id1']))
        sentence1.start_time = json_object['time_start1']
        sentence1.end_time = json_object['time_end1']
        sentence1.valeur = json_object['value1']
        sentence1.save()
        sentence2 = Sentence()
        sentence2.valeur = json_object['value2']
        sentence2.start_time = json_object['time_start2']
        sentence2.end_time = json_object['time_end2']
        sentence2.document = Document.objects.get(id = int(document_id))
        #sentence2.locuteur = Locuteur.objects.get(id = int(json_object['locuteur_id']))
        sentence2.save()
        # get the new id of the sentence so it can be kept up to date
        json_object['sentence_id2'] = sentence2.id

    # merge two sentences into one
    if json_object['type'] == 'merge':
        sentence_to_remove = Sentence.objects.get(id = int(json_object['to_delete']))
        sentence.start_time = json_object['time_start']
        sentence.end_time = json_object['time_end']
        sentence.valeur = json_object['value']
        sentence.save()
        sentence_to_remove.delete()

    # send back to the user's group the message we received
    Group(message.channel_session['room']).send({
        "text": json.dumps(json_object),
    })

# Connected to websocket.disconnect
@channel_session
def ws_disconnect(message):
    # message.channel_session['user'] --> user id of the user leaving the socket
    # message.channel_session['room'] --> room of the user leaving the socket
    delete = 0
    for i, val in enumerate(user_room_dict[message.channel_session['room']]):
        if val == message.channel_session['user']:
            delete = i
    user_room_dict[message.channel_session['room']].pop(delete)
    Group(message.channel_session['room']).discard(message.reply_channel)

    # if there is nobody left in the channel, there is no need to send a message
    if len(user_room_dict[message.channel_session['room']]) == 0:
        return None

    # server side, assign each sentence to the right corrector
    # remove duplicates, because a user can be connected to the same document through several windows
    #user_without_doublon = []
    #for current_user in user_room_dict[message.channel_session['room']]:
        #if not current_user in user_without_doublon:
            #user_without_doublon.append(current_user)

    # get the document and the number of its sentences
    #sentences = Sentence.objects.filter(document__in = [Document.objects.get(id = int(message.channel_session['document']))]).order_by('start_time')
    #nb_sentences_affectee = math.floor(len(sentences) / len(user_without_doublon))
    #reste = len(sentences) % len(user_without_doublon)
    #for j, current_user in enumerate(user_without_doublon):
        #i = 0
        #while i < nb_sentences_affectee:
            #current = nb_sentences_affectee * j + i
            #sentence = sentences[current]
            #sentence.current_correcteur = User.objects.get(id = int(current_user))
            #sentence.save()
            #i += 1
        #if len(user_without_doublon) - 1 == j:
            #i = 1
            #while i <= reste:
                #sentence = sentences[len(sentences) - i]
                #sentence.current_correcteur = User.objects.get(id = int(current_user))
                #sentence.save()
                #i += 1

    # send back to the user's group the list of available users
    Group(message.channel_session['room']).send({
        "text": json.dumps({
            'type' : 'disconnect',
            'repartition' : user_room_dict[message.channel_session['room']]
        }),
    })
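
# Minimal sketch of a client payload that ws_message() above expects, assuming only the
# keys documented in its comments; the concrete values are hypothetical and the constant
# name below does not exist in the original module.
EXAMPLE_EDIT_TEXT_MESSAGE = {
    'type': 'edit_text',   # or: inaudible, locuteur, valid, invalid, delete,
                           #     insert, maj_time, split, merge
    'sentence_id': 42,     # id of the Sentence being edited
    'value': 'corrected transcription text',
    'time_start': 12.5,
    'time_end': 15.0,
    'user_id': 7,
}
# A browser client would send this over the websocket as text (e.g. JSON.stringify on the
# JavaScript side); it arrives here as message.content['text'] and is decoded with json.loads().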
{ "content_hash": "56eced715a010941b897bd1cbee93ee3", "timestamp": "", "source": "github", "line_count": 256, "max_line_length": 149, "avg_line_length": 38.53515625, "alnum_prop": 0.6359858084135834, "repo_name": "rachidoulasri/django_projectwebpage", "id": "cc8ccb9cc58c6fcdab1babaf8b04a5701c7c661c", "size": "9865", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "backend/consumers.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "11267" }, { "name": "CSS", "bytes": "50162" }, { "name": "HTML", "bytes": "30467" }, { "name": "JavaScript", "bytes": "1092528" }, { "name": "Makefile", "bytes": "529" }, { "name": "Python", "bytes": "52109" } ], "symlink_target": "" }
def get_message_from_world():
    from rez.config import config
    message = config.plugins.command.world.message
    return message
{ "content_hash": "571c1092543dae2527cb141666803276", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 50, "avg_line_length": 33.5, "alnum_prop": 0.7388059701492538, "repo_name": "instinct-vfx/rez", "id": "a703c053ca1924eb04b50f16f9ed562c12821dcd", "size": "218", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "example_extensions/hello_cmd/lib.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "13" }, { "name": "CMake", "bytes": "61812" }, { "name": "Dockerfile", "bytes": "3668" }, { "name": "PowerShell", "bytes": "1390" }, { "name": "Python", "bytes": "1950470" }, { "name": "Shell", "bytes": "3185" } ], "symlink_target": "" }
import pytest from django.http import HttpResponse from urlparse import urlparse import mock from nose.tools import * # flake8: noqa from rest_framework.test import APIRequestFactory from django.test.utils import override_settings from website.util import api_v2_url from api.base import settings from api.base.middleware import CorsMiddleware from tests.base import ApiTestCase from osf_tests import factories class MiddlewareTestCase(ApiTestCase): MIDDLEWARE = None def setUp(self): super(MiddlewareTestCase, self).setUp() self.middleware = self.MIDDLEWARE() self.mock_response = mock.Mock() self.request_factory = APIRequestFactory() class TestCorsMiddleware(MiddlewareTestCase): MIDDLEWARE = CorsMiddleware @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def test_institutions_added_to_cors_whitelist(self): url = api_v2_url('users/me/') domain = urlparse('https://dinosaurs.sexy') institution = factories.InstitutionFactory( domains=[domain.netloc.lower()], name='Institute for Sexy Lizards' ) settings.load_origins_whitelist() request = self.request_factory.get(url, HTTP_ORIGIN=domain.geturl()) response = HttpResponse() self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_equal(response['Access-Control-Allow-Origin'], domain.geturl()) @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def test_preprintproviders_added_to_cors_whitelist(self): url = api_v2_url('users/me/') domain = urlparse('https://dinoprints.sexy') preprintprovider = factories.PreprintProviderFactory( domain=domain.geturl().lower(), _id='DinoXiv' ) settings.load_origins_whitelist() request = self.request_factory.get(url, HTTP_ORIGIN=domain.geturl()) response = HttpResponse() self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_equal(response['Access-Control-Allow-Origin'], domain.geturl()) @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def test_cross_origin_request_with_cookies_does_not_get_cors_headers(self): url = api_v2_url('users/me/') domain = urlparse('https://dinosaurs.sexy') request = self.request_factory.get(url, HTTP_ORIGIN=domain.geturl()) response = {} with mock.patch.object(request, 'COOKIES', True): self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_not_in('Access-Control-Allow-Origin', response) @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def test_cross_origin_request_with_Authorization_gets_cors_headers(self): url = api_v2_url('users/me/') domain = urlparse('https://dinosaurs.sexy') request = self.request_factory.get( url, HTTP_ORIGIN=domain.geturl(), HTTP_AUTHORIZATION='Bearer aqweqweohuweglbiuwefq' ) response = HttpResponse() self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_equal(response['Access-Control-Allow-Origin'], domain.geturl()) @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def test_cross_origin_request_with_Authorization_and_cookie_does_not_get_cors_headers( self): url = api_v2_url('users/me/') domain = urlparse('https://dinosaurs.sexy') request = self.request_factory.get( url, HTTP_ORIGIN=domain.geturl(), HTTP_AUTHORIZATION='Bearer aqweqweohuweglbiuwefq' ) response = {} with mock.patch.object(request, 'COOKIES', True): self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_not_in('Access-Control-Allow-Origin', response) @override_settings(CORS_ORIGIN_ALLOW_ALL=False) def 
test_non_institution_preflight_request_requesting_authorization_header_gets_cors_headers( self): url = api_v2_url('users/me/') domain = urlparse('https://dinosaurs.sexy') request = self.request_factory.options( url, HTTP_ORIGIN=domain.geturl(), HTTP_ACCESS_CONTROL_REQUEST_METHOD='GET', HTTP_ACCESS_CONTROL_REQUEST_HEADERS='authorization' ) response = HttpResponse() self.middleware.process_request(request) processed = self.middleware.process_response(request, response) assert_equal(response['Access-Control-Allow-Origin'], domain.geturl())
{ "content_hash": "feb96e73b5a336e400a5e502458fb97e", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 97, "avg_line_length": 41.37068965517241, "alnum_prop": 0.6699312356740987, "repo_name": "erinspace/osf.io", "id": "57dba001b1ca4ca1558b99cbe7c67d5e76e67108", "size": "4823", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "api_tests/base/test_middleware.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "92866" }, { "name": "Dockerfile", "bytes": "8455" }, { "name": "HTML", "bytes": "272808" }, { "name": "JavaScript", "bytes": "1796633" }, { "name": "Mako", "bytes": "665847" }, { "name": "Python", "bytes": "8478871" }, { "name": "VCL", "bytes": "13885" } ], "symlink_target": "" }
import collections from ..error import Error from ..language import ast def is_input_type(type): named_type = get_named_type(type) return isinstance(named_type, ( GraphQLScalarType, GraphQLEnumType, GraphQLInputObjectType, )) def is_composite_type(type): named_type = get_named_type(type) return isinstance(named_type, ( GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType, )) def is_leaf_type(type): named_type = get_named_type(type) return isinstance(named_type, ( GraphQLScalarType, GraphQLEnumType, )) def get_named_type(type): unmodified_type = type while isinstance(unmodified_type, (GraphQLList, GraphQLNonNull)): unmodified_type = unmodified_type.of_type return unmodified_type def get_nullable_type(type): if isinstance(type, GraphQLNonNull): return type.of_type return type class GraphQLType(object): def __str__(self): return self.name class GraphQLScalarType(GraphQLType): """Scalar Type Definition The leaf values of any request and input values to arguments are Scalars (or Enums) and are defined with a name and a series of coercion functions used to ensure validity. Example: def coerce_odd(value): if value % 2 == 1: return value return None OddType = GraphQLScalarType(name='Odd', coerce=coerce_odd) """ def __init__(self, name, description=None, coerce=None, coerce_literal=None): assert name, 'Type must be named.' self.name = name self.description = description self._coerce = coerce self._coerce_literal = coerce_literal def coerce(self, value): return self._coerce(value) def coerce_literal(self, value): if self._coerce_literal: return self._coerce_literal(value) return None def __str__(self): return self.name class GraphQLObjectType(GraphQLType): """Object Type Definition Almost all of the GraphQL types you define will be object types. Object types have a name, but most importantly describe their fields. Example: AddressType = GraphQLObjectType('Address', { 'street': GraphQLField(GraphQLString), 'number': GraphQLField(GraphQLInt), 'formatted': GraphQLField(GraphQLString, resolver=lambda obj, *_: obj.number + ' ' + obj.street), }) When two types need to refer to each other, or a type needs to refer to itself in a field, you can use a static method to supply the fields lazily. Example: PersonType = GraphQLObjectType('Person', lambda: { 'name': GraphQLField(GraphQLString), 'bestFriend': GraphQLField(PersonType) }) """ def __init__(self, name, fields, interfaces=None, is_type_of=None, description=None): assert name, 'Type must be named.' 
self.name = name self.description = description self._fields = fields self._field_map = None self._interfaces = interfaces or [] self._is_type_of = is_type_of add_impl_to_interfaces(self) def get_fields(self): if self._field_map is None: self._field_map = define_field_map(self._fields) return self._field_map def get_interfaces(self): return self._interfaces def is_type_of(self, value): if self._is_type_of: return self._is_type_of(value) def define_field_map(fields): if callable(fields): fields = fields() for field_name, field in fields.items(): field.name = field_name return fields def add_impl_to_interfaces(impl): for type in impl.get_interfaces(): type._impls.append(impl) class GraphQLField(object): def __init__(self, type, args=None, resolver=None, deprecation_reason=None, description=None): self.type = type self.args = [] if args: for arg_name, arg in args.items(): arg.name = arg_name self.args.append(arg) self.resolver = resolver self.deprecation_reason = deprecation_reason self.description = description class GraphQLArgument(object): def __init__(self, type, default_value=None, description=None): self.type = type self.default_value = default_value self.description = description class GraphQLInterfaceType(GraphQLType): """Interface Type Definition When a field can return one of a heterogeneous set of types, a Interface type is used to describe what types are possible, what fields are in common across all types, as well as a function to determine which type is actually used when the field is resolved. Example: EntityType = GraphQLInterfaceType( name='Entity', fields={ 'name': GraphQLField(GraphQLString), }) """ def __init__(self, name, fields=None, resolve_type=None, description=None): assert name, 'Type must be named.' self.name = name self.description = description self._fields = fields or {} self._resolver = resolve_type self._impls = [] self._field_map = None self._possible_type_names = None def get_fields(self): if self._field_map is None: self._field_map = define_field_map(self._fields) return self._field_map def get_possible_types(self): return self._impls def is_possible_type(self, type): if self._possible_type_names is None: self._possible_type_names = set( t.name for t in self.get_possible_types() ) return type.name in self._possible_type_names def resolve_type(self, value): if self._resolver: return self._resolver(value) return get_type_of(value, self) def get_type_of(value, abstract_type): possible_types = abstract_type.get_possible_types() for type in possible_types: is_type_of = type.is_type_of(value) if is_type_of is None: raise Error( 'Non-Object Type {} does not implement resolve_type and ' 'Object Type {} does not implement is_type_of. ' 'There is no way to determine if a value is of this type.' .format(abstract_type.name, type.name) ) if is_type_of: return type class GraphQLUnionType(GraphQLType): """Union Type Definition When a field can return one of a heterogeneous set of types, a Union type is used to describe what types are possible as well as providing a function to determine which type is actually used when the field is resolved. Example: class PetType(GraphQLUnionType): name = 'Pet' types = [DogType, CatType] def resolve_type(self, value): if isinstance(value, Dog): return DogType() if isinstance(value, Cat): return CatType() """ def __init__(self, name, types=None, resolve_type=None, description=None): assert name, 'Type must be named.' 
self.name = name self.description = description assert types, \ 'Must provide types for Union {}.'.format(name) self._possible_type_names = None non_obj_types = [t for t in types if not isinstance(t, GraphQLObjectType)] if non_obj_types: raise Error( 'Union {} may only contain object types, it cannot ' 'contain: {}.'.format( self.name, ', '.join(str(t) for t in non_obj_types) ) ) self._types = types self._resolve_type = resolve_type def get_possible_types(self): return self._types def is_possible_type(self, type): if self._possible_type_names is None: self._possible_type_names = set( t.name for t in self.get_possible_types() ) return type.name in self._possible_type_names def resolve_type(self, value): if self._resolve_type: return self._resolve_type(value) return get_type_of(value, self) class GraphQLEnumType(GraphQLType): """Enum Type Definition Some leaf values of requests and input values are Enums. GraphQL serializes Enum values as strings, however internally Enums can be represented by any kind of type, often integers. Example: RGBType = GraphQLEnumType('RGB', { 'RED': 0, 'GREEN': 1, 'BLUE': 2, }) Note: If a value is not provided in a definition, the name of the enum value will be used as it's internal value. """ def __init__(self, name, values, description=None): self.name = name self.description = description self._values = values self._value_map = None self._value_lookup = None self._name_lookup = None def get_values(self): if self._value_map is None: self._value_map = self._define_value_map() return self._value_map def coerce(self, value): if isinstance(value, collections.Hashable): enum_value = self._get_value_lookup().get(value) if enum_value: return enum_value.name return None def coerce_literal(self, value): if isinstance(value, ast.EnumValue): enum_value = self._get_name_lookup().get(value.value) if enum_value: return enum_value.value def _define_value_map(self): value_map = {} for value_name, value in self._values.items(): if not isinstance(value, GraphQLEnumValue): value = GraphQLEnumValue(value) value.name = value_name if value.value is None: value.value = value_name value_map[value_name] = value return value_map def _get_value_lookup(self): if self._value_lookup is None: lookup = {} for value_name, value in self.get_values().items(): lookup[value.value] = value self._value_lookup = lookup return self._value_lookup def _get_name_lookup(self): if self._name_lookup is None: lookup = {} for value_name, value in self.get_values().items(): lookup[value.name] = value self._name_lookup = lookup return self._name_lookup class GraphQLEnumValue(object): def __init__(self, value=None, deprecation_reason=None, description=None): self.value = value self.deprecation_reason = deprecation_reason self.description = description class GraphQLInputObjectType(GraphQLType): """Input Object Type Definition An input object defines a structured collection of fields which may be supplied to a field argument. Using `NonNull` will ensure that a value must be provided by the query Example: NonNullFloat = GraphQLNonNull(GraphQLFloat()) class GeoPoint(GraphQLInputObjectType): name = 'GeoPoint' fields = { 'lat': GraphQLInputObjectField(NonNullFloat), 'lon': GraphQLInputObjectField(NonNullFloat), 'alt': GraphQLInputObjectField(GraphQLFloat(), default_value=0) } """ def __init__(self, name, fields, description=None): assert name, 'Type must be named.' 
self.name = name self.description = description self._fields = fields self._field_map = None def get_fields(self): if self._field_map is None: self._field_map = define_field_map(self._fields) return self._field_map class GraphQLInputObjectField(object): def __init__(self, type, default_value=None, description=None): self.type = type self.default_value = default_value self.description = description class GraphQLList(GraphQLType): """List Modifier A list is a kind of type marker, a wrapping type which points to another type. Lists are often created within the context of defining the fields of an object type. Example: class PersonType(GraphQLObjectType): name = 'Person' def get_fields(self): return { 'parents': GraphQLField(GraphQLList(PersonType())), 'children': GraphQLField(GraphQLList(PersonType())), } """ def __init__(self, type): self.of_type = type def __str__(self): return '[' + str(self.of_type) + ']' class GraphQLNonNull(GraphQLType): """Non-Null Modifier A non-null is a kind of type marker, a wrapping type which points to another type. Non-null types enforce that their values are never null and can ensure an error is raised if this ever occurs during a request. It is useful for fields which you can make a strong guarantee on non-nullability, for example usually the id field of a database row will never be null. Example: class RowType(GraphQLObjectType): name = 'Row' fields = { 'id': GraphQLField(type=GraphQLNonNull(GraphQLString())) } Note: the enforcement of non-nullability occurs within the executor. """ def __init__(self, type): assert not isinstance(type, GraphQLNonNull), \ 'Cannot nest NonNull inside NonNull.' self.of_type = type def __str__(self): return str(self.of_type) + '!'
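
# Minimal usage sketch, assuming the classes above are imported from this module.
# The scalar below is a throwaway stand-in defined inline; it is not the library's real
# GraphQLString scalar, and PersonType is purely illustrative.
StringType = GraphQLScalarType(name='String', coerce=str)

PersonType = GraphQLObjectType('Person', lambda: {
    'name': GraphQLField(GraphQLNonNull(StringType)),
    'friends': GraphQLField(GraphQLList(PersonType)),
})

# The lambda keeps the field map lazy, so PersonType can refer to itself. The helpers at
# the top of the module can then classify it: is_composite_type(PersonType) is True and
# get_named_type(GraphQLList(PersonType)) returns PersonType.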
{ "content_hash": "ea6b7740fd90e89f3333313d51d6b269", "timestamp": "", "source": "github", "line_count": 451, "max_line_length": 142, "avg_line_length": 30.827050997782706, "alnum_prop": 0.6008775084514134, "repo_name": "rawls238/graphql-py", "id": "408eb7a880ff809c9de449067ca9223ca2281d57", "size": "13903", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "graphql/core/type/definition.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "289680" } ], "symlink_target": "" }
from os.path import abspath, dirname, normpath, join import os import sys BASEDIR = dirname(abspath(__file__)) LOG = normpath(join(BASEDIR, '..', 'log.html')) TESTDATA = join(BASEDIR, 'dir.suite') OUTPUT = join(BASEDIR, 'output.xml') TARGET = join(BASEDIR, 'data.js') SRC = normpath(join(BASEDIR, '..', '..', '..')) sys.path.insert(0, SRC) from robot import run from robot.conf.settings import RebotSettings from robot.reporting.resultwriter import Results from robot.reporting.jswriter import JsResultWriter from robot.utils import file_writer def run_robot(testdata, outxml): run(testdata, loglevel='DEBUG', output=outxml, log=None, report=None) def create_jsdata(outxml, target): settings = RebotSettings({ 'name': '<Suite.Name>', 'critical': ['i?'], 'noncritical': ['*kek*kone*'], 'tagstatlink': ['force:http://google.com:<kuukkeli&gt;', 'i*:http://%1/?foo=bar&zap=%1:Title of i%1', '?1:http://%1/<&>:Title', '</script>:<url>:<title>'], 'tagdoc': ['test:this_is_*my_bold*_test', 'IX:*Combined* and escaped <&lt; tag doc', 'i*:Me, myself, and I.', '</script>:<doc>'], 'tagstatcombine': ['fooANDi*:No Match', 'long1ORcollections', 'i?:IX', '<*>:<any>'] }) result = Results(settings, outxml).js_result config = {'logURL': 'log.html', 'title': 'This is a long long title. A very long title indeed. ' 'And it even contains some stuff to <esc&ape>. ' 'Yet it should still look good.', 'minLevel': 'DEBUG', 'defaultLevel': 'DEBUG', 'reportURL': 'report.html', 'background': {'fail': 'DeepPink'}} with file_writer(target) as output: writer = JsResultWriter(output, start_block='', end_block='') writer.write(result, config) print 'Log: ', normpath(join(BASEDIR, '..', 'rebot', 'log.html')) print 'Report: ', normpath(join(BASEDIR, '..', 'rebot', 'report.html')) if __name__ == '__main__': run_robot(TESTDATA, OUTPUT) create_jsdata(OUTPUT, TARGET) os.remove(OUTPUT)
{ "content_hash": "8a60c53a4758ba37375c0ac3facf955d", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 78, "avg_line_length": 37.20967741935484, "alnum_prop": 0.5483311660164716, "repo_name": "alexandrul-ci/robotframework", "id": "7868fcaed37727e4f5007fb071f946cc8ade8ed9", "size": "2330", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "src/robot/htmldata/testdata/create_jsdata.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "245" }, { "name": "CSS", "bytes": "27452" }, { "name": "HTML", "bytes": "140980" }, { "name": "Java", "bytes": "58264" }, { "name": "JavaScript", "bytes": "161259" }, { "name": "Python", "bytes": "2271402" }, { "name": "RobotFramework", "bytes": "2096190" }, { "name": "Shell", "bytes": "281" } ], "symlink_target": "" }
""" Talon.One API Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import datetime import talon_one from talon_one.models.entity import Entity # noqa: E501 from talon_one.rest import ApiException class TestEntity(unittest.TestCase): """Entity unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test Entity include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = talon_one.models.entity.Entity() # noqa: E501 if include_optional : return Entity( id = 6, created = '2020-06-10T09:05:27.993483Z' ) else : return Entity( id = 6, created = '2020-06-10T09:05:27.993483Z', ) def testEntity(self): """Test Entity""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main()
{ "content_hash": "ebbca951e31678c3a7495715659105b6", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 732, "avg_line_length": 38.22641509433962, "alnum_prop": 0.6618953603158934, "repo_name": "talon-one/talon_one.py", "id": "c16569866123a0c42880c33789589f7537fcfb56", "size": "2043", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_entity.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "238" }, { "name": "Python", "bytes": "5139586" }, { "name": "Shell", "bytes": "1826" } ], "symlink_target": "" }
import urllib.parse import pkg_resources import tempfile from netforce import database import os import random import string import json import hmac import hashlib import base64 import time import sys from struct import Struct from operator import xor from itertools import starmap import binascii import signal import platform import re import math import decimal try: import dns.resolver HAS_DNS = True except: HAS_DNS = False from . import config import xmlrpc.client from pprint import pprint def get_data_path(data, path, default=None, parent=False): if not path: return data val = data fields = path.split(".") if parent: fields = fields[:-1] for field in fields: if not field.isdigit(): if not isinstance(val, dict): return default val = val.get(field, default) else: ind = int(field) if not isinstance(val, list) or ind >= len(val): return default val = val[ind] return val def set_data_path(data, path, val): fields = path.split(".") if data is None: if not fields[0].isdigit() and fields[0] != "[]": data = {} else: data = [] obj = data for i, field in enumerate(fields): if i < len(fields) - 1: next_field = fields[i + 1] if not next_field.isdigit() and next_field != "[]": v = {} else: v = [] last = False else: v = val last = True if not field.isdigit() and field != "[]": if last: obj[field] = v else: obj = obj.setdefault(field, v) else: if field == "[]": obj.append(v) else: ind = int(field) while len(obj) <= ind: obj.append(None) if last: obj[ind] = v else: if obj[ind] is None: obj[ind] = v obj = obj[ind] return data def is_sub_url(url, base_url): o1 = urllib.parse.urlparse(base_url) o2 = urllib.parse.urlparse(url) if o2.path != o1.path: return False q1 = urllib.parse.parse_qs(o1.query) q2 = urllib.parse.parse_qs(o2.query) for k, v1 in q1.items(): v2 = q2.get(k) if v2 != v1: return False return True def get_ip_country(ip): # TODO: remove this return None def rmdup(seq): seen = set() seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)] def get_file_path(fname): if not fname: return None dbname = database.get_active_db() if not dbname: return None path = os.path.join(os.getcwd(), "static", "db", dbname, "files", fname) return path def gen_passwd(n=8, numeric=False): if numeric: chars = string.digits else: chars = string.ascii_letters + string.digits return "".join([random.choice(chars) for i in range(n)]) def eval_json(expr, ctx): def _eval_var(name): if name in ("true", "false", "null"): return name comps = name.split(".") v = ctx for n in comps: v = v.get(n) if not isinstance(v, dict): return v return v chunks = [] state = "other" start = 0 for i, c in enumerate(expr): if state == "other": if c == "\"": state = "string" elif c.isalpha() or c == "_": chunks.append(expr[start:i]) state = "var" start = i elif state == "string": if c == "\"": state = "other" elif c == "\\": state = "escape" elif state == "escape": state = "string" elif state == "var": if not c.isalnum() and c != "_" and c != ".": n = expr[start:i].strip() v = _eval_var(n) if v is None: s="null" else: s=str(v) chunks.append(s) state = "other" start = i chunks.append(expr[start:]) data = "".join(chunks) return json.loads(data) _UTF8_TYPES = (bytes, type(None)) def utf8(value): if isinstance(value, _UTF8_TYPES): return value assert isinstance(value, str) return value.encode("utf-8") _TO_UNICODE_TYPES = (str, type(None)) def to_unicode(value): if isinstance(value, _TO_UNICODE_TYPES): return value assert isinstance(value, bytes) return value.decode("utf-8") def _create_signature(secret, *parts): hash = 
hmac.new(utf8(secret), digestmod=hashlib.sha1) for part in parts: hash.update(utf8(part)) return utf8(hash.hexdigest()) def _time_independent_equals(a, b): if len(a) != len(b): return False result = 0 for x, y in zip(a, b): result |= x ^ y return result == 0 def _decode_signed_value(secret, name, value, max_age_days=31): if not value: return None parts = utf8(value).split(b"|") if len(parts) != 3: return None signature = _create_signature(secret, name, parts[0], parts[1]) if not _time_independent_equals(parts[2], signature): print("WARNING: Invalid cookie signature %r" % value) return None timestamp = int(parts[1]) if timestamp < time.time() - max_age_days * 86400: print("WARNING: Expired cookie %r" % value) return None if timestamp > time.time() + 31 * 86400: # _cookie_signature does not hash a delimiter between the # parts of the cookie, so an attacker could transfer trailing # digits from the payload to the timestamp without altering the # signature. For backwards compatibility, sanity-check timestamp # here instead of modifying _cookie_signature. print("WARNING: Cookie timestamp in future; possible tampering %r" % value) return None if parts[1].startswith(b"0"): print("WARNING: Tampered cookie %r" % value) try: return base64.b64decode(parts[0]) except Exception: return None def _create_signed_value(secret, name, value): timestamp = utf8(str(int(time.time()))) value = base64.b64encode(utf8(value)) signature = _create_signature(secret, name, value, timestamp) value = b"|".join([value, timestamp, signature]) return value _token_secret=None def get_token_secret(): global _token_secret if _token_secret is not None: return _token_secret path=".token_secret" if os.path.exists(path): _token_secret=open(path).read() else: _token_secret=gen_passwd(20) open(path,"w").write(_token_secret) return _token_secret def new_token(dbname, user_id): user = "%s %s" % (dbname, user_id) secret=get_token_secret() token = to_unicode(_create_signed_value(secret, "user", user)) return token def check_token(dbname, user_id, token, schema=None): # print("check_token",dbname,user_id,token) if schema: user = "%s.%s %s" % (dbname, schema, user_id) else: user = "%s %s" % (dbname, user_id) secret=get_token_secret() val = to_unicode(_decode_signed_value(secret, "user", token)) return val == user def url_escape(value): return urllib.parse.quote_plus(utf8(value)) def url_unescape(value, encoding='utf-8'): # XXX: check tornado return urllib.parse.unquote_plus(value, encoding=encoding) def format_color(msg, color=None, bright=False): color_codes = { "black": 0, "red": 1, "green": 2, "yellow": 3, "blue": 4, "magenta": 5, "cyan": 6, "white": 7, } code = color_codes.get(color, 7) head = "\x1b[3%dm" % code if bright: head += "\x1b[1m" foot = "\x1b[39;49m" if bright: foot += "\x1b[22m" return head + msg + foot def print_color(msg, color=None, bright=False): if sys.stdout.isatty(): msg = format_color(msg, color=color, bright=bright) print(msg) def compare_version(v1, v2): v1_ = [int(d) for d in v1.split(".")] v2_ = [int(d) for d in v2.split(".")] if v1_ < v2_: return -1 if v1_ > v2_: return 1 return 0 def get_db_version(): db = database.get_connection() schema = database.get_active_schema() or "public" res = db.get("SELECT * FROM pg_class JOIN pg_catalog.pg_namespace n ON n.oid=pg_class.relnamespace WHERE relname='settings' AND nspname=%s",schema) if not res: return None res = db.get("SELECT * FROM settings WHERE id=1") if not res: return None return res.version def set_db_version(version): db = database.get_connection() schema = 
database.get_active_schema() or "public" res = db.get("SELECT * FROM pg_class JOIN pg_catalog.pg_namespace n ON n.oid=pg_class.relnamespace WHERE relname='settings' AND nspname=%s",schema) if not res: raise Exception("Missing settings table") res = db.get("SELECT * FROM settings WHERE id=1") if not res: raise Exception("Missing settings record") db.execute("UPDATE settings SET version=%s WHERE id=1", version) def is_empty_db(): db = database.get_connection() schema = database.get_active_schema() or "public" res = db.get("SELECT * FROM pg_class JOIN pg_catalog.pg_namespace n ON n.oid=pg_class.relnamespace WHERE relname='settings' AND nspname=%s",schema) if not res: return True res = db.get("SELECT * FROM settings WHERE id=1") if not res: return True return False def init_db(): db = database.get_connection() db.execute("INSERT INTO profile (id,name,default_model_perms) VALUES (1,'System Admin','full')") db.execute("INSERT INTO settings (id,anon_profile_id) VALUES (1,1)") enc_pass=encrypt_password('1234') db.execute("INSERT INTO base_user (id,login,password,name,profile_id,active) VALUES (1,'admin',%s,'Admin',1,true)",enc_pass) db.execute("INSERT INTO company (id,name) VALUES (1,'Test Company')") _pack_int = Struct('>I').pack def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None): return binascii.hexlify(pbkdf2_bin(data, salt, iterations, keylen, hashfunc)).decode() def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None): if isinstance(data, str): data = data.encode("utf-8") if isinstance(salt, str): salt = salt.encode("utf-8") hashfunc = hashfunc or hashlib.sha1 mac = hmac.new(data, None, hashfunc) def _pseudorandom(x, mac=mac): h = mac.copy() h.update(x) return h.digest() buf = [] for block in range(1, -(-keylen // mac.digest_size) + 1): rv = u = _pseudorandom(salt + _pack_int(block)) for i in range(iterations - 1): u = _pseudorandom(u) rv = starmap(xor, zip(rv, u)) buf.extend(rv) return bytes(buf[:keylen]) def encrypt_password(password): algo = "pbkdf2" salt = binascii.hexlify(os.urandom(8)).decode() hsh = pbkdf2_hex(password, salt) return "%s$%s$%s" % (algo, salt, hsh) def check_password(password, enc_password): master_pwd = config.get("master_password") if master_pwd and password == master_pwd: return True if not password or not enc_password: return False algo, salt, hsh = enc_password.split("$") if algo != "pbkdf2": raise Exception("Unknown password encryption algorithm") hsh2 = pbkdf2_hex(password, salt) return hsh2 == hsh class timeout: # XXX: doesn't seem to work yet... (some jsonrpc requests take more than 5min) def __init__(self, seconds=None): self.seconds = seconds def handle_timeout(self, signum, frame): raise Exception("Timeout!") def __enter__(self): if self.seconds and platform.system() != "Windows": signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) def __exit__(self, type, value, traceback): if self.seconds and platform.system() != "Windows": signal.alarm(0) def get_email_domain(email_addr): s = email_addr.strip() domain = s[s.find('@') + 1:].lower() return domain def get_mx_records(domain): if not HAS_DNS: raise Exception("dnspython library not installed") try: res = dns.resolver.query(domain, "MX") except: return None records = sorted([(int(r.preference), str(r.exchange)) for r in res]) return records WSP = r'[ \t]' # see 2.2.2. Structured Header Field Bodies CRLF = r'(?:\r\n)' # see 2.2.3. Long Header Fields NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f' # see 3.2.1. 
Primitive Tokens QUOTED_PAIR = r'(?:\\.)' # see 3.2.2. Quoted characters FWS = r'(?:(?:' + WSP + r'*' + CRLF + r')?' + \ WSP + r'+)' # see 3.2.3. Folding white space and comments CTEXT = r'[' + NO_WS_CTL + \ r'\x21-\x27\x2a-\x5b\x5d-\x7e]' # see 3.2.3 CCONTENT = r'(?:' + CTEXT + r'|' + \ QUOTED_PAIR + r')' # see 3.2.3 (NB: The RFC includes COMMENT here # as well, but that would be circular.) COMMENT = r'\((?:' + FWS + r'?' + CCONTENT + \ r')*' + FWS + r'?\)' # see 3.2.3 CFWS = r'(?:' + FWS + r'?' + COMMENT + ')*(?:' + \ FWS + '?' + COMMENT + '|' + FWS + ')' # see 3.2.3 ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]' # see 3.2.4. Atom ATOM = CFWS + r'?' + ATEXT + r'+' + CFWS + r'?' # see 3.2.4 DOT_ATOM_TEXT = ATEXT + r'+(?:\.' + ATEXT + r'+)*' # see 3.2.4 DOT_ATOM = CFWS + r'?' + DOT_ATOM_TEXT + CFWS + r'?' # see 3.2.4 QTEXT = r'[' + NO_WS_CTL + \ r'\x21\x23-\x5b\x5d-\x7e]' # see 3.2.5. Quoted strings QCONTENT = r'(?:' + QTEXT + r'|' + \ QUOTED_PAIR + r')' # see 3.2.5 QUOTED_STRING = CFWS + r'?' + r'"(?:' + FWS + \ r'?' + QCONTENT + r')*' + FWS + \ r'?' + r'"' + CFWS + r'?' LOCAL_PART = r'(?:' + DOT_ATOM + r'|' + \ QUOTED_STRING + r')' # see 3.4.1. Addr-spec specification DTEXT = r'[' + NO_WS_CTL + r'\x21-\x5a\x5e-\x7e]' # see 3.4.1 DCONTENT = r'(?:' + DTEXT + r'|' + \ QUOTED_PAIR + r')' # see 3.4.1 DOMAIN_LITERAL = CFWS + r'?' + r'\[' + \ r'(?:' + FWS + r'?' + DCONTENT + \ r')*' + FWS + r'?\]' + CFWS + r'?' # see 3.4.1 DOMAIN = r'(?:' + DOT_ATOM + r'|' + \ DOMAIN_LITERAL + r')' # see 3.4.1 ADDR_SPEC = LOCAL_PART + r'@' + DOMAIN # see 3.4.1 # A valid address will match exactly the 3.4.1 addr-spec. VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$' def check_email_syntax(email_addr): m = re.match(VALID_ADDRESS_REGEXP, email_addr) if not m: return False return True def round_amount(amt, rounding, method="nearest"): if not rounding: return amt if method == "nearest": i = round((amt + 0.000001) / rounding) elif method == "lower": i = math.floor(amt / rounding) elif method == "upper": i = math.ceil(amt / rounding) else: raise Exception("Invalid rounding method") return i * rounding def decimal_default(obj): if isinstance(obj, decimal.Decimal): return float(obj) raise TypeError def json_dumps(val): return json.dumps(val, default=decimal_default) def json_loads(s): return json.loads(s, parse_float=decimal.Decimal) class XmlRpcCookieTransport(xmlrpc.client.Transport): def __init__(self): super().__init__() self._cookies = [] def send_headers(self, connection, headers): if self._cookies: connection.putheader("Cookie", "; ".join(self._cookies)) print("cookies",self._cookies) super().send_headers(connection, headers) def parse_response(self, response): for header in response.msg.get_all("Set-Cookie") or []: cookie = header.split(";", 1)[0] self._cookies.append(cookie) return super().parse_response(response) def create_thumbnails(fname): print("create_thumbnails",fname) dbname = database.get_active_db() if not dbname: return None fdir = os.path.join(os.getcwd(), "static", "db", dbname, "files") path=os.path.join(fdir,fname) basename,ext=os.path.splitext(fname) fname,rand = basename.split(",") for s in [512,256,128,64,32]: fname_thumb =fname+ "-resize-%s"%s+"," +rand + ext path_thumb = os.path.join(fdir, fname_thumb) os.system(r"convert -resize %sx%s\> '%s' '%s'" % (s,s,path, path_thumb))
{ "content_hash": "8681d38c81912d6202a6cf63b7b5d94d", "timestamp": "", "source": "github", "line_count": 555, "max_line_length": 151, "avg_line_length": 30.717117117117116, "alnum_prop": 0.5580713280150165, "repo_name": "nfco/netforce", "id": "a3030b85e0cc513f6838673cd4eb1af0b4c19f60", "size": "18153", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "netforce/netforce/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "73" }, { "name": "CSS", "bytes": "407336" }, { "name": "HTML", "bytes": "478918" }, { "name": "Java", "bytes": "11870" }, { "name": "JavaScript", "bytes": "3712147" }, { "name": "Makefile", "bytes": "353" }, { "name": "PHP", "bytes": "2274" }, { "name": "Python", "bytes": "3469515" }, { "name": "Roff", "bytes": "15858" }, { "name": "Shell", "bytes": "117" } ], "symlink_target": "" }
from __future__ import print_function import numpy as np import argparse import os import sys import signal import time import socket from contextlib import closing from six import string_types import math import paddle import paddle.fluid as fluid import paddle.fluid.profiler as profiler import paddle.fluid.unique_name as nameGen from paddle.fluid import core import unittest from multiprocessing import Process import paddle.fluid.layers as layers from functools import reduce from test_collective_base import TestCollectiveRunnerBase, runtime_main class TestCollectiveBroadcast(TestCollectiveRunnerBase): def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): tindata = layers.data( name="tindata", shape=[10, 1000], dtype='float32') toutdata = main_prog.current_block().create_var( name="outofbroadcast", dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=False) main_prog.global_block().append_op( type="c_broadcast", inputs={'X': tindata}, attrs={'ring_id': ring_id, 'root': rootid}, outputs={'Out': toutdata}) main_prog.global_block().append_op( type="c_sync_comm_stream", inputs={'X': toutdata}, outputs={'Out': toutdata}, attrs={'ring_id': ring_id}) return toutdata if __name__ == "__main__": runtime_main(TestCollectiveBroadcast, "broadcast", 0)
{ "content_hash": "81616bc440c27556ff2a99ef0036cdb4", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 71, "avg_line_length": 31.553571428571427, "alnum_prop": 0.6151669496321449, "repo_name": "tensor-tang/Paddle", "id": "18f0485f923e4f72f76be3b0b34ebeb1d89c926c", "size": "2378", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "python/paddle/fluid/tests/unittests/collective_broadcast_op.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "32490" }, { "name": "C++", "bytes": "10161819" }, { "name": "CMake", "bytes": "290828" }, { "name": "Cuda", "bytes": "1183095" }, { "name": "Dockerfile", "bytes": "10002" }, { "name": "Python", "bytes": "7082088" }, { "name": "Ruby", "bytes": "353" }, { "name": "Shell", "bytes": "200906" } ], "symlink_target": "" }
import sys import unittest from test import support import socket import select import time import datetime import gc import os import errno import pprint import tempfile import urllib.request import traceback import asyncore import weakref import platform import functools from unittest import mock ssl = support.import_module("ssl") PROTOCOLS = sorted(ssl._PROTOCOL_NAMES) HOST = support.HOST def data_file(*name): return os.path.join(os.path.dirname(__file__), *name) # The custom key and certificate files used in test_ssl are generated # using Lib/test/make_ssl_certs.py. # Other certificates are simply fetched from the Internet servers they # are meant to authenticate. CERTFILE = data_file("keycert.pem") BYTES_CERTFILE = os.fsencode(CERTFILE) ONLYCERT = data_file("ssl_cert.pem") ONLYKEY = data_file("ssl_key.pem") BYTES_ONLYCERT = os.fsencode(ONLYCERT) BYTES_ONLYKEY = os.fsencode(ONLYKEY) CERTFILE_PROTECTED = data_file("keycert.passwd.pem") ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem") KEY_PASSWORD = "somepass" CAPATH = data_file("capath") BYTES_CAPATH = os.fsencode(CAPATH) CAFILE_NEURONIO = data_file("capath", "4e1295a3.0") CAFILE_CACERT = data_file("capath", "5ed36f99.0") # empty CRL CRLFILE = data_file("revocation.crl") # Two keys and certs signed by the same CA (for SNI tests) SIGNED_CERTFILE = data_file("keycert3.pem") SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") WRONGCERT = data_file("XXXnonexisting.pem") BADKEY = data_file("badkey.pem") NOKIACERT = data_file("nokia.pem") NULLBYTECERT = data_file("nullbytecert.pem") DHFILE = data_file("dh512.pem") BYTES_DHFILE = os.fsencode(DHFILE) def handle_error(prefix): exc_format = ' '.join(traceback.format_exception(*sys.exc_info())) if support.verbose: sys.stdout.write(prefix + exc_format) def can_clear_options(): # 0.9.8m or higher return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15) def no_sslv2_implies_sslv3_hello(): # 0.9.7h or higher return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15) def have_verify_flags(): # 0.9.8 or higher return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15) def asn1time(cert_time): # Some versions of OpenSSL ignore seconds, see #18207 # 0.9.8.i if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15): fmt = "%b %d %H:%M:%S %Y GMT" dt = datetime.datetime.strptime(cert_time, fmt) dt = dt.replace(second=0) cert_time = dt.strftime(fmt) # %d adds leading zero but ASN1_TIME_print() uses leading space if cert_time[4] == "0": cert_time = cert_time[:4] + " " + cert_time[5:] return cert_time # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2 def skip_if_broken_ubuntu_ssl(func): if hasattr(ssl, 'PROTOCOL_SSLv2'): @functools.wraps(func) def f(*args, **kwargs): try: ssl.SSLContext(ssl.PROTOCOL_SSLv2) except ssl.SSLError: if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and platform.linux_distribution() == ('debian', 'squeeze/sid', '')): raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour") return func(*args, **kwargs) return f else: return func needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test") class BasicSocketTests(unittest.TestCase): def test_constants(self): ssl.CERT_NONE ssl.CERT_OPTIONAL ssl.CERT_REQUIRED ssl.OP_CIPHER_SERVER_PREFERENCE ssl.OP_SINGLE_DH_USE if ssl.HAS_ECDH: ssl.OP_SINGLE_ECDH_USE if ssl.OPENSSL_VERSION_INFO >= (1, 0): ssl.OP_NO_COMPRESSION self.assertIn(ssl.HAS_SNI, 
{True, False}) self.assertIn(ssl.HAS_ECDH, {True, False}) def test_random(self): v = ssl.RAND_status() if support.verbose: sys.stdout.write("\n RAND_status is %d (%s)\n" % (v, (v and "sufficient randomness") or "insufficient randomness")) data, is_cryptographic = ssl.RAND_pseudo_bytes(16) self.assertEqual(len(data), 16) self.assertEqual(is_cryptographic, v == 1) if v: data = ssl.RAND_bytes(16) self.assertEqual(len(data), 16) else: self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16) # negative num is invalid self.assertRaises(ValueError, ssl.RAND_bytes, -5) self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5) if hasattr(ssl, 'RAND_egd'): self.assertRaises(TypeError, ssl.RAND_egd, 1) self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1) ssl.RAND_add("this is a random string", 75.0) @unittest.skipUnless(os.name == 'posix', 'requires posix') def test_random_fork(self): status = ssl.RAND_status() if not status: self.fail("OpenSSL's PRNG has insufficient randomness") rfd, wfd = os.pipe() pid = os.fork() if pid == 0: try: os.close(rfd) child_random = ssl.RAND_pseudo_bytes(16)[0] self.assertEqual(len(child_random), 16) os.write(wfd, child_random) os.close(wfd) except BaseException: os._exit(1) else: os._exit(0) else: os.close(wfd) self.addCleanup(os.close, rfd) _, status = os.waitpid(pid, 0) self.assertEqual(status, 0) child_random = os.read(rfd, 16) self.assertEqual(len(child_random), 16) parent_random = ssl.RAND_pseudo_bytes(16)[0] self.assertEqual(len(parent_random), 16) self.assertNotEqual(child_random, parent_random) def test_parse_cert(self): # note that this uses an 'unofficial' function in _ssl.c, # provided solely for this test, to exercise the certificate # parsing code p = ssl._ssl._test_decode_cert(CERTFILE) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['issuer'], ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)) ) # Note the next three asserts will fail if the keys are regenerated self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT')) self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT')) self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E') self.assertEqual(p['subject'], ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)) ) self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subjectAltName'], (('DNS', 'projects.developer.nokia.com'), ('DNS', 'projects.forum.nokia.com')) ) # extra OCSP and AIA fields self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',)) self.assertEqual(p['caIssuers'], ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',)) self.assertEqual(p['crlDistributionPoints'], ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',)) def test_parse_cert_CVE_2013_4238(self): p = ssl._ssl._test_decode_cert(NULLBYTECERT) if support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") subject = ((('countryName', 'US'),), (('stateOrProvinceName', 'Oregon'),), (('localityName', 'Beaverton'),), (('organizationName', 'Python Software Foundation'),), (('organizationalUnitName', 'Python Core Development'),), (('commonName', 
'null.python.org\x00example.org'),), (('emailAddress', '[email protected]'),)) self.assertEqual(p['subject'], subject) self.assertEqual(p['issuer'], subject) if ssl._OPENSSL_API_VERSION >= (0, 9, 8): san = (('DNS', 'altnull.python.org\x00example.com'), ('email', '[email protected]\[email protected]'), ('URI', 'http://null.python.org\x00http://example.org'), ('IP Address', '192.0.2.1'), ('IP Address', '2001:DB8:0:0:0:0:0:1\n')) else: # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName san = (('DNS', 'altnull.python.org\x00example.com'), ('email', '[email protected]\[email protected]'), ('URI', 'http://null.python.org\x00http://example.org'), ('IP Address', '192.0.2.1'), ('IP Address', '<invalid>')) self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) d2 = ssl.PEM_cert_to_DER_cert(p2) self.assertEqual(d1, d2) if not p2.startswith(ssl.PEM_HEADER + '\n'): self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2) if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'): self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2) def test_openssl_version(self): n = ssl.OPENSSL_VERSION_NUMBER t = ssl.OPENSSL_VERSION_INFO s = ssl.OPENSSL_VERSION self.assertIsInstance(n, int) self.assertIsInstance(t, tuple) self.assertIsInstance(s, str) # Some sanity checks follow # >= 0.9 self.assertGreaterEqual(n, 0x900000) # < 3.0 self.assertLess(n, 0x30000000) major, minor, fix, patch, status = t self.assertGreaterEqual(major, 0) self.assertLess(major, 3) self.assertGreaterEqual(minor, 0) self.assertLess(minor, 256) self.assertGreaterEqual(fix, 0) self.assertLess(fix, 256) self.assertGreaterEqual(patch, 0) self.assertLessEqual(patch, 63) self.assertGreaterEqual(status, 0) self.assertLessEqual(status, 15) # Version string as returned by {Open,Libre}SSL, the format might change if "LibreSSL" in s: self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)), (s, t)) else: self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)), (s, t)) @support.cpython_only def test_refcycle(self): # Issue #7943: an SSL object doesn't create reference cycles with # itself. s = socket.socket(socket.AF_INET) ss = ssl.wrap_socket(s) wr = weakref.ref(ss) with support.check_warnings(("", ResourceWarning)): del ss self.assertEqual(wr(), None) def test_wrapped_unconnected(self): # Methods on an unconnected SSLSocket propagate the original # OSError raise by the underlying socket object. s = socket.socket(socket.AF_INET) with ssl.wrap_socket(s) as ss: self.assertRaises(OSError, ss.recv, 1) self.assertRaises(OSError, ss.recv_into, bytearray(b'x')) self.assertRaises(OSError, ss.recvfrom, 1) self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1) self.assertRaises(OSError, ss.send, b'x') self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0)) def test_timeout(self): # Issue #8524: when creating an SSL socket, the timeout of the # original socket should be retained. 
for timeout in (None, 0.0, 5.0): s = socket.socket(socket.AF_INET) s.settimeout(timeout) with ssl.wrap_socket(s) as ss: self.assertEqual(timeout, ss.gettimeout()) def test_errors(self): sock = socket.socket() self.assertRaisesRegex(ValueError, "certfile must be specified", ssl.wrap_socket, sock, keyfile=CERTFILE) self.assertRaisesRegex(ValueError, "certfile must be specified for server-side operations", ssl.wrap_socket, sock, server_side=True) self.assertRaisesRegex(ValueError, "certfile must be specified for server-side operations", ssl.wrap_socket, sock, server_side=True, certfile="") with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s: self.assertRaisesRegex(ValueError, "can't connect in server-side mode", s.connect, (HOST, 8080)) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(OSError) as cm: with socket.socket() as sock: ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) def test_match_hostname(self): def ok(cert, hostname): ssl.match_hostname(cert, hostname) def fail(cert, hostname): self.assertRaises(ssl.CertificateError, ssl.match_hostname, cert, hostname) cert = {'subject': ((('commonName', 'example.com'),),)} ok(cert, 'example.com') ok(cert, 'ExAmple.cOm') fail(cert, 'www.example.com') fail(cert, '.example.com') fail(cert, 'example.org') fail(cert, 'exampleXcom') cert = {'subject': ((('commonName', '*.a.com'),),)} ok(cert, 'foo.a.com') fail(cert, 'bar.foo.a.com') fail(cert, 'a.com') fail(cert, 'Xa.com') fail(cert, '.a.com') # only match one left-most wildcard cert = {'subject': ((('commonName', 'f*.com'),),)} ok(cert, 'foo.com') ok(cert, 'f.com') fail(cert, 'bar.com') fail(cert, 'foo.a.com') fail(cert, 'bar.foo.com') # NULL bytes are bad, CVE-2013-4073 cert = {'subject': ((('commonName', 'null.python.org\x00example.org'),),)} ok(cert, 'null.python.org\x00example.org') # or raise an error? fail(cert, 'example.org') fail(cert, 'null.python.org') # error cases with wildcards cert = {'subject': ((('commonName', '*.*.a.com'),),)} fail(cert, 'bar.foo.a.com') fail(cert, 'a.com') fail(cert, 'Xa.com') fail(cert, '.a.com') cert = {'subject': ((('commonName', 'a.*.com'),),)} fail(cert, 'a.foo.com') fail(cert, 'a..com') fail(cert, 'a.com') # wildcard doesn't match IDNA prefix 'xn--' idna = 'püthon.python.org'.encode("idna").decode("ascii") cert = {'subject': ((('commonName', idna),),)} ok(cert, idna) cert = {'subject': ((('commonName', 'x*.python.org'),),)} fail(cert, idna) cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)} fail(cert, idna) # wildcard in first fragment and IDNA A-labels in sequent fragments # are supported. 
idna = 'www*.pythön.org'.encode("idna").decode("ascii") cert = {'subject': ((('commonName', idna),),)} ok(cert, 'www.pythön.org'.encode("idna").decode("ascii")) ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii")) fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii")) fail(cert, 'pythön.org'.encode("idna").decode("ascii")) # Slightly fake real-world example cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT', 'subject': ((('commonName', 'linuxfrz.org'),),), 'subjectAltName': (('DNS', 'linuxfr.org'), ('DNS', 'linuxfr.com'), ('othername', '<unsupported>'))} ok(cert, 'linuxfr.org') ok(cert, 'linuxfr.com') # Not a "DNS" entry fail(cert, '<unsupported>') # When there is a subjectAltName, commonName isn't used fail(cert, 'linuxfrz.org') # A pristine real-world example cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google Inc'),), (('commonName', 'mail.google.com'),))} ok(cert, 'mail.google.com') fail(cert, 'gmail.com') # Only commonName is considered fail(cert, 'California') # Neither commonName nor subjectAltName cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google Inc'),))} fail(cert, 'mail.google.com') # No DNS entry in subjectAltName but a commonName cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('commonName', 'mail.google.com'),)), 'subjectAltName': (('othername', 'blabla'), )} ok(cert, 'mail.google.com') # No DNS entry subjectAltName and no commonName cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT', 'subject': ((('countryName', 'US'),), (('stateOrProvinceName', 'California'),), (('localityName', 'Mountain View'),), (('organizationName', 'Google Inc'),)), 'subjectAltName': (('othername', 'blabla'),)} fail(cert, 'google.com') # Empty cert / no cert self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com') self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com') # Issue #17980: avoid denials of service by refusing more than one # wildcard per fragment. 
cert = {'subject': ((('commonName', 'a*b.com'),),)} ok(cert, 'axxb.com') cert = {'subject': ((('commonName', 'a*b.co*'),),)} fail(cert, 'axxb.com') cert = {'subject': ((('commonName', 'a*b*.com'),),)} with self.assertRaises(ssl.CertificateError) as cm: ssl.match_hostname(cert, 'axxbxxc.com') self.assertIn("too many wildcards", str(cm.exception)) def test_server_side(self): # server_hostname doesn't work for server sockets ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with socket.socket() as sock: self.assertRaises(ValueError, ctx.wrap_socket, sock, True, server_hostname="some.hostname") def test_unknown_channel_binding(self): # should raise ValueError for unknown type s = socket.socket(socket.AF_INET) with ssl.wrap_socket(s) as ss: with self.assertRaises(ValueError): ss.get_channel_binding("unknown-type") @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES, "'tls-unique' channel binding not available") def test_tls_unique_channel_binding(self): # unconnected should return None for known type s = socket.socket(socket.AF_INET) with ssl.wrap_socket(s) as ss: self.assertIsNone(ss.get_channel_binding("tls-unique")) # the same for server-side s = socket.socket(socket.AF_INET) with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss: self.assertIsNone(ss.get_channel_binding("tls-unique")) def test_dealloc_warn(self): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) r = repr(ss) with self.assertWarns(ResourceWarning) as cm: ss = None support.gc_collect() self.assertIn(r, str(cm.warning.args[0])) def test_get_default_verify_paths(self): paths = ssl.get_default_verify_paths() self.assertEqual(len(paths), 6) self.assertIsInstance(paths, ssl.DefaultVerifyPaths) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE paths = ssl.get_default_verify_paths() self.assertEqual(paths.cafile, CERTFILE) self.assertEqual(paths.capath, CAPATH) @unittest.skipUnless(sys.platform == "win32", "Windows specific") def test_enum_certificates(self): self.assertTrue(ssl.enum_certificates("CA")) self.assertTrue(ssl.enum_certificates("ROOT")) self.assertRaises(TypeError, ssl.enum_certificates) self.assertRaises(WindowsError, ssl.enum_certificates, "") trust_oids = set() for storename in ("CA", "ROOT"): store = ssl.enum_certificates(storename) self.assertIsInstance(store, list) for element in store: self.assertIsInstance(element, tuple) self.assertEqual(len(element), 3) cert, enc, trust = element self.assertIsInstance(cert, bytes) self.assertIn(enc, {"x509_asn", "pkcs_7_asn"}) self.assertIsInstance(trust, (set, bool)) if isinstance(trust, set): trust_oids.update(trust) serverAuth = "1.3.6.1.5.5.7.3.1" self.assertIn(serverAuth, trust_oids) @unittest.skipUnless(sys.platform == "win32", "Windows specific") def test_enum_crls(self): self.assertTrue(ssl.enum_crls("CA")) self.assertRaises(TypeError, ssl.enum_crls) self.assertRaises(WindowsError, ssl.enum_crls, "") crls = ssl.enum_crls("CA") self.assertIsInstance(crls, list) for element in crls: self.assertIsInstance(element, tuple) self.assertEqual(len(element), 2) self.assertIsInstance(element[0], bytes) self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"}) def test_asn1object(self): expected = (129, 'serverAuth', 'TLS Web Server Authentication', '1.3.6.1.5.5.7.3.1') val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1') self.assertEqual(val, expected) self.assertEqual(val.nid, 129) self.assertEqual(val.shortname, 'serverAuth') self.assertEqual(val.longname, 'TLS Web Server Authentication') self.assertEqual(val.oid, 
'1.3.6.1.5.5.7.3.1') self.assertIsInstance(val, ssl._ASN1Object) self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth') val = ssl._ASN1Object.fromnid(129) self.assertEqual(val, expected) self.assertIsInstance(val, ssl._ASN1Object) self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1) with self.assertRaisesRegex(ValueError, "unknown NID 100000"): ssl._ASN1Object.fromnid(100000) for i in range(1000): try: obj = ssl._ASN1Object.fromnid(i) except ValueError: pass else: self.assertIsInstance(obj.nid, int) self.assertIsInstance(obj.shortname, str) self.assertIsInstance(obj.longname, str) self.assertIsInstance(obj.oid, (str, type(None))) val = ssl._ASN1Object.fromname('TLS Web Server Authentication') self.assertEqual(val, expected) self.assertIsInstance(val, ssl._ASN1Object) self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected) self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'), expected) with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"): ssl._ASN1Object.fromname('serverauth') def test_purpose_enum(self): val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1') self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object) self.assertEqual(ssl.Purpose.SERVER_AUTH, val) self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129) self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth') self.assertEqual(ssl.Purpose.SERVER_AUTH.oid, '1.3.6.1.5.5.7.3.1') val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2') self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object) self.assertEqual(ssl.Purpose.CLIENT_AUTH, val) self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130) self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth') self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid, '1.3.6.1.5.5.7.3.2') def test_unsupported_dtls(self): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.addCleanup(s.close) with self.assertRaises(NotImplementedError) as cx: ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE) self.assertEqual(str(cx.exception), "only stream sockets are supported") ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with self.assertRaises(NotImplementedError) as cx: ctx.wrap_socket(s) self.assertEqual(str(cx.exception), "only stream sockets are supported") class ContextTests(unittest.TestCase): @skip_if_broken_ubuntu_ssl def test_constructor(self): for protocol in PROTOCOLS: ssl.SSLContext(protocol) self.assertRaises(TypeError, ssl.SSLContext) self.assertRaises(ValueError, ssl.SSLContext, -1) self.assertRaises(ValueError, ssl.SSLContext, 42) @skip_if_broken_ubuntu_ssl def test_protocol(self): for proto in PROTOCOLS: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.protocol, proto) def test_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.set_ciphers("ALL") ctx.set_ciphers("DEFAULT") with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"): ctx.set_ciphers("^$:,;?*'dorothyx") @skip_if_broken_ubuntu_ssl def test_options(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # OP_ALL | OP_NO_SSLv2 is the default value self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2, ctx.options) ctx.options |= ssl.OP_NO_SSLv3 self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3, ctx.options) if can_clear_options(): ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1 self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3, ctx.options) ctx.options = 0 self.assertEqual(0, ctx.options) else: with self.assertRaises(ValueError): ctx.options = 0 def test_verify_mode(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # Default value 
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) ctx.verify_mode = ssl.CERT_OPTIONAL self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) ctx.verify_mode = ssl.CERT_REQUIRED self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.verify_mode = ssl.CERT_NONE self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) with self.assertRaises(TypeError): ctx.verify_mode = None with self.assertRaises(ValueError): ctx.verify_mode = 42 @unittest.skipUnless(have_verify_flags(), "verify_flags need OpenSSL > 0.9.8") def test_verify_flags(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # default value by OpenSSL self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN) ctx.verify_flags = ssl.VERIFY_DEFAULT self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT) # supports any value ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT) with self.assertRaises(TypeError): ctx.verify_flags = None def test_load_cert_chain(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # Combined key and cert in a single file ctx.load_cert_chain(CERTFILE, keyfile=None) ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE) self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE) with self.assertRaises(OSError) as cm: ctx.load_cert_chain(WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(BADCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(EMPTYCERT) # Separate key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_cert_chain(ONLYCERT, ONLYKEY) ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY) ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT) # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"): ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=bytearray(KEY_PASSWORD.encode())) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode()) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, bytearray(KEY_PASSWORD.encode())) with self.assertRaisesRegex(TypeError, "should be a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=True) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass") with self.assertRaisesRegex(ValueError, "cannot be longer"): # openssl has a fixed limit on the password buffer. # PEM_BUFSIZE is generally set to 1kb. # Return a string larger than this. 
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400) # Password callback def getpass_unicode(): return KEY_PASSWORD def getpass_bytes(): return KEY_PASSWORD.encode() def getpass_bytearray(): return bytearray(KEY_PASSWORD.encode()) def getpass_badpass(): return "badpass" def getpass_huge(): return b'a' * (1024 * 1024) def getpass_bad_type(): return 9 def getpass_exception(): raise Exception('getpass error') class GetPassCallable: def __call__(self): return KEY_PASSWORD def getpass(self): return KEY_PASSWORD ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable().getpass) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass) with self.assertRaisesRegex(ValueError, "cannot be longer"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge) with self.assertRaisesRegex(TypeError, "must return a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type) with self.assertRaisesRegex(Exception, "getpass error"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception) # Make sure the password function isn't called if it isn't needed ctx.load_cert_chain(CERTFILE, password=getpass_exception) def test_load_verify_locations(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_verify_locations(CERTFILE) ctx.load_verify_locations(cafile=CERTFILE, capath=None) ctx.load_verify_locations(BYTES_CERTFILE) ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None) self.assertRaises(TypeError, ctx.load_verify_locations) self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None) with self.assertRaises(OSError) as cm: ctx.load_verify_locations(WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_verify_locations(BADCERT) ctx.load_verify_locations(CERTFILE, CAPATH) ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH) # Issue #10989: crash if the second argument type is invalid self.assertRaises(TypeError, ctx.load_verify_locations, None, True) def test_load_verify_cadata(self): # test cadata with open(CAFILE_CACERT) as f: cacert_pem = f.read() cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem) with open(CAFILE_NEURONIO) as f: neuronio_pem = f.read() neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem) # test PEM ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0) ctx.load_verify_locations(cadata=cacert_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1) ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) combined = "\n".join((cacert_pem, neuronio_pem)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # with junk around the certs ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) combined = ["head", cacert_pem, "other", neuronio_pem, "again", neuronio_pem, "tail"] ctx.load_verify_locations(cadata="\n".join(combined)) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # test DER ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) 
ctx.load_verify_locations(cadata=cacert_der) ctx.load_verify_locations(cadata=neuronio_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=cacert_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) combined = b"".join((cacert_der, neuronio_der)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # error cases ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object) with self.assertRaisesRegex(ssl.SSLError, "no start line"): ctx.load_verify_locations(cadata="broken") with self.assertRaisesRegex(ssl.SSLError, "not enough data"): ctx.load_verify_locations(cadata=b"broken") def test_load_dh_params(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_dh_params(DHFILE) if os.name != 'nt': ctx.load_dh_params(BYTES_DHFILE) self.assertRaises(TypeError, ctx.load_dh_params) self.assertRaises(TypeError, ctx.load_dh_params, None) with self.assertRaises(FileNotFoundError) as cm: ctx.load_dh_params(WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) @skip_if_broken_ubuntu_ssl def test_session_stats(self): for proto in PROTOCOLS: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.session_stats(), { 'number': 0, 'connect': 0, 'connect_good': 0, 'connect_renegotiate': 0, 'accept': 0, 'accept_good': 0, 'accept_renegotiate': 0, 'hits': 0, 'misses': 0, 'timeouts': 0, 'cache_full': 0, }) def test_set_default_verify_paths(self): # There's not much we can do to test that it acts as expected, # so just check it doesn't crash or raise an exception. ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.set_default_verify_paths() @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build") def test_set_ecdh_curve(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.set_ecdh_curve("prime256v1") ctx.set_ecdh_curve(b"prime256v1") self.assertRaises(TypeError, ctx.set_ecdh_curve) self.assertRaises(TypeError, ctx.set_ecdh_curve, None) self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo") self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo") @needs_sni def test_sni_callback(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # set_servername_callback expects a callable, or None self.assertRaises(TypeError, ctx.set_servername_callback) self.assertRaises(TypeError, ctx.set_servername_callback, 4) self.assertRaises(TypeError, ctx.set_servername_callback, "") self.assertRaises(TypeError, ctx.set_servername_callback, ctx) def dummycallback(sock, servername, ctx): pass ctx.set_servername_callback(None) ctx.set_servername_callback(dummycallback) @needs_sni def test_sni_callback_refcycle(self): # Reference cycles through the servername callback are detected # and cleared. 
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) def dummycallback(sock, servername, ctx, cycle=ctx): pass ctx.set_servername_callback(dummycallback) wr = weakref.ref(ctx) del ctx, dummycallback gc.collect() self.assertIs(wr(), None) def test_cert_store_stats(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_cert_chain(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) def test_get_ca_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.get_ca_certs(), []) # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', '[email protected]'),)), 'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'), 'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'), 'serialNumber': '00', 'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',), 'subject': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', '[email protected]'),)), 'version': 3}]) with open(SVN_PYTHON_ORG_ROOT_CERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) def test_load_default_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_default_certs(ssl.Purpose.SERVER_AUTH) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH) ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertRaises(TypeError, ctx.load_default_certs, None) self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH') @unittest.skipIf(sys.platform == "win32", "not-Windows specific") def test_load_default_certs_env(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0}) @unittest.skipUnless(sys.platform == "win32", "Windows specific") def test_load_default_certs_env_windows(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_default_certs() stats = ctx.cert_store_stats() ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with support.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() stats["x509"] += 1 self.assertEqual(ctx.cert_store_stats(), stats) def test_create_default_context(self): ctx = ssl.create_default_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) self.assertEqual( ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0), getattr(ssl, "OP_NO_COMPRESSION", 0), ) with open(SIGNING_CA) as f: cadata = 
f.read() ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH, cadata=cadata) self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) self.assertEqual( ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0), getattr(ssl, "OP_NO_COMPRESSION", 0), ) ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) self.assertEqual( ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0), getattr(ssl, "OP_NO_COMPRESSION", 0), ) self.assertEqual( ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0), getattr(ssl, "OP_SINGLE_DH_USE", 0), ) self.assertEqual( ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0), getattr(ssl, "OP_SINGLE_ECDH_USE", 0), ) def test__create_stdlib_context(self): ctx = ssl._create_stdlib_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1, cert_reqs=ssl.CERT_REQUIRED, check_hostname=True) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) def test_check_hostname(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertFalse(ctx.check_hostname) # Requires CERT_REQUIRED or CERT_OPTIONAL with self.assertRaises(ValueError): ctx.check_hostname = True ctx.verify_mode = ssl.CERT_REQUIRED self.assertFalse(ctx.check_hostname) ctx.check_hostname = True self.assertTrue(ctx.check_hostname) ctx.verify_mode = ssl.CERT_OPTIONAL ctx.check_hostname = True self.assertTrue(ctx.check_hostname) # Cannot set CERT_NONE with check_hostname enabled with self.assertRaises(ValueError): ctx.verify_mode = ssl.CERT_NONE ctx.check_hostname = False self.assertFalse(ctx.check_hostname) class SSLErrorTests(unittest.TestCase): def test_str(self): # The str() of a SSLError doesn't include the errno e = ssl.SSLError(1, "foo") self.assertEqual(str(e), "foo") self.assertEqual(e.errno, 1) # Same for a subclass e = ssl.SSLZeroReturnError(1, "foo") self.assertEqual(str(e), "foo") self.assertEqual(e.errno, 1) def test_lib_reason(self): # Test the library and reason attributes ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) self.assertEqual(cm.exception.library, 'PEM') self.assertEqual(cm.exception.reason, 'NO_START_LINE') s = str(cm.exception) self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s) def test_subclass(self): # Check that the appropriate SSLError subclass is raised # (this only tests one of them) ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with socket.socket() as s: s.bind(("127.0.0.1", 0)) s.listen(5) c = socket.socket() c.connect(s.getsockname()) 
c.setblocking(False) with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c: with self.assertRaises(ssl.SSLWantReadError) as cm: c.do_handshake() s = str(cm.exception) self.assertTrue(s.startswith("The operation did not complete (read)"), s) # For compatibility self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ) class NetworkedTests(unittest.TestCase): def test_connect(self): with support.transient_internet("svn.python.org"): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: s.connect(("svn.python.org", 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() # this should fail because we have no verification certs s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegex(ssl.SSLError, "certificate verify failed", s.connect, ("svn.python.org", 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT) try: s.connect(("svn.python.org", 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation with support.transient_internet("svn.python.org"): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT) try: self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) self.assertTrue(s.getpeercert()) finally: s.close() def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. with support.transient_internet("svn.python.org"): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) rc = s.connect_ex(('svn.python.org', 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish select.select([], [s], [], 5.0) # Non-blocking handshake while True: try: s.do_handshake() break except ssl.SSLWantReadError: select.select([s], [], [], 5.0) except ssl.SSLWantWriteError: select.select([], [s], [], 5.0) # SSL established self.assertTrue(s.getpeercert()) finally: s.close() def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). with support.transient_internet("svn.python.org"): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) rc = s.connect_ex(('svn.python.org', 443)) if rc == 0: self.skipTest("svn.python.org responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): with support.transient_internet("svn.python.org"): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT) try: rc = s.connect_ex(("svn.python.org", 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. 
self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) finally: s.close() def test_connect_with_context(self): with support.transient_internet("svn.python.org"): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect(("svn.python.org", 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), server_hostname="svn.python.org") s.connect(("svn.python.org", 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegex(ssl.SSLError, "certificate verify failed", s.connect, ("svn.python.org", 443)) s.close() # This should succeed because we specify the root cert ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect(("svn.python.org", 443)) try: cert = s.getpeercert() self.assertTrue(cert) finally: s.close() def test_connect_capath(self): # Verify server certificates using the `capath` argument # NOTE: the subject hashing algorithm has been changed between # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. with support.transient_internet("svn.python.org"): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect(("svn.python.org", 443)) try: cert = s.getpeercert() self.assertTrue(cert) finally: s.close() # Same with a bytes `capath` argument ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect(("svn.python.org", 443)) try: cert = s.getpeercert() self.assertTrue(cert) finally: s.close() def test_connect_cadata(self): with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) with support.transient_internet("svn.python.org"): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(("svn.python.org", 443)) cert = s.getpeercert() self.assertTrue(cert) # same with DER ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s: s.connect(("svn.python.org", 443)) cert = s.getpeercert() self.assertTrue(cert) @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows") def test_makefile_close(self): # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). 
with support.transient_internet("svn.python.org"): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) ss.connect(("svn.python.org", 443)) fd = ss.fileno() f = ss.makefile() f.close() # The fd is still open os.read(fd, 0) # Closing the SSL socket should close the fd too ss.close() gc.collect() with self.assertRaises(OSError) as e: os.read(fd, 0) self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): with support.transient_internet("svn.python.org"): s = socket.socket(socket.AF_INET) s.connect(("svn.python.org", 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, do_handshake_on_connect=False) count = 0 while True: try: count += 1 s.do_handshake() break except ssl.SSLWantReadError: select.select([s], [], []) except ssl.SSLWantWriteError: select.select([], [s], []) s.close() if support.verbose: sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count) def test_get_server_certificate(self): def _test_get_server_certificate(host, port, cert=None): with support.transient_internet(host): pem = ssl.get_server_certificate((host, port), ssl.PROTOCOL_SSLv23) if not pem: self.fail("No server certificate on %s:%s!" % (host, port)) try: pem = ssl.get_server_certificate((host, port), ssl.PROTOCOL_SSLv23, ca_certs=CERTFILE) except ssl.SSLError as x: #should fail if support.verbose: sys.stdout.write("%s\n" % x) else: self.fail("Got server certificate %s for %s:%s!" % (pem, host, port)) pem = ssl.get_server_certificate((host, port), ssl.PROTOCOL_SSLv23, ca_certs=cert) if not pem: self.fail("No server certificate on %s:%s!" % (host, port)) if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): remote = ("svn.python.org", 443) with support.transient_internet(remote[0]): with ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s: s.connect(remote) with ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s: s.connect(remote) # Error checking can happen at instantiation or when connecting with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"): with socket.socket(socket.AF_INET) as sock: s = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx") s.connect(remote) def test_algorithms(self): # Issue #8484: all algorithms should be available when verifying a # certificate. 
# SHA256 was added in OpenSSL 0.9.8 if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15): self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION) # sha256.tbs-internet.com needs SNI to use the correct certificate if not ssl.HAS_SNI: self.skipTest("SNI needed for this test") # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host) remote = ("sha256.tbs-internet.com", 443) sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem") with support.transient_internet("sha256.tbs-internet.com"): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(sha256_cert) s = ctx.wrap_socket(socket.socket(socket.AF_INET), server_hostname="sha256.tbs-internet.com") try: s.connect(remote) if support.verbose: sys.stdout.write("\nCipher with %r is %r\n" % (remote, s.cipher())) sys.stdout.write("Certificate is:\n%s\n" % pprint.pformat(s.getpeercert())) finally: s.close() def test_get_ca_certs_capath(self): # capath certs are loaded on request with support.transient_internet("svn.python.org"): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect(("svn.python.org", 443)) try: cert = s.getpeercert() self.assertTrue(cert) finally: s.close() self.assertEqual(len(ctx.get_ca_certs()), 1) @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. with support.transient_internet("svn.python.org"): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with ctx1.wrap_socket(s) as ss: ss.connect(("svn.python.org", 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 self.assertIs(ss.context, ctx2) self.assertIs(ss._sslobj.context, ctx2) try: import threading except ImportError: _have_threads = False else: _have_threads = True from test.ssl_servers import make_https_server class ThreadedEchoServer(threading.Thread): class ConnectionHandler(threading.Thread): """A mildly complicated class, because we want it to work both with and without the SSL wrapper around the socket connection, so that we can test the STARTTLS functionality.""" def __init__(self, server, connsock, addr): self.server = server self.running = False self.sock = connsock self.addr = addr self.sock.setblocking(1) self.sslconn = None threading.Thread.__init__(self) self.daemon = True def wrap_conn(self): try: self.sslconn = self.server.context.wrap_socket( self.sock, server_side=True) self.server.selected_protocols.append(self.sslconn.selected_npn_protocol()) except (ssl.SSLError, ConnectionResetError) as e: # We treat ConnectionResetError as though it were an # SSLError - OpenSSL on Ubuntu abruptly closes the # connection when asked to use an unsupported protocol. # # XXX Various errors can have happened here, for example # a mismatching protocol version, an invalid certificate, # or a low-level bug. This should be made more discriminating. 
self.server.conn_errors.append(e) if self.server.chatty: handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n") self.running = False self.server.stop() self.close() return False else: if self.server.context.verify_mode == ssl.CERT_REQUIRED: cert = self.sslconn.getpeercert() if support.verbose and self.server.chatty: sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n") cert_binary = self.sslconn.getpeercert(True) if support.verbose and self.server.chatty: sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n") cipher = self.sslconn.cipher() if support.verbose and self.server.chatty: sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n") sys.stdout.write(" server: selected protocol is now " + str(self.sslconn.selected_npn_protocol()) + "\n") return True def read(self): if self.sslconn: return self.sslconn.read() else: return self.sock.recv(1024) def write(self, bytes): if self.sslconn: return self.sslconn.write(bytes) else: return self.sock.send(bytes) def close(self): if self.sslconn: self.sslconn.close() else: self.sock.close() def run(self): self.running = True if not self.server.starttls_server: if not self.wrap_conn(): return while self.running: try: msg = self.read() stripped = msg.strip() if not stripped: # eof, so quit this handler self.running = False self.close() elif stripped == b'over': if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: client closed connection\n") self.close() return elif (self.server.starttls_server and stripped == b'STARTTLS'): if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read STARTTLS from client, sending OK...\n") self.write(b"OK\n") if not self.wrap_conn(): return elif (self.server.starttls_server and self.sslconn and stripped == b'ENDTLS'): if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read ENDTLS from client, sending OK...\n") self.write(b"OK\n") self.sock = self.sslconn.unwrap() self.sslconn = None if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: connection is now unencrypted...\n") elif stripped == b'CB tls-unique': if support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n") data = self.sslconn.get_channel_binding("tls-unique") self.write(repr(data).encode("us-ascii") + b"\n") else: if (support.verbose and self.server.connectionchatty): ctype = (self.sslconn and "encrypted") or "unencrypted" sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n" % (msg, ctype, msg.lower(), ctype)) self.write(msg.lower()) except OSError: if self.server.chatty: handle_error("Test server failure:\n") self.close() self.running = False # normally, we'd just stop here, but for the test # harness, we want to stop the server self.server.stop() def __init__(self, certificate=None, ssl_version=None, certreqs=None, cacerts=None, chatty=True, connectionchatty=False, starttls_server=False, npn_protocols=None, ciphers=None, context=None): if context: self.context = context else: self.context = ssl.SSLContext(ssl_version if ssl_version is not None else ssl.PROTOCOL_TLSv1) self.context.verify_mode = (certreqs if certreqs is not None else ssl.CERT_NONE) if cacerts: self.context.load_verify_locations(cacerts) if certificate: self.context.load_cert_chain(certificate) if npn_protocols: self.context.set_npn_protocols(npn_protocols) if ciphers: self.context.set_ciphers(ciphers) self.chatty = 
chatty self.connectionchatty = connectionchatty self.starttls_server = starttls_server self.sock = socket.socket() self.port = support.bind_port(self.sock) self.flag = None self.active = False self.selected_protocols = [] self.conn_errors = [] threading.Thread.__init__(self) self.daemon = True def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): self.stop() self.join() def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.sock.settimeout(0.05) self.sock.listen(5) self.active = True if self.flag: # signal an event self.flag.set() while self.active: try: newconn, connaddr = self.sock.accept() if support.verbose and self.chatty: sys.stdout.write(' server: new connection from ' + repr(connaddr) + '\n') handler = self.ConnectionHandler(self, newconn, connaddr) handler.start() handler.join() except socket.timeout: pass except KeyboardInterrupt: self.stop() self.sock.close() def stop(self): self.active = False class AsyncoreEchoServer(threading.Thread): # this one's based on asyncore.dispatcher class EchoServer (asyncore.dispatcher): class ConnectionHandler (asyncore.dispatcher_with_send): def __init__(self, conn, certfile): self.socket = ssl.wrap_socket(conn, server_side=True, certfile=certfile, do_handshake_on_connect=False) asyncore.dispatcher_with_send.__init__(self, self.socket) self._ssl_accepting = True self._do_ssl_handshake() def readable(self): if isinstance(self.socket, ssl.SSLSocket): while self.socket.pending() > 0: self.handle_read_event() return True def _do_ssl_handshake(self): try: self.socket.do_handshake() except (ssl.SSLWantReadError, ssl.SSLWantWriteError): return except ssl.SSLEOFError: return self.handle_close() except ssl.SSLError: raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() else: data = self.recv(1024) if support.verbose: sys.stdout.write(" server: read %s from client\n" % repr(data)) if not data: self.close() else: self.send(data.lower()) def handle_close(self): self.close() if support.verbose: sys.stdout.write(" server: closed connection %s\n" % self.socket) def handle_error(self): raise def __init__(self, certfile): self.certfile = certfile sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = support.bind_port(sock, '') asyncore.dispatcher.__init__(self, sock) self.listen(5) def handle_accepted(self, sock_obj, addr): if support.verbose: sys.stdout.write(" server: new connection from %s:%s\n" %addr) self.ConnectionHandler(sock_obj, self.certfile) def handle_error(self): raise def __init__(self, certfile): self.flag = None self.active = False self.server = self.EchoServer(certfile) self.port = self.server.port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): if support.verbose: sys.stdout.write(" cleanup: stopping server.\n") self.stop() if support.verbose: sys.stdout.write(" cleanup: joining server thread.\n") self.join() if support.verbose: sys.stdout.write(" cleanup: successfully joined.\n") def start (self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.active = True if self.flag: self.flag.set() while self.active: try: asyncore.loop(1) except: pass def stop(self): self.active = False 
        self.server.close()

def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % x.args[1])
        except OSError as x:
            if x.errno != errno.ENOENT:
                raise
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % str(x))
        else:
            raise AssertionError("Use of invalid cert should have failed!")

def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
    stats['server_npn_protocols'] = server.selected_protocols
    return stats

def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")

    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
except ssl.SSLError: if expect_success: raise except OSError as e: if expect_success or e.errno != errno.ECONNRESET: raise else: if not expect_success: raise AssertionError( "Client protocol %s succeeded with server protocol %s!" % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) class ThreadedTests(unittest.TestCase): @skip_if_broken_ubuntu_ssl def test_echo(self): """Basic test of an SSL client connecting to a server""" if support.verbose: sys.stdout.write("\n") for protocol in PROTOCOLS: with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]): context = ssl.SSLContext(protocol) context.load_cert_chain(CERTFILE) server_params_test(context, context, chatty=True, connectionchatty=True) def test_getpeercert(self): if support.verbose: sys.stdout.write("\n") context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(CERTFILE) context.load_cert_chain(CERTFILE) server = ThreadedEchoServer(context=context, chatty=False) with server: s = context.wrap_socket(socket.socket(), do_handshake_on_connect=False) s.connect((HOST, server.port)) # getpeercert() raise ValueError while the handshake isn't # done. with self.assertRaises(ValueError): s.getpeercert() s.do_handshake() cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") cipher = s.cipher() if support.verbose: sys.stdout.write(pprint.pformat(cert) + '\n') sys.stdout.write("Connection cipher is " + str(cipher) + '.\n') if 'subject' not in cert: self.fail("No subject field in certificate: %s." % pprint.pformat(cert)) if ((('organizationName', 'Python Software Foundation'),) not in cert['subject']): self.fail( "Missing or invalid 'organizationName' field in certificate subject; " "should be 'Python Software Foundation'.") self.assertIn('notBefore', cert) self.assertIn('notAfter', cert) before = ssl.cert_time_to_seconds(cert['notBefore']) after = ssl.cert_time_to_seconds(cert['notAfter']) self.assertLess(before, after) s.close() @unittest.skipUnless(have_verify_flags(), "verify_flags need OpenSSL > 0.9.8") def test_crl_check(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(SIGNING_CA) self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT) # VERIFY_DEFAULT should pass server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket()) as s: with self.assertRaisesRegex(ssl.SSLError, "certificate verify failed"): s.connect((HOST, server.port)) # now load a CRL file. The CRL file is signed by the CA. 
context.load_verify_locations(CRLFILE) server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") def test_check_hostname(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True context.load_verify_locations(SIGNING_CA) # correct hostname should verify server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket(), server_hostname="localhost") as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # incorrect hostname should raise an exception server = ThreadedEchoServer(context=server_context, chatty=True) with server: with context.wrap_socket(socket.socket(), server_hostname="invalid") as s: with self.assertRaisesRegex(ssl.CertificateError, "hostname 'invalid' doesn't match 'localhost'"): s.connect((HOST, server.port)) # missing server_hostname arg should cause an exception, too server = ThreadedEchoServer(context=server_context, chatty=True) with server: with socket.socket() as s: with self.assertRaisesRegex(ValueError, "check_hostname requires server_hostname"): context.wrap_socket(s) def test_empty_cert(self): """Connecting with an empty cert file""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "nullcert.pem")) def test_malformed_cert(self): """Connecting with a badly formatted certificate (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badcert.pem")) def test_nonexisting_cert(self): """Connecting with a non-existing cert file""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "wrongcert.pem")) def test_malformed_key(self): """Connecting with a badly formatted key (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badkey.pem")) def test_rude_shutdown(self): """A brutal shutdown of an SSL server should raise an OSError in the client when attempting handshake. """ listener_ready = threading.Event() listener_gone = threading.Event() s = socket.socket() port = support.bind_port(s, HOST) # `listener` runs in a thread. It sits in an accept() until # the main thread connects. Then it rudely closes the socket, # and sets Event `listener_gone` to let the main thread know # the socket is gone. 
def listener(): s.listen(5) listener_ready.set() newsock, addr = s.accept() newsock.close() s.close() listener_gone.set() def connector(): listener_ready.wait() with socket.socket() as c: c.connect((HOST, port)) listener_gone.wait() try: ssl_sock = ssl.wrap_socket(c) except OSError: pass else: self.fail('connecting to closed SSL socket should have failed') t = threading.Thread(target=listener) t.start() try: connector() finally: t.join() @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'), "OpenSSL is compiled without SSLv2 support") def test_protocol_sslv2(self): """Connecting to an SSLv2 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) # SSLv23 client with specific SSL options if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv2) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl def test_protocol_sslv23(self): """Connecting to an SSLv23 server with various client options""" if support.verbose: sys.stdout.write("\n") if hasattr(ssl, 'PROTOCOL_SSLv2'): try: try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True) except OSError as x: # this fails on some older versions of OpenSSL (0.9.7l, for instance) if support.verbose: sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) # Server with specific SSL options if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, server_options=ssl.OP_NO_SSLv3) # Will choose TLSv1 try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False, server_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'), "OpenSSL is compiled without SSLv3 support") def test_protocol_sslv3(self): """Connecting to an SSLv3 server with various client options""" if support.verbose: 
sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True, client_options=ssl.OP_NO_SSLv2) @skip_if_broken_ubuntu_ssl def test_protocol_tlsv1(self): """Connecting to a TLSv1 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"), "TLS version 1.1 not supported.") def test_protocol_tlsv1_1(self): """Connecting to a TLSv1.1 server with various client options. Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_1) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"), "TLS version 1.2 not supported.") def test_protocol_tlsv1_2(self): """Connecting to a TLSv1.2 server with various client options. 
Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True, server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2, client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_2) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False) def test_starttls(self): """Switching from clear text to encrypted and back again.""" msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6") server = ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLSv1, starttls_server=True, chatty=True, connectionchatty=True) wrapped = False with server: s = socket.socket() s.setblocking(1) s.connect((HOST, server.port)) if support.verbose: sys.stdout.write("\n") for indata in msgs: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) if wrapped: conn.write(indata) outdata = conn.read() else: s.send(indata) outdata = s.recv(1024) msg = outdata.strip().lower() if indata == b"STARTTLS" and msg.startswith(b"ok"): # STARTTLS ok, switch to secure mode if support.verbose: sys.stdout.write( " client: read %r from server, starting TLS...\n" % msg) conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1) wrapped = True elif indata == b"ENDTLS" and msg.startswith(b"ok"): # ENDTLS ok, switch back to clear text if support.verbose: sys.stdout.write( " client: read %r from server, ending TLS...\n" % msg) s = conn.unwrap() wrapped = False else: if support.verbose: sys.stdout.write( " client: read %r from server\n" % msg) if support.verbose: sys.stdout.write(" client: closing connection.\n") if wrapped: conn.write(b"over\n") else: s.send(b"over\n") if wrapped: conn.close() else: s.close() def test_socketserver(self): """Using a SocketServer to create and manage SSL connections.""" server = make_https_server(self, certfile=CERTFILE) # try to connect if support.verbose: sys.stdout.write('\n') with open(CERTFILE, 'rb') as f: d1 = f.read() d2 = '' # now fetch the same data from the HTTPS server url = 'https://localhost:%d/%s' % ( server.port, os.path.split(CERTFILE)[1]) context = ssl.create_default_context(cafile=CERTFILE) f = urllib.request.urlopen(url, context=context) try: dlen = f.info().get("content-length") if dlen and (int(dlen) > 0): d2 = f.read(int(dlen)) if support.verbose: sys.stdout.write( " client: read %d bytes from remote server '%s'\n" % (len(d2), server)) finally: f.close() self.assertEqual(d1, d2) def test_asyncore_server(self): """Check the example asyncore integration.""" indata = "TEST MESSAGE of mixed case\n" if support.verbose: sys.stdout.write("\n") indata = b"FOO\n" server = AsyncoreEchoServer(CERTFILE) with server: s = ssl.wrap_socket(socket.socket()) s.connect(('127.0.0.1', server.port)) if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(indata) outdata = s.read() if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): 
self.fail( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), indata[:20].lower(), len(indata))) s.write(b"over\n") if support.verbose: sys.stdout.write(" client: closing connection.\n") s.close() if support.verbose: sys.stdout.write(" client: connection closed.\n") def test_recv_send(self): """Test recv(), send() and friends.""" if support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) # helper methods for standardising recv* method signatures def _recv_into(): b = bytearray(b"\0"*100) count = s.recv_into(b) return b[:count] def _recvfrom_into(): b = bytearray(b"\0"*100) count, addr = s.recvfrom_into(b) return b[:count] # (name, method, whether to expect success, *args) send_methods = [ ('send', s.send, True, []), ('sendto', s.sendto, False, ["some.address"]), ('sendall', s.sendall, True, []), ] recv_methods = [ ('recv', s.recv, True, []), ('recvfrom', s.recvfrom, False, ["some.address"]), ('recv_into', _recv_into, True, []), ('recvfrom_into', _recvfrom_into, False, []), ] data_prefix = "PREFIX_" for meth_name, send_meth, expect_success, args in send_methods: indata = (data_prefix + meth_name).encode('ascii') try: send_meth(indata, *args) outdata = s.read() if outdata != indata.lower(): self.fail( "While sending with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to send with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) for meth_name, recv_meth, expect_success, args in recv_methods: indata = (data_prefix + meth_name).encode('ascii') try: s.send(indata) outdata = recv_meth(*args) if outdata != indata.lower(): self.fail( "While receiving with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to receive with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) # consume data s.read() # Make sure sendmsg et al are disallowed to avoid # inadvertent disclosure of data and/or corruption # of the encrypted data stream self.assertRaises(NotImplementedError, s.sendmsg, [b"data"]) self.assertRaises(NotImplementedError, s.recvmsg, 100) self.assertRaises(NotImplementedError, s.recvmsg_into, bytearray(100)) s.write(b"over\n") s.close() def test_handshake_timeout(self): # Issue #5103: SSL handshake must respect the socket timeout server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) started = threading.Event() finish = False def serve(): server.listen(5) started.set() conns = [] while not finish: r, w, e = 
select.select([server], [], [], 0.1) if server in r: # Let the socket hang around rather than having # it closed by garbage collection. conns.append(server.accept()[0]) for sock in conns: sock.close() t = threading.Thread(target=serve) t.start() started.wait() try: try: c = socket.socket(socket.AF_INET) c.settimeout(0.2) c.connect((host, port)) # Will attempt handshake and time out self.assertRaisesRegex(socket.timeout, "timed out", ssl.wrap_socket, c) finally: c.close() try: c = socket.socket(socket.AF_INET) c = ssl.wrap_socket(c) c.settimeout(0.2) # Will attempt handshake and time out self.assertRaisesRegex(socket.timeout, "timed out", c.connect, (host, port)) finally: c.close() finally: finish = True t.join() server.close() def test_server_accept(self): # Issue #16357: accept() on a SSLSocket created through # SSLContext.wrap_socket(). context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(CERTFILE) context.load_cert_chain(CERTFILE) server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) server = context.wrap_socket(server, server_side=True) evt = threading.Event() remote = None peer = None def serve(): nonlocal remote, peer server.listen(5) # Block on the accept and wait on the connection to close. evt.set() remote, peer = server.accept() remote.recv(1) t = threading.Thread(target=serve) t.start() # Client wait until server setup and perform a connect. evt.wait() client = context.wrap_socket(socket.socket()) client.connect((host, port)) client_addr = client.getsockname() client.close() t.join() remote.close() server.close() # Sanity checks. self.assertIsInstance(remote, ssl.SSLSocket) self.assertEqual(peer, client_addr) def test_getpeercert_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with context.wrap_socket(socket.socket()) as sock: with self.assertRaises(OSError) as cm: sock.getpeercert() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_do_handshake_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with context.wrap_socket(socket.socket()) as sock: with self.assertRaises(OSError) as cm: sock.do_handshake() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_default_ciphers(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) try: # Force a set of weak ciphers on our client context context.set_ciphers("DES") except ssl.SSLError: self.skipTest("no DES cipher available") with ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_SSLv23, chatty=False) as server: with context.wrap_socket(socket.socket()) as s: with self.assertRaises(OSError): s.connect((HOST, server.port)) self.assertIn("no shared cipher", str(server.conn_errors[0])) @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL") def test_default_ecdh_curve(self): # Issue #21015: elliptic curve-based Diffie Hellman key exchange # should be enabled by default on SSL contexts. context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.load_cert_chain(CERTFILE) # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled # explicitly using the 'ECCdraft' cipher alias. Otherwise, # our default cipher list should prefer ECDH-based ciphers # automatically. 
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0): context.set_ciphers("ECCdraft:ECDH") with ThreadedEchoServer(context=context) as server: with context.wrap_socket(socket.socket()) as s: s.connect((HOST, server.port)) self.assertIn("ECDH", s.cipher()[0]) @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES, "'tls-unique' channel binding not available") def test_tls_unique_channel_binding(self): """Test tls-unique channel binding.""" if support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) # get the data cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write(" got channel binding data: {0!r}\n" .format(cb_data)) # check if it is sane self.assertIsNotNone(cb_data) self.assertEqual(len(cb_data), 12) # True for TLSv1 # and compare with the peers version s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(cb_data).encode("us-ascii")) s.close() # now, again s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) new_cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write(" got another channel binding data: {0!r}\n" .format(new_cb_data)) # is it really unique self.assertNotEqual(cb_data, new_cb_data) self.assertIsNotNone(cb_data) self.assertEqual(len(cb_data), 12) # True for TLSv1 s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(new_cb_data).encode("us-ascii")) s.close() def test_compression(self): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) stats = server_params_test(context, context, chatty=True, connectionchatty=True) if support.verbose: sys.stdout.write(" got compression: {!r}\n".format(stats['compression'])) self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' }) @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'), "ssl.OP_NO_COMPRESSION needed for this test") def test_compression_disabled(self): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) context.options |= ssl.OP_NO_COMPRESSION stats = server_params_test(context, context, chatty=True, connectionchatty=True) self.assertIs(stats['compression'], None) def test_dh_params(self): # Check we can get a connection with ephemeral Diffie-Hellman context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) context.load_dh_params(DHFILE) context.set_ciphers("kEDH") stats = server_params_test(context, context, chatty=True, connectionchatty=True) cipher = stats["cipher"][0] parts = cipher.split("-") if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts: self.fail("Non-DH cipher: " + cipher[0]) def test_selected_npn_protocol(self): # selected_npn_protocol() is None unless NPN is used context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) stats = server_params_test(context, context, chatty=True, connectionchatty=True) self.assertIs(stats['client_npn_protocol'], None) @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test") def test_npn_protocols(self): server_protocols = ['http/1.1', 'spdy/2'] protocol_tests = [ 
(['http/1.1', 'spdy/2'], 'http/1.1'), (['spdy/2', 'http/1.1'], 'http/1.1'), (['spdy/2', 'test'], 'spdy/2'), (['abc', 'def'], 'abc') ] for client_protocols, expected in protocol_tests: server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(CERTFILE) server_context.set_npn_protocols(server_protocols) client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.load_cert_chain(CERTFILE) client_context.set_npn_protocols(client_protocols) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True) msg = "failed trying %s (s) and %s (c).\n" \ "was expecting %s, but got %%s from the %%s" \ % (str(server_protocols), str(client_protocols), str(expected)) client_result = stats['client_npn_protocol'] self.assertEqual(client_result, expected, msg % (client_result, "client")) server_result = stats['server_npn_protocols'][-1] \ if len(stats['server_npn_protocols']) else 'nothing' self.assertEqual(server_result, expected, msg % (server_result, "server")) def sni_contexts(self): server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) other_context.load_cert_chain(SIGNED_CERTFILE2) client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.verify_mode = ssl.CERT_REQUIRED client_context.load_verify_locations(SIGNING_CA) return server_context, other_context, client_context def check_common_name(self, stats, name): cert = stats['peercert'] self.assertIn((('commonName', name),), cert['subject']) @needs_sni def test_sni_callback(self): calls = [] server_context, other_context, client_context = self.sni_contexts() def servername_cb(ssl_sock, server_name, initial_context): calls.append((server_name, initial_context)) if server_name is not None: ssl_sock.context = other_context server_context.set_servername_callback(servername_cb) stats = server_params_test(client_context, server_context, chatty=True, sni_name='supermessage') # The hostname was fetched properly, and the certificate was # changed for the connection. self.assertEqual(calls, [("supermessage", server_context)]) # CERTFILE4 was selected self.check_common_name(stats, 'fakehostname') calls = [] # The callback is called with server_name=None stats = server_params_test(client_context, server_context, chatty=True, sni_name=None) self.assertEqual(calls, [(None, server_context)]) self.check_common_name(stats, 'localhost') # Check disabling the callback calls = [] server_context.set_servername_callback(None) stats = server_params_test(client_context, server_context, chatty=True, sni_name='notfunny') # Certificate didn't change self.check_common_name(stats, 'localhost') self.assertEqual(calls, []) @needs_sni def test_sni_callback_alert(self): # Returning a TLS alert is reflected to the connecting client server_context, other_context, client_context = self.sni_contexts() def cb_returning_alert(ssl_sock, server_name, initial_context): return ssl.ALERT_DESCRIPTION_ACCESS_DENIED server_context.set_servername_callback(cb_returning_alert) with self.assertRaises(ssl.SSLError) as cm: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED') @needs_sni def test_sni_callback_raising(self): # Raising fails the connection with a TLS handshake failure alert. 
server_context, other_context, client_context = self.sni_contexts() def cb_raising(ssl_sock, server_name, initial_context): 1/0 server_context.set_servername_callback(cb_raising) with self.assertRaises(ssl.SSLError) as cm, \ support.captured_stderr() as stderr: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE') self.assertIn("ZeroDivisionError", stderr.getvalue()) @needs_sni def test_sni_callback_wrong_return_type(self): # Returning the wrong return type terminates the TLS connection # with an internal error alert. server_context, other_context, client_context = self.sni_contexts() def cb_wrong_return_type(ssl_sock, server_name, initial_context): return "foo" server_context.set_servername_callback(cb_wrong_return_type) with self.assertRaises(ssl.SSLError) as cm, \ support.captured_stderr() as stderr: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR') self.assertIn("TypeError", stderr.getvalue()) def test_read_write_after_close_raises_valuerror(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(CERTFILE) context.load_cert_chain(CERTFILE) server = ThreadedEchoServer(context=context, chatty=False) with server: s = context.wrap_socket(socket.socket()) s.connect((HOST, server.port)) s.close() self.assertRaises(ValueError, s.read, 1024) self.assertRaises(ValueError, s.write, b'hello') def test_main(verbose=False): if support.verbose: plats = { 'Linux': platform.linux_distribution, 'Mac': platform.mac_ver, 'Windows': platform.win32_ver, } for name, func in plats.items(): plat = func() if plat and plat[0]: plat = '%s %r' % (name, plat) break else: plat = repr(platform.platform()) print("test_ssl: testing with %r %r" % (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO)) print(" under %s" % plat) print(" HAS_SNI = %r" % ssl.HAS_SNI) print(" OP_ALL = 0x%8x" % ssl.OP_ALL) try: print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1) except AttributeError: pass for filename in [ CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: if not os.path.exists(filename): raise support.TestFailed("Can't read certificate file %r" % filename) tests = [ContextTests, BasicSocketTests, SSLErrorTests] if support.is_resource_enabled('network'): tests.append(NetworkedTests) if _have_threads: thread_info = support.threading_setup() if thread_info: tests.append(ThreadedTests) try: support.run_unittest(*tests) finally: if _have_threads: support.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
{ "content_hash": "54d0e1e5e9b1e365a6cf638b84c47551", "timestamp": "", "source": "github", "line_count": 2937, "max_line_length": 117, "avg_line_length": 44.84984678243105, "alnum_prop": 0.5332285688257266, "repo_name": "samuelhavron/heroku-buildpack-python", "id": "dc82475a9d02a2ebf6c3779c2f9598f12a0a7d92", "size": "131770", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "Python-3.4.3/Lib/test/test_ssl.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "594205" }, { "name": "Batchfile", "bytes": "18943" }, { "name": "C", "bytes": "16647302" }, { "name": "C++", "bytes": "176362" }, { "name": "CSS", "bytes": "2839" }, { "name": "Common Lisp", "bytes": "24481" }, { "name": "DIGITAL Command Language", "bytes": "26402" }, { "name": "Groff", "bytes": "255056" }, { "name": "HTML", "bytes": "130855" }, { "name": "JavaScript", "bytes": "10598" }, { "name": "M4", "bytes": "214312" }, { "name": "Makefile", "bytes": "196708" }, { "name": "Objective-C", "bytes": "33060" }, { "name": "PLSQL", "bytes": "22886" }, { "name": "PostScript", "bytes": "13803" }, { "name": "PowerShell", "bytes": "1420" }, { "name": "Prolog", "bytes": "557" }, { "name": "Python", "bytes": "24212132" }, { "name": "R", "bytes": "5378" }, { "name": "Shell", "bytes": "488285" }, { "name": "TeX", "bytes": "323102" }, { "name": "Visual Basic", "bytes": "481" } ], "symlink_target": "" }
from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function import re from subprocess import PIPE from system_test import TestCase, Qdrouterd, TIMEOUT, main_module from system_test import unittest from proton.handlers import MessagingHandler from proton.reactor import Container from proton import Message class DefaultDistributionTest(TestCase): """System tests for testing the defaultDistribution attribute of the router entity""" @classmethod def setUpClass(cls): super(DefaultDistributionTest, cls).setUpClass() name = "test-router" config = Qdrouterd.Config([ ('router', {'mode': 'standalone', 'id': 'QDR', "defaultDistribution": 'unavailable'}), ('listener', {'port': cls.tester.get_port()}), ('address', {'prefix': 'closest', 'distribution': 'closest'}), ('address', {'prefix': 'spread', 'distribution': 'balanced'}), ('address', {'prefix': 'multicast', 'distribution': 'multicast'}) ]) cls.router = cls.tester.qdrouterd(name, config) cls.router.wait_ready() cls.address = cls.router.addresses[0] def run_qdstat(self, args, regexp=None, address=None): p = self.popen( ['qdstat', '--bus', str(address or self.address), '--timeout', str(TIMEOUT) ] + args, name='qdstat-'+self.id(), stdout=PIPE, expect=None, universal_newlines=True) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) if regexp: assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out) return out def test_create_unavailable_sender(self): test = UnavailableSender(self.address) test.run() self.assertTrue(test.passed) def test_create_unavailable_receiver(self): test = UnavailableReceiver(self.address) test.run() self.assertTrue(test.passed) def test_anonymous_sender(self): test = UnavailableAnonymousSender(self.address) test.run() self.assertTrue(test.received_error) def test_general(self): out = self.run_qdstat(['--general'], r'(?s)Router Statistics.*Mode\s*Standalone') self.assertTrue("Connections 1" in out) self.assertTrue("Nodes 0" in out) self.assertTrue("Auto Links 0" in out) self.assertTrue("Link Routes 0" in out) self.assertTrue("Router Id QDR" in out) self.assertTrue("Mode standalone" in out) class Timeout(object): def __init__(self, parent): self.parent = parent def on_timer_task(self, event): self.parent.timeout() class UnavailableBase(MessagingHandler): def __init__(self, address): super(UnavailableBase, self).__init__() self.address = address self.dest = "UnavailableBase" self.conn = None self.sender = None self.receiver = None self.link_error = False self.link_closed = False self.passed = False self.timer = None self.link_name = "base_link" def check_if_done(self): if self.link_error and self.link_closed: self.passed = True self.conn.close() self.timer.cancel() def on_link_error(self, event): link = event.link if event.link.name == self.link_name and link.remote_condition.description \ == "Node not found": self.link_error = True self.check_if_done() def on_link_remote_close(self, event): if event.link.name == self.link_name: self.link_closed = True self.check_if_done() def run(self): Container(self).run() class UnavailableSender(UnavailableBase): def __init__(self, address): super(UnavailableSender, self).__init__(address) def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, Timeout(self)) self.conn = event.container.connect(self.address) # Creating a sender to an address with unavailable distribution # The router will not allow this link to 
be established. It will close the link with an error of # "Node not found" self.sender = event.container.create_sender(self.conn, self.dest, name=self.link_name) class UnavailableReceiver(UnavailableBase): def __init__(self, address): super(UnavailableReceiver, self).__init__(address) def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, Timeout(self)) self.conn = event.container.connect(self.address) # Creating a receiver to an address with unavailable distribution # The router will not allow this link to be established. It will close the link with an error of # "Node not found" self.receiver = event.container.create_receiver(self.conn, self.dest, name=self.link_name) class UnavailableAnonymousSender(MessagingHandler): def __init__(self, address): super(UnavailableAnonymousSender, self).__init__() self.address = address self.dest = "UnavailableBase" self.conn = None self.sender = None self.receiver = None self.received_error = False self.timer = None self.link_name = "anon_link" self.error_description = "Deliveries cannot be sent to an unavailable address" self.error_name = u'amqp:not-found' self.num_sent = 0 def on_start(self, event): self.timer = event.reactor.schedule(TIMEOUT, Timeout(self)) self.conn = event.container.connect(self.address) # Creating an anonymous sender self.sender = event.container.create_sender(self.conn, name=self.link_name) def on_sendable(self, event): if self.num_sent < 1: msg = Message(id=1, body='Hello World') # this is a unavailable address msg.address = "SomeUnavailableAddress" event.sender.send(msg) self.num_sent += 1 def on_rejected(self, event): if event.link.name == self.link_name and event.delivery.remote.condition.name == self.error_name \ and self.error_description == event.delivery.remote.condition.description: self.received_error = True self.conn.close() self.timer.cancel() def run(self): Container(self).run() if __name__ == '__main__': unittest.main(main_module())
{ "content_hash": "5731ad6677bf3590f9a80fd51edc2f69", "timestamp": "", "source": "github", "line_count": 177, "max_line_length": 106, "avg_line_length": 38.0225988700565, "alnum_prop": 0.6141158989598812, "repo_name": "irinabov/debian-qpid-dispatch", "id": "de81e2f1b2e4fa0dc9166ffbe38bafd43bd802d5", "size": "7520", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/system_tests_default_distribution.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1954231" }, { "name": "C++", "bytes": "58231" }, { "name": "CMake", "bytes": "42570" }, { "name": "CSS", "bytes": "24393" }, { "name": "Dockerfile", "bytes": "3278" }, { "name": "HTML", "bytes": "2320" }, { "name": "JavaScript", "bytes": "719793" }, { "name": "Python", "bytes": "2115168" }, { "name": "Shell", "bytes": "34107" } ], "symlink_target": "" }
import logging import sys import numpy as np import pytest import rasterio from rasterio.enums import MaskFlags from rasterio.warnings import NodataShadowWarning logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) @pytest.fixture(scope='function') def tiffs(tmpdir): with rasterio.open('tests/data/RGB.byte.tif') as src: profile = src.profile shadowed_profile = profile.copy() shadowed_profile['count'] = 4 with rasterio.open( str(tmpdir.join('shadowed.tif')), 'w', **shadowed_profile) as dst: for i, band in enumerate(src.read(masked=False), 1): dst.write(band, i) dst.write(band, 4) del profile['nodata'] with rasterio.open( str(tmpdir.join('no-nodata.tif')), 'w', **profile) as dst: dst.write(src.read(masked=False)) with rasterio.open( str(tmpdir.join('sidecar-masked.tif')), 'w', **profile) as dst: dst.write(src.read(masked=False)) mask = np.zeros(src.shape, dtype='uint8') dst.write_mask(mask) return tmpdir def test_mask_flags(): with rasterio.open('tests/data/RGB.byte.tif') as src: for flags in src.mask_flags: assert flags & MaskFlags.nodata assert not flags & MaskFlags.per_dataset assert not flags & MaskFlags.alpha def test_mask_flags_sidecar(tiffs): filename = str(tiffs.join('sidecar-masked.tif')) with rasterio.open(filename) as src: for flags in src.mask_flags: assert not flags & MaskFlags.nodata assert not flags & MaskFlags.alpha assert flags & MaskFlags.per_dataset def test_mask_flags_shadow(tiffs): filename = str(tiffs.join('shadowed.tif')) with rasterio.open(filename) as src: for flags in src.mask_flags: assert flags & MaskFlags.nodata assert not flags & MaskFlags.alpha assert not flags & MaskFlags.per_dataset def test_warning_no(): """No shadow warning is raised""" with rasterio.open('tests/data/RGB.byte.tif') as src: try: rm, gm, bm = src.read_masks() except NodataShadowWarning: pytest.fail("Unexpected NodataShadowWarning raised") def test_warning_shadow(tiffs): """Shadow warning is raised""" filename = str(tiffs.join('shadowed.tif')) with rasterio.open(filename) as src: with pytest.warns(NodataShadowWarning): _ = src.read_masks() def test_masks(): with rasterio.open('tests/data/RGB.byte.tif') as src: rm, gm, bm = src.read_masks() r, g, b = src.read(masked=False) assert not r[rm==0].any() assert not g[gm==0].any() assert not b[bm==0].any() def test_masked_true(): with rasterio.open('tests/data/RGB.byte.tif') as src: r, g, b = src.read(masked=True) rm, gm, bm = src.read_masks() assert (r.mask==~rm.astype('bool')).all() assert (g.mask==~gm.astype('bool')).all() assert (b.mask==~bm.astype('bool')).all() def test_masked_none(): with rasterio.open('tests/data/RGB.byte.tif') as src: r, g, b = src.read(masked=True) rm, gm, bm = src.read_masks() assert (r.mask==~rm.astype('bool')).all() assert (g.mask==~gm.astype('bool')).all() assert (b.mask==~bm.astype('bool')).all() def test_masking_no_nodata(tiffs): # if the dataset has no defined nodata values, all data is # considered valid data. The GDAL masks bands are arrays of # 255 values. ``read()`` returns masked arrays where `mask` # is False. 
filename = str(tiffs.join('no-nodata.tif')) with rasterio.open(filename) as src: for flags in src.mask_flags: assert flags & MaskFlags.all_valid assert not flags & MaskFlags.alpha assert not flags & MaskFlags.nodata rgb = src.read(masked=False) assert not hasattr(rgb, 'mask') r = src.read(1, masked=False) assert not hasattr(r, 'mask') rgb = src.read(masked=True) assert hasattr(rgb, 'mask') assert not rgb.mask.any() r = src.read(1, masked=True) assert hasattr(r, 'mask') assert not r.mask.any() rgb = src.read(masked=True) assert hasattr(rgb, 'mask') assert not r.mask.any() r = src.read(1, masked=True) assert not r.mask.any() masks = src.read_masks() assert masks.all() def test_masking_sidecar_mask(tiffs): # If the dataset has a .msk sidecar mask band file, all masks will # be derived from that file. with rasterio.open(str(tiffs.join('sidecar-masked.tif'))) as src: for flags in src.mask_flags: assert flags & MaskFlags.per_dataset assert not flags & MaskFlags.alpha assert not flags & MaskFlags.nodata rgb = src.read(masked=True) assert rgb.mask.all() r = src.read(1, masked=True) assert r.mask.all() masks = src.read_masks() assert not masks.any()
{ "content_hash": "ff5f6beacab7ec1b92f3f5b8aa1d7ccd", "timestamp": "", "source": "github", "line_count": 164, "max_line_length": 70, "avg_line_length": 31.49390243902439, "alnum_prop": 0.5969022265246854, "repo_name": "njwilson23/rasterio", "id": "0624b10b993ef36035dec2c67456fc555a33b885", "size": "5165", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_band_masks.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "34752" }, { "name": "Python", "bytes": "566861" }, { "name": "Shell", "bytes": "3058" } ], "symlink_target": "" }
""" Client for Swift Copyright 2012-2013 Gregory Holt Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __version__ = VERSION = '2.07' """Version str for Swiftly: "major.minor". If the second number is even, it's an official release. If the second number is odd, it's a development release. """
{ "content_hash": "fe026862d36a4cda4aad3b0b857a2969", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 72, "avg_line_length": 32.375, "alnum_prop": 0.7619047619047619, "repo_name": "dpgoetz/swiftly", "id": "e5ccdad6b7a573e94ec0e620a6310b7dc5e6b2c8", "size": "777", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "swiftly/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "291131" } ], "symlink_target": "" }
import base64

from openid.store.interface import OpenIDStore as OIDStore
from openid.association import Association as OIDAssociation

from models import OpenIDStore, OpenIDNonce


class DBOpenIDStore(OIDStore):
    max_nonce_age = 6 * 60 * 60

    def storeAssociation(self, server_url, assoc=None):
        OpenIDStore.objects.create(
            server_url=server_url,
            handle=assoc.handle,
            secret=base64.encodestring(assoc.secret),
            issued=assoc.issued,
            lifetime=assoc.lifetime,
            assoc_type=assoc.assoc_type
        )

    def getAssociation(self, server_url, handle=None):
        stored_assocs = OpenIDStore.objects.filter(
            server_url=server_url
        )
        if handle:
            stored_assocs = stored_assocs.filter(handle=handle)

        # order_by returns a new queryset, so it must be reassigned for the
        # newest association to be considered first in the loop below
        stored_assocs = stored_assocs.order_by('-issued')

        if stored_assocs.count() == 0:
            return None

        return_val = None

        for stored_assoc in stored_assocs:
            assoc = OIDAssociation(
                stored_assoc.handle, base64.decodestring(stored_assoc.secret),
                stored_assoc.issued, stored_assoc.lifetime,
                stored_assoc.assoc_type
            )

            if assoc.getExpiresIn() == 0:
                stored_assoc.delete()
            else:
                if return_val is None:
                    return_val = assoc

        return return_val

    def removeAssociation(self, server_url, handle):
        stored_assocs = OpenIDStore.objects.filter(
            server_url=server_url
        )
        if handle:
            stored_assocs = stored_assocs.filter(handle=handle)

        stored_assocs.delete()

    def useNonce(self, server_url, timestamp, salt):
        try:
            OpenIDNonce.objects.get(
                server_url=server_url,
                timestamp=timestamp,
                salt=salt
            )
        except OpenIDNonce.DoesNotExist:
            OpenIDNonce.objects.create(
                server_url=server_url,
                timestamp=timestamp,
                salt=salt
            )
            return True

        return False
{ "content_hash": "b57497fe92d97d3d9ece785d8d236bcf", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 83, "avg_line_length": 28.89189189189189, "alnum_prop": 0.5757717492984097, "repo_name": "Suite5/DataColibri", "id": "802f99374c846e15cf96b7d4c2198c374f593e21", "size": "2138", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "allauth/socialaccount/providers/openid/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "381" }, { "name": "CSS", "bytes": "944246" }, { "name": "HTML", "bytes": "566711" }, { "name": "JavaScript", "bytes": "1510227" }, { "name": "PHP", "bytes": "972" }, { "name": "Python", "bytes": "1046512" }, { "name": "Shell", "bytes": "79" } ], "symlink_target": "" }
import unittest from covered_lib_coveragerc import mod1 class TestLib(unittest.TestCase): def test1(self): self.assertEqual(mod1.covered_func(), 9)
{ "content_hash": "28565fd547d92198d327bd1d0cf81b58", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 48, "avg_line_length": 20.375, "alnum_prop": 0.7300613496932515, "repo_name": "ptthiem/nose2", "id": "55821cc4f00bdc3752144575d55890e35ea7fbdd", "size": "163", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nose2/tests/functional/support/scenario/test_coverage_config/coveragerc/test_coveragerc.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "142" }, { "name": "Python", "bytes": "447958" }, { "name": "Shell", "bytes": "1125" } ], "symlink_target": "" }
def triangle(index):
    # nth triangle number: T(n) = n * (n + 1) / 2
    return (index * (index + 1)) // 2

def factors(n):
    # Returns the number of divisors of n, except that the exponent of 2 is
    # counted one lower than it really is.  Because exactly one of n and n + 1
    # is even, factors(n) * factors(n + 1) then equals the divisor count of
    # the triangle number T(n) = n * (n + 1) / 2.
    count = 0
    divisors = 1
    if n % 2 == 0:
        n = n // 2
        while n % 2 == 0:
            count += 1
            n = n // 2
        divisors = count + 1

    div = 3
    # at this point, n is odd
    while n != 1:
        count = 0
        while n % div == 0:
            count += 1
            n = n // div
        # at this point, n is not divisible by div
        divisors = divisors * (count + 1)
        # move on to the next odd candidate divisor
        div += 2

    # at this point, divisors holds the adjusted divisor count described above
    return divisors

def find_triangular_index(factor_limit):
    n = 1
    lnum, rnum = factors(n), factors(n + 1)
    while lnum * rnum < factor_limit:
        n += 1
        lnum, rnum = rnum, factors(n + 1)
    return n

n = find_triangular_index(500)
print("The %sth triangle number T(n)=%s is the first with at least 500 divisors" % (n, triangle(n)))
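
# Quick sanity check (small worked case) of the identity exploited above:
# T(12) = 78 = 2 * 3 * 13 has 8 divisors, which should match
# factors(12) * factors(13).
assert factors(12) * factors(13) == 8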
{ "content_hash": "fcf94526b973b24870f27918657cf646", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 86, "avg_line_length": 19.931818181818183, "alnum_prop": 0.6180159635119726, "repo_name": "luiscarlin/project-euler", "id": "f5e25977141ba4534723347aa3fb7ab3117c7b80", "size": "893", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "prob12/problem12_2.py", "mode": "33261", "license": "mit", "language": [ { "name": "C++", "bytes": "6245" }, { "name": "Java", "bytes": "8174" }, { "name": "Python", "bytes": "16159" } ], "symlink_target": "" }
from trac.util import create_file from trac.web.main import get_environments import tempfile import unittest import os.path class EnvironmentsTestCase(unittest.TestCase): dirs = ('mydir1', 'mydir2', '.hidden_dir') files = ('myfile1', 'myfile2', '.dot_file') def setUp(self): self.parent_dir = tempfile.mkdtemp(prefix='trac-') self.tracignore = os.path.join(self.parent_dir, '.tracignore') for dname in self.dirs: os.mkdir(os.path.join(self.parent_dir, dname)) for fname in self.files: create_file(os.path.join(self.parent_dir, fname)) self.environ = { 'trac.env_paths': [], 'trac.env_parent_dir': self.parent_dir, } def tearDown(self): for fname in self.files: os.unlink(os.path.join(self.parent_dir, fname)) for dname in self.dirs: os.rmdir(os.path.join(self.parent_dir, dname)) if os.path.exists(self.tracignore): os.unlink(self.tracignore) os.rmdir(self.parent_dir) def env_paths(self, projects): return dict((project, os.path.normpath(os.path.join(self.parent_dir, project))) for project in projects) def test_default_tracignore(self): self.assertEquals(self.env_paths(['mydir1', 'mydir2']), get_environments(self.environ)) def test_empty_tracignore(self): create_file(self.tracignore) self.assertEquals(self.env_paths(['mydir1', 'mydir2', '.hidden_dir']), get_environments(self.environ)) def test_qmark_pattern_tracignore(self): create_file(self.tracignore, 'mydir?') self.assertEquals(self.env_paths(['.hidden_dir']), get_environments(self.environ)) def test_star_pattern_tracignore(self): create_file(self.tracignore, 'my*\n.hidden_dir') self.assertEquals({}, get_environments(self.environ)) def test_combined_tracignore(self): create_file(self.tracignore, 'my*i?1\n\n#mydir2') self.assertEquals(self.env_paths(['mydir2', '.hidden_dir']), get_environments(self.environ)) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(EnvironmentsTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
{ "content_hash": "e53060c184a92e850a545ed03eb329b8", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 78, "avg_line_length": 34.605633802816904, "alnum_prop": 0.5991045991045991, "repo_name": "apache/bloodhound", "id": "a335b77996dd4d464d823048573ffa55579e80c3", "size": "2949", "binary": false, "copies": "2", "ref": "refs/heads/trunk", "path": "trac/trac/web/tests/main.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "2637" }, { "name": "CSS", "bytes": "120348" }, { "name": "HTML", "bytes": "810359" }, { "name": "JavaScript", "bytes": "106594" }, { "name": "Makefile", "bytes": "16471" }, { "name": "Python", "bytes": "4270544" }, { "name": "SaltStack", "bytes": "13073" }, { "name": "Shell", "bytes": "8486" } ], "symlink_target": "" }
import re from pathlib import Path from .base import Media from .types import Types class Animes(Media): _rRule = re.compile(r'\[.+\] ([\w\.\-\ ]+?)(?: S(\d))? - (\d{2,})') def __init__(self, path: Path) -> None: super().__init__(Types.ANIMES, path) @classmethod def match(cls, filename: str) -> bool: if cls._rRule.search(filename): return True return False @classmethod def parse(cls, filename: str) -> str: if cls._rRule.search(filename): return cls._rRule.split(filename)[1] return '' @classmethod def format(cls, filename: str) -> str: info = cls._rRule.search(filename) if not info: return '' title, season, ep = info.groups() if not season: season = '01' return '{0} - S{1:0>2}E{2}'.format(title, season, ep)
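
# Quick illustration (made-up filename) of how the matcher and formatter above
# behave when exercised inside the package (e.g. via ``python -m``); the season
# falls back to "01" when the release name carries no explicit one.
if __name__ == "__main__":
    sample = "[SomeGroup] My Show - 07.mkv"
    print(Animes.match(sample))    # True
    print(Animes.parse(sample))    # "My Show"
    print(Animes.format(sample))   # "My Show - S01E07"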
{ "content_hash": "3e6d8f761ccb901d77111a5ea87f2df8", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 71, "avg_line_length": 24.583333333333332, "alnum_prop": 0.5412429378531074, "repo_name": "caedus75/Renamer", "id": "2fe00a1e64976a53a132c34e5131cdeb70d8a3e6", "size": "1080", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/renamer/filename/animes.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "29834" } ], "symlink_target": "" }
"""Test for the tit for tat strategies.""" import axelrod from .test_player import TestPlayer from .test_player import TestHeadsUp C, D = axelrod.Actions.C, axelrod.Actions.D class TestTitForTat(TestPlayer): """ Note that this test is referred to in the documentation as an example on writing tests. If you modify the tests here please also modify the documentation. """ name = "Tit For Tat" player = axelrod.TitForTat expected_classifier = { 'memory_depth': 1, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Repeats last action of opponent history.""" self.markov_test([C, D, C, D]) self.responses_test([C] * 4, [C, C, C, C], [C]) self.responses_test([C] * 5, [C, C, C, C, D], [D]) class TestTitFor2Tats(TestPlayer): name = 'Tit For 2 Tats' player = axelrod.TitFor2Tats expected_classifier = { 'memory_depth': 2, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Will defect only when last two turns of opponent were defections.""" self.responses_test([C, C, C], [D, D, D], [D]) self.responses_test([C, C, D, D], [D, D, D, C], [C]) class TestTwoTitsForTat(TestPlayer): name = 'Two Tits For Tat' player = axelrod.TwoTitsForTat expected_classifier = { 'memory_depth': 2, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Will defect twice when last turn of opponent was defection.""" self.responses_test([C], [D], [D]) self.responses_test([C, C], [D, D], [D]) self.responses_test([C, C, C], [D, D, C], [D]) self.responses_test([C, C, D, D], [D, D, C, C], [C]) class TestBully(TestPlayer): name = "Bully" player = axelrod.Bully expected_classifier = { 'memory_depth': 1, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by defecting""" self.first_play_test(D) def test_affect_of_strategy(self): """Will do opposite of what opponent does.""" self.markov_test([D, C, D, C]) class TestSneakyTitForTat(TestPlayer): name = "Sneaky Tit For Tat" player = axelrod.SneakyTitForTat expected_classifier = { 'memory_depth': float('inf'), # Long memory 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Will try defecting after two turns of cooperation, but will stop if punished.""" self.responses_test([C, C], [C, C], [D]) self.responses_test([C, C, D, D], [C, C, C, D], [C]) class TestSuspiciousTitForTat(TestPlayer): name = 'Suspicious Tit For Tat' player = axelrod.SuspiciousTitForTat expected_classifier = { 'memory_depth': 1, # Four-Vector = (1.,0.,1.,0.) 
'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by Defecting""" self.first_play_test(D) def test_affect_of_strategy(self): """Plays like TFT after the first move, repeating the opponents last move.""" self.markov_test([C, D, C, D]) class TestAntiTitForTat(TestPlayer): name = 'Anti Tit For Tat' player = axelrod.AntiTitForTat expected_classifier = { 'memory_depth': 1, # Four-Vector = (1.,0.,1.,0.) 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by Cooperating""" self.first_play_test(C) def test_affect_of_strategy(self): """Will do opposite of what opponent does.""" self.markov_test([D, C, D, C]) class TestHardTitForTat(TestPlayer): name = "Hard Tit For Tat" player = axelrod.HardTitForTat expected_classifier = { 'memory_depth': 3, # Four-Vector = (1.,0.,1.,0.) 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Repeats last action of opponent history.""" self.responses_test([C, C, C], [C, C, C], [C]) self.responses_test([C, C, C], [D, C, C], [D]) self.responses_test([C, C, C], [C, D, C], [D]) self.responses_test([C, C, C], [C, C, D], [D]) self.responses_test([C, C, C, C], [D, C, C, C], [C]) class TestHardTitFor2Tats(TestPlayer): name = "Hard Tit For 2 Tats" player = axelrod.HardTitFor2Tats expected_classifier = { 'memory_depth': 3, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Repeats last action of opponent history.""" self.responses_test([C, C, C], [C, C, C], [C]) self.responses_test([C, C, C], [D, C, C], [C]) self.responses_test([C, C, C], [C, D, C], [C]) self.responses_test([C, C, C], [C, C, D], [C]) self.responses_test([C, C, C], [D, C, D], [C]) self.responses_test([C, C, C], [D, D, C], [D]) self.responses_test([C, C, C], [C, D, D], [D]) self.responses_test([C, C, C, C], [D, C, C, C], [C]) self.responses_test([C, C, C, C], [D, D, C, C], [C]) self.responses_test([C, C, C, C], [C, D, D, C], [D]) class OmegaTFT(TestPlayer): name = "Omega TFT" player = axelrod.OmegaTFT expected_classifier = { 'memory_depth': float('inf'), 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) for i in range(10): self.responses_test([C] * i, [C] * i, [C]) def test_reset(self): player = self.player() opponent = axelrod.Defector() [player.play(opponent) for _ in range(10)] player.reset() self.assertEqual(player.randomness_counter, 0) self.assertEqual(player.deadlock_counter, 0) class TestOmegaTFTvsSTFT(TestHeadsUp): def test_rounds(self): outcomes = zip([C, D, C, D, C, C, C, C, C], [D, C, D, C, D, C, C, C, C]) self.versus_test(axelrod.OmegaTFT, axelrod.SuspiciousTitForTat, outcomes) class TestOmegaTFTvsAlternator(TestHeadsUp): def test_rounds(self): outcomes = zip([C, C, D, C, D, C, C, C, D, C, C, C, D, D, D, D, D, D], [C, D, C, D, C, D, C, D, C, D, C, D, C, D, C, D, C, D]) self.versus_test(axelrod.OmegaTFT, axelrod.Alternator, outcomes)
{ "content_hash": "6700c09ce0bd74a287ed5fc8bdc9ea6f", "timestamp": "", "source": "github", "line_count": 263, "max_line_length": 91, "avg_line_length": 29.897338403041825, "alnum_prop": 0.5712832252320997, "repo_name": "mojones/Axelrod", "id": "ad0f66fd7aeeff8f3b466ad48baca352f874567d", "size": "7863", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "axelrod/tests/unit/test_titfortat.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "410259" }, { "name": "Shell", "bytes": "959" } ], "symlink_target": "" }
""" The module's functions operate on message bodies trying to extract original messages (without quoted messages) from html """ import regex as re import html2text from lxml import html, etree from copy import deepcopy RE_FWD = re.compile("(([-]+[ ]*Forwarded message[ ]*[-]+)|(Begin forwarded message:))", re.I | re.M) CHECKPOINT_PREFIX = '#!%!' CHECKPOINT_SUFFIX = '!%!#' CHECKPOINT_PATTERN = re.compile(CHECKPOINT_PREFIX + '\d+' + CHECKPOINT_SUFFIX) # HTML quote indicators (tag ids) QUOTE_IDS = ['OLK_SRC_BODY_SECTION'] def add_checkpoint(html_note, counter): """Recursively adds checkpoints to html tree. checkpoints are added to both text and tail of each tag with DFS ordered counter """ if html_note.text: html_note.text = (html_note.text + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) else: html_note.text = (CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) counter += 1 for child in html_note.iterchildren(): counter = add_checkpoint(child, counter) if html_note.tail: html_note.tail = (html_note.tail + CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) else: html_note.tail = (CHECKPOINT_PREFIX + str(counter) + CHECKPOINT_SUFFIX) counter += 1 return counter def delete_quotation_tags(html_note, quotation_checkpoints, placeholder=None): """Deletes tags with quotation checkpoints from html tree via depth-first traversal. mutates html_note to delete tags returns what was deleted """ def recursive_helper(html_note, counter, insert_placeholder): # enter node (check text) tag_text_in_quotation = quotation_checkpoints[counter] if tag_text_in_quotation: if insert_placeholder: html_note.insert(0, placeholder) insert_placeholder = False html_note.text = '' counter += 1 # recurse on children quotation_children = [] # Children tags which are in quotation. for child in html_note.iterchildren(): if child is placeholder: # skip placeholder in recursion continue child, counter, child_tag_in_quotation = recursive_helper(child, counter, insert_placeholder) if child_tag_in_quotation: quotation_children.append(child) # exit node (check tail) tag_tail_in_quotation = quotation_checkpoints[counter] if tag_tail_in_quotation: if insert_placeholder and not quotation_children: html_note.append(placeholder) insert_placeholder = False html_note.tail = '' counter += 1 # build return structure counter, is_in_quotation tag_in_quotation = tag_text_in_quotation and tag_tail_in_quotation if tag_in_quotation: return html_note, counter, True else: # Remove quotation children. if insert_placeholder and quotation_children: quotation_children[0].addprevious(placeholder) for child in quotation_children: html_note.remove(child) return html_note, counter, False recursive_helper(html_note, 0, placeholder != None) def cut_gmail_quote(html_message, placeholder=None): ''' Cuts the last outermost blockquote in the outermost element with class gmail_quote. 
''' gmail_quote = html_message.cssselect('.gmail_quote') if gmail_quote: gmail_quote = gmail_quote[0] if gmail_quote.text and (re.search(RE_FWD, gmail_quote.text)): return False if len(gmail_quote) and (re.search(RE_FWD, html.tostring(gmail_quote[0]))): return False blockquotes = gmail_quote.xpath('//blockquote') if blockquotes: blockquotes = blockquotes[0].getparent().xpath('./blockquote') if len(blockquotes) == 1: if placeholder is not None: gmail_quote.addprevious(placeholder) gmail_quote.getparent().remove(gmail_quote) return True if len(blockquotes) > 1: if placeholder is not None: blockquotes[-1].addprevious(placeholder) blockquotes[-1].getparent().remove(blockquotes[-1]) return True return False return False def cut_microsoft_quote(html_message, placeholder=None): ''' Cuts splitter block and all following blocks. ''' splitter = html_message.xpath( #outlook 2007, 2010 "//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;" "padding:3.0pt 0cm 0cm 0cm']|" #windows mail "//div[@style='padding-top: 5px; " "border-top-color: rgb(229, 229, 229); " "border-top-width: 1px; border-top-style: solid;']" ) if splitter: splitter = splitter[0] #outlook 2010 if splitter == splitter.getparent().getchildren()[0]: splitter = splitter.getparent() else: #outlook 2003 splitter = html_message.xpath( "//div" "/div[@class='MsoNormal' and @align='center' " "and @style='text-align:center']" "/font" "/span" "/hr[@size='3' and @width='100%' and @align='center' " "and @tabindex='-1']" ) if len(splitter): splitter = splitter[0] splitter = splitter.getparent().getparent() splitter = splitter.getparent().getparent() if len(splitter): parent = splitter.getparent() after_splitter = splitter.getnext() while after_splitter is not None: parent.remove(after_splitter) after_splitter = splitter.getnext() if placeholder is not None: splitter.addprevious(placeholder) parent.remove(splitter) return True return False def cut_by_id(html_message, placeholder=None): found = False for quote_id in QUOTE_IDS: quote = html_message.cssselect('#{}'.format(quote_id)) if quote: if placeholder is not None: quote[0].addprevious(placeholder) quote[0].getparent().remove(quote[0]) return True return False def cut_blockquote(html_message, placeholder=None): ''' Cuts blockquote with wrapping elements. ''' blockquotes = html_message.xpath('//blockquote') if blockquotes: # get only highest-level blockquotes blockquotes = blockquotes[0].getparent().xpath('./blockquote') if blockquotes: if blockquotes[0].text and (re.search(RE_FWD, blockquotes[0].text)): return False if blockquotes[0].getprevious() and (re.search(RE_FWD, html.tostring(blockquotes[0].getprevious()))): return False if blockquotes[0].getparent().text and (re.search(RE_FWD, blockquotes[0].getparent().text)): return False if len(blockquotes[0]) and (re.search(RE_FWD, html.tostring(blockquotes[0][0]))): return False if len(blockquotes) == 1: if placeholder is not None: blockquotes[0].addprevious(placeholder) blockquotes[0].getparent().remove(blockquotes[0]) return True if len(blockquotes) > 1: if placeholder is not None: blockquotes[-1].addprevious(placeholder) blockquotes[-1].getparent().remove(blockquotes[-1]) return True return False return False
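
# Small sketch of the checkpoint pass on a toy fragment (the markup below is
# made up for illustration): every text and tail slot receives a numbered
# marker, which is what CHECKPOINT_PATTERN can later locate.
if __name__ == "__main__":
    fragment = html.fromstring(
        "<div>Hello<blockquote>old reply</blockquote>bye</div>")
    total = add_checkpoint(fragment, 0)
    print(total)                    # 4 checkpoints: one per text/tail slot
    print(html.tostring(fragment))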
{ "content_hash": "34b0aad773c32e9da2410088862ebe50", "timestamp": "", "source": "github", "line_count": 211, "max_line_length": 113, "avg_line_length": 36.644549763033176, "alnum_prop": 0.5955768235902742, "repo_name": "mgontav/talon", "id": "1b927ff81aa91f447d2a6a9cadda427a1385ff3f", "size": "7732", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "talon/html_quotations.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "97496" } ], "symlink_target": "" }
from django.test import TestCase from django.contrib.auth.models import User FIXTURES = ["test_data"] class BaseTest(TestCase): fixtures = FIXTURES def setUp(self): for user in User.objects.all(): user.set_password(user.password) user.save() # from : # http://stackoverflow.com/questions/8017204/users-in-initial-data-fixture
{ "content_hash": "0910af281825da6dd45f9de392cc0da7", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 82, "avg_line_length": 27.5, "alnum_prop": 0.6571428571428571, "repo_name": "echodelt/django-auth-skel-project", "id": "653bddfb3a12cbddf9694f33ba128e31c65142eb", "size": "410", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "account/tests/base.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3829" }, { "name": "HTML", "bytes": "39924" }, { "name": "Python", "bytes": "44127" } ], "symlink_target": "" }
def test_metrics(client): resp = client.get("/metrics") assert 200 == resp.status_code assert b"auth_server_error_total" in resp.data
{ "content_hash": "8295a74258ac73badb239029cec8dac9", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 50, "avg_line_length": 29.4, "alnum_prop": 0.6802721088435374, "repo_name": "adamcik/oauthclientbridge", "id": "2996be6933bd3660ded8fd508cf822546fcac39f", "size": "147", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/metrics_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "62545" } ], "symlink_target": "" }
"""Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu :version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ __author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" __version__ = '1.0' __revision__ = "$Revision: 3473 $" __date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' import numpy as np from numpy.testing import * from numpy.ma.testutils import * from numpy.ma.core import * class SubArray(np.ndarray): """Defines a generic np.ndarray subclass, that stores some metadata in the dictionary `info`.""" def __new__(cls,arr,info={}): x = np.asanyarray(arr).view(cls) x.info = info return x def __array_finalize__(self, obj): self.info = getattr(obj,'info',{}) return def __add__(self, other): result = np.ndarray.__add__(self, other) result.info.update({'added':result.info.pop('added',0)+1}) return result subarray = SubArray class MSubArray(SubArray,MaskedArray): def __new__(cls, data, info={}, mask=nomask): subarr = SubArray(data, info) _data = MaskedArray.__new__(cls, data=subarr, mask=mask) _data.info = subarr.info return _data def __array_finalize__(self,obj): MaskedArray.__array_finalize__(self,obj) SubArray.__array_finalize__(self, obj) return def _get_series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view _series = property(fget=_get_series) msubarray = MSubArray class MMatrix(MaskedArray, np.matrix,): def __new__(cls, data, mask=nomask): mat = np.matrix(data) _data = MaskedArray.__new__(cls, data=mat, mask=mask) return _data def __array_finalize__(self,obj): np.matrix.__array_finalize__(self, obj) MaskedArray.__array_finalize__(self,obj) return def _get_series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view _series = property(fget=_get_series) mmatrix = MMatrix class TestSubclassing(TestCase): """Test suite for masked subclasses of ndarray.""" def test_data_subclassing(self): "Tests whether the subclass is kept." 
x = np.arange(5) m = [0,0,1,0,0] xsub = SubArray(x) xmsub = masked_array(xsub, mask=m) self.failUnless(isinstance(xmsub, MaskedArray)) assert_equal(xmsub._data, xsub) self.failUnless(isinstance(xmsub._data, SubArray)) def test_maskedarray_subclassing(self): "Tests subclassing MaskedArray" x = np.arange(5) mx = mmatrix(x,mask=[0,1,0,0,0]) self.failUnless(isinstance(mx._data, np.matrix)) "Tests masked_unary_operation" self.failUnless(isinstance(add(mx,mx), mmatrix)) self.failUnless(isinstance(add(mx,x), mmatrix)) assert_equal(add(mx,x), mx+x) self.failUnless(isinstance(add(mx,mx)._data, np.matrix)) self.failUnless(isinstance(add.outer(mx,mx), mmatrix)) "Tests masked_binary_operation" self.failUnless(isinstance(hypot(mx,mx), mmatrix)) self.failUnless(isinstance(hypot(mx,x), mmatrix)) def test_attributepropagation(self): x = array(arange(5), mask=[0]+[1]*4) my = masked_array(subarray(x)) ym = msubarray(x) # z = (my+1) self.failUnless(isinstance(z,MaskedArray)) self.failUnless(not isinstance(z, MSubArray)) self.failUnless(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) # z = (ym+1) self.failUnless(isinstance(z, MaskedArray)) self.failUnless(isinstance(z, MSubArray)) self.failUnless(isinstance(z._data, SubArray)) self.failUnless(z._data.info['added'] > 0) # ym._set_mask([1,0,0,0,1]) assert_equal(ym._mask, [1,0,0,0,1]) ym._series._set_mask([0,0,0,0,1]) assert_equal(ym._mask, [0,0,0,0,1]) # xsub = subarray(x, info={'name':'x'}) mxsub = masked_array(xsub) self.failUnless(hasattr(mxsub, 'info')) assert_equal(mxsub.info, xsub.info) def test_subclasspreservation(self): "Checks that masked_array(...,subok=True) preserves the class." x = np.arange(5) m = [0,0,1,0,0] xinfo = [(i,j) for (i,j) in zip(x,m)] xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) # mxsub = masked_array(xsub, subok=False) self.failUnless(not isinstance(mxsub, MSubArray)) self.failUnless(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = asarray(xsub) self.failUnless(not isinstance(mxsub, MSubArray)) self.failUnless(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = masked_array(xsub, subok=True) self.failUnless(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, xsub._mask) # mxsub = asanyarray(xsub) self.failUnless(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, m) ################################################################################ if __name__ == '__main__': run_module_suite()
{ "content_hash": "13e7964228ea08d7899f241dda43e15d", "timestamp": "", "source": "github", "line_count": 154, "max_line_length": 80, "avg_line_length": 35.214285714285715, "alnum_prop": 0.5935828877005348, "repo_name": "qpython-android/QPypi-numpy", "id": "5943ad6c1daab51c88fa1c14c5089b9299c4a9ee", "size": "5471", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "numpy/ma/tests/test_subclassing.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "3832435" }, { "name": "C++", "bytes": "73392" }, { "name": "FORTRAN", "bytes": "4872" }, { "name": "Objective-C", "bytes": "135" }, { "name": "Python", "bytes": "3402698" } ], "symlink_target": "" }
from flask_restful.fields import Raw, MarshallingException class Dict(Raw): def __init__(self, cls_or_instance, **kwargs): super(Dict, self).__init__(**kwargs) error_msg = ("The type of the dict elements must be a subclass of " "flask_restful.fields.Raw") if isinstance(cls_or_instance, type): if not issubclass(cls_or_instance, Raw): raise MarshallingException(error_msg) self.container = cls_or_instance() else: if not isinstance(cls_or_instance, Raw): raise MarshallingException(error_msg) self.container = cls_or_instance def format(self, value): return { str(key): self.container.output(key, value) for key in value if key is not None }
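
# Minimal usage sketch (hypothetical resource layout), assuming the stock
# flask_restful ``fields`` and ``marshal`` helpers: every value in the mapped
# dict is rendered through the wrapped field type.
if __name__ == "__main__":
    from flask_restful import fields, marshal

    resource_fields = {
        "name": fields.String,
        "votes": Dict(fields.Integer),  # e.g. {"up": 3, "down": 1}
    }
    data = {"name": "onion fund", "votes": {"up": "3", "down": "1"}}
    print(marshal(data, resource_fields))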
{ "content_hash": "35ff195f8ac2aebfa35955c6c6ebce9d", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 75, "avg_line_length": 34.5, "alnum_prop": 0.5833333333333334, "repo_name": "hackerspace-silesia/cebulany-manager", "id": "2e5cc5cbdbcb44000f3e17e465510701e25a6b4f", "size": "828", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cebulany/fields.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "152" }, { "name": "HTML", "bytes": "3771" }, { "name": "JavaScript", "bytes": "24254" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "89314" }, { "name": "Shell", "bytes": "354" }, { "name": "Vue", "bytes": "70090" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.apps import AppConfig class AgentesConfig(AppConfig): name = 'agentes'
{ "content_hash": "bc9146d2c54641d9d48031f4f8eb2290", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 39, "avg_line_length": 18.571428571428573, "alnum_prop": 0.7538461538461538, "repo_name": "fcopantoja/sips", "id": "787fd321fcbada74d5c662ae0f3e457de661cd38", "size": "130", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sips/agentes/apps.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "321522" }, { "name": "HTML", "bytes": "41865" }, { "name": "JavaScript", "bytes": "19371" }, { "name": "Python", "bytes": "53936" } ], "symlink_target": "" }
import base64 import tensorflow_data_validation as tfdv from IPython.display import display from IPython.display import HTML from tensorflow_metadata.proto.v0 import statistics_pb2 from typing import Text # The following variables are provided through dependency injection. These # variables come from the specified input path and arguments provided by the # API post request. # # source # train_stats = tfdv.generate_statistics_from_csv(data_location=source) # tfdv.visualize_statistics(train_stats) def get_statistics_html( lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList ) -> Text: """Build the HTML for visualizing the input statistics using Facets. Args: lhs_statistics: A DatasetFeatureStatisticsList protocol buffer. Returns: HTML to be embedded for visualization. Raises: TypeError: If the input argument is not of the expected type. ValueError: If the input statistics protos does not have only one dataset. """ rhs_statistics = None lhs_name = 'lhs_statistics' rhs_name = 'rhs_statistics' if not isinstance(lhs_statistics, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'lhs_statistics is of type %s, should be ' 'a DatasetFeatureStatisticsList proto.' % type(lhs_statistics).__name__) if len(lhs_statistics.datasets) != 1: raise ValueError('lhs_statistics proto contains multiple datasets. Only ' 'one dataset is currently supported.') if lhs_statistics.datasets[0].name: lhs_name = lhs_statistics.datasets[0].name # Add lhs stats. combined_statistics = statistics_pb2.DatasetFeatureStatisticsList() lhs_stats_copy = combined_statistics.datasets.add() lhs_stats_copy.MergeFrom(lhs_statistics.datasets[0]) lhs_stats_copy.name = lhs_name protostr = base64.b64encode( combined_statistics.SerializeToString()).decode('utf-8') # pylint: disable=line-too-long # Note that in the html template we currently assign a temporary id to the # facets element and then remove it once we have appended the serialized proto # string to the element. We do this to avoid any collision of ids when # displaying multiple facets output in the notebook. html_template = """<iframe id='facets-iframe' width="100%" height="500px"></iframe> <script> facets_iframe = document.getElementById('facets-iframe'); facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>'; facets_iframe.srcdoc = facets_html; facets_iframe.id = ""; setTimeout(() => { facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px') }, 1500) </script>""" # pylint: enable=line-too-long html = html_template.replace('protostr', protostr) return html stats = tfdv.load_statistics(source) html = get_statistics_html(stats) display(HTML(html))
{ "content_hash": "868079707d2bcaab31f06c008587cd11", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 306, "avg_line_length": 40.34615384615385, "alnum_prop": 0.7235462345090562, "repo_name": "kubeflow/kfp-tekton-backend", "id": "ac3e303b60de1af4c3c2428444d3b2119cd79d5c", "size": "3723", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "backend/src/apiserver/visualization/types/tfdv.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "47293" }, { "name": "Go", "bytes": "1269081" }, { "name": "HTML", "bytes": "3584" }, { "name": "JavaScript", "bytes": "24828" }, { "name": "Jupyter Notebook", "bytes": "177616" }, { "name": "Makefile", "bytes": "9694" }, { "name": "PowerShell", "bytes": "3194" }, { "name": "Python", "bytes": "1628570" }, { "name": "Scala", "bytes": "13000" }, { "name": "Shell", "bytes": "180020" }, { "name": "Smarty", "bytes": "7694" }, { "name": "Starlark", "bytes": "76037" }, { "name": "TypeScript", "bytes": "1641150" } ], "symlink_target": "" }
from collections import defaultdict import math from recommends.similarities import sim_distance from recommends.converters import convert_vote_list_to_userprefs, convert_vote_list_to_itemprefs from .base import BaseAlgorithm class NaiveAlgorithm(BaseAlgorithm): """ """ similarity = sim_distance def top_matches(self, prefs, p1): """ Returns the best matches for p1 from the prefs dictionary. """ return [(p2, self.similarity(prefs[p1], prefs[p2])) for p2 in prefs if p2 != p1] def calculate_similarities(self, vote_list, verbose=0): # Invert the preference matrix to be item-centric itemPrefs = convert_vote_list_to_itemprefs(vote_list) itemMatch = {} for item in itemPrefs: # Find the most similar items to this one itemMatch[item] = self.top_matches(itemPrefs, item) iteritems = itemMatch.items() return iteritems def get_recommended_items(self, vote_list, itemMatch, itemIgnored, user): prefs = convert_vote_list_to_userprefs(vote_list) itemMatch = dict(itemMatch) if user in prefs: userRatings = prefs[user] scores = defaultdict(int) totalSim = defaultdict(int) # Loop over items rated by this user for (item, rating) in userRatings.items(): # Loop over items similar to this one for (item2, similarity) in itemMatch[item]: # Skip ignored items if user.pk in itemIgnored and item2 in itemIgnored[user.pk]: continue # Ignore if this user has already rated this item if not math.isnan(similarity) and item2 not in userRatings: # Weighted sum of rating times similarity scores[item2] += similarity * rating # Sum of all the similarities totalSim[item2] += similarity # Divide each total score by total weighting to get an average rankings = ((item, (score / totalSim[item])) for item, score in scores.items() if totalSim[item] != 0) return rankings return [] def calculate_recommendations(self, vote_list, itemMatch, itemIgnored): """ ``itemMatch`` is supposed to be the result of ``calculate_similarities()`` Returns a list of recommendations: :: [ (<user1>, [ ("<object_identifier1>", <score>), ("<object_identifier2>", <score>), ]), (<user2>, [ ("<object_identifier1>", <score>), ("<object_identifier2>", <score>), ]), ] """ recommendations = [] users = set(map(lambda x: x[0], vote_list)) for user in users: rankings = self.get_recommended_items(vote_list, itemMatch, itemIgnored, user) recommendations.append((user, rankings)) return recommendations
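
# Standalone sketch of the scoring rule used in get_recommended_items above:
# the predicted score for an unseen item is the similarity-weighted average of
# the user's existing ratings (names and numbers below are made up).
if __name__ == "__main__":
    user_ratings = {"film_a": 4.0, "film_b": 2.0}
    similarity_to_candidate = {"film_a": 0.9, "film_b": 0.3}  # vs. "film_c"

    weighted_sum = sum(similarity_to_candidate[item] * rating
                       for item, rating in user_ratings.items())
    total_sim = sum(similarity_to_candidate.values())
    print("predicted score for film_c: %.2f" % (weighted_sum / total_sim))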
{ "content_hash": "984909cbf9a4fffd85012247399d958c", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 114, "avg_line_length": 37.68674698795181, "alnum_prop": 0.5664961636828645, "repo_name": "python-recsys/django-recommends", "id": "63a0ade22c9418483f951a7aa6624170f24cd19b", "size": "3128", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "recommends/algorithms/naive.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1583" }, { "name": "Python", "bytes": "99141" } ], "symlink_target": "" }
def request_method_decorator(f): """Wraps methods returned from a resource to capture HttpRequests. When a method which returns HttpRequests is called, it will pass the method and arguments off to the transport to be executed. This wrapping allows the transport to skim arguments off the top of the method call, and modify any return values (such as executing a returned HttpRequest). However, if called with the ``internal`` argument set to True, the method itself will be executed and the value returned as-is. Thus, any method calls embedded inside the code for another method should use the ``internal`` argument to access the expected value. """ def request_method(self, *args, **kwargs): if kwargs.pop('internal', False): return f(self, *args, **kwargs) else: def method_wrapper(*args, **kwargs): return f(self, *args, **kwargs) return self._transport.execute_request_method(method_wrapper, *args, **kwargs) request_method.__name__ = f.__name__ request_method.__doc__ = f.__doc__ request_method.__dict__.update(f.__dict__) return request_method
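
# Minimal sketch (hypothetical transport and resource classes) of the two call
# paths the decorator creates: a plain call is routed through the transport,
# while ``internal=True`` runs the wrapped method directly.
if __name__ == "__main__":
    class FakeTransport(object):
        def execute_request_method(self, method, *args, **kwargs):
            print("transport executing %r %r" % (args, kwargs))
            return method(*args, **kwargs)

    class FakeResource(object):
        def __init__(self, transport):
            self._transport = transport

        @request_method_decorator
        def get_thing(self, thing_id):
            return "HttpRequest for %s" % thing_id

    resource = FakeResource(FakeTransport())
    print(resource.get_thing(42))                 # goes through the transport
    print(resource.get_thing(42, internal=True))  # bypasses the transport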
{ "content_hash": "16db53c43ec3376e5469c55f174a9471", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 74, "avg_line_length": 42.96551724137931, "alnum_prop": 0.637239165329053, "repo_name": "brettdh/rbtools", "id": "48a14da52e564072ccbe50c1e7a843e2b534b024", "size": "1246", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "rbtools/api/decorators.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import nc import iso_modal import math ################################################################################ class Creator(iso_modal.Creator): def __init__(self): iso_modal.Creator.__init__(self) self.output_tool_definitions = False ################################################################################ nc.creator = Creator()
{ "content_hash": "bcae1043ad1af32cc14b041661954133", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 80, "avg_line_length": 27.428571428571427, "alnum_prop": 0.3619791666666667, "repo_name": "JohnyEngine/CNC", "id": "c6b86a471a34605fdf31730a2b849ab29f5dee34", "size": "625", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heekscnc/nc/siegkx1.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "30017" }, { "name": "Batchfile", "bytes": "10126" }, { "name": "C", "bytes": "209705" }, { "name": "C++", "bytes": "7311456" }, { "name": "CMake", "bytes": "92171" }, { "name": "Inno Setup", "bytes": "29066" }, { "name": "Makefile", "bytes": "16079" }, { "name": "Objective-C", "bytes": "8124" }, { "name": "Python", "bytes": "1182253" }, { "name": "Shell", "bytes": "9694" } ], "symlink_target": "" }
import json import random import time import unittest from collections import defaultdict from operator import itemgetter import gevent from unittest import mock import requests from gevent import sleep from gevent.pool import Group from gevent.queue import Queue import locust from locust import ( LoadTestShape, constant, runners, __version__, ) from locust.argument_parser import parse_options from locust.env import Environment from locust.exception import ( RPCError, StopUser, ) from locust.main import create_environment from locust.rpc import Message from locust.runners import ( LocalRunner, STATE_INIT, STATE_SPAWNING, STATE_RUNNING, STATE_MISSING, STATE_STOPPING, STATE_STOPPED, WorkerNode, WorkerRunner, ) from locust.stats import RequestStats from .testcases import LocustTestCase from locust.user import ( TaskSet, User, task, ) from retry import retry # type: ignore from .util import patch_env NETWORK_BROKEN = "network broken" def mocked_rpc(raise_on_close=True): class MockedRpcServerClient: queue = Queue() outbox = [] raise_error_on_close = raise_on_close def __init__(self, *args, **kwargs): pass @classmethod def mocked_send(cls, message): cls.queue.put(message.serialize()) sleep(0) def recv(self): results = self.queue.get() msg = Message.unserialize(results) if msg.data == NETWORK_BROKEN: raise RPCError() return msg def send(self, message): self.outbox.append(message) def send_to_client(self, message): self.outbox.append((message.node_id, message)) def recv_from_client(self): results = self.queue.get() msg = Message.unserialize(results) if msg.data == NETWORK_BROKEN: raise RPCError() return msg.node_id, msg def close(self): if self.raise_error_on_close: raise RPCError() else: pass return MockedRpcServerClient class mocked_options: def __init__(self): self.spawn_rate = 5 self.num_users = 5 self.host = "/" self.tags = None self.exclude_tags = None self.master_host = "localhost" self.master_port = 5557 self.master_bind_host = "*" self.master_bind_port = 5557 self.heartbeat_liveness = 3 self.heartbeat_interval = 1 self.stop_timeout = None self.connection_broken = False def reset_stats(self): pass class HeyAnException(Exception): pass class LocustRunnerTestCase(LocustTestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.runner_stopping = False self.runner_stopped = False def setUp(self): super().setUp() self.reset_state() def reset_state(self): self.runner_stopping = False self.runner_stopped = False class TestLocustRunner(LocustRunnerTestCase): def test_cpu_warning(self): _monitor_interval = runners.CPU_MONITOR_INTERVAL runners.CPU_MONITOR_INTERVAL = 2.0 try: class CpuUser(User): wait_time = constant(0.001) @task def cpu_task(self): for i in range(1000000): _ = 3 / 2 environment = Environment(user_classes=[CpuUser]) environment._cpu_warning_event_triggered = False def cpu_warning(environment, cpu_usage, **kwargs): environment._cpu_warning_event_triggered = True environment._cpu_usage = cpu_usage environment.events.cpu_warning.add_listener(cpu_warning) runner = LocalRunner(environment) self.assertFalse(runner.cpu_warning_emitted) runner.spawn_users({CpuUser.__name__: 1}, wait=False) sleep(2.5) self.assertTrue(environment._cpu_warning_event_triggered) self.assertGreater(environment._cpu_usage, 90) runner.quit() self.assertTrue(runner.cpu_warning_emitted) finally: runners.CPU_MONITOR_INTERVAL = _monitor_interval def test_kill_locusts(self): triggered = [False] class BaseUser(User): wait_time = constant(1) @task class task_set(TaskSet): @task def 
trigger(self): triggered[0] = True runner = Environment(user_classes=[BaseUser]).create_local_runner() users = runner.spawn_users({BaseUser.__name__: 2}, wait=False) self.assertEqual(2, len(users)) self.assertEqual(2, len(runner.user_greenlets)) g1 = list(runner.user_greenlets)[0] g2 = list(runner.user_greenlets)[1] runner.stop_users({BaseUser.__name__: 2}) self.assertEqual(0, len(runner.user_greenlets)) self.assertTrue(g1.dead) self.assertTrue(g2.dead) self.assertTrue(triggered[0]) def test_start_event(self): class MyUser(User): wait_time = constant(2) task_run_count = 0 @task def my_task(self): MyUser.task_run_count += 1 test_start_run = [0] environment = Environment(user_classes=[MyUser]) def on_test_start(*args, **kwargs): test_start_run[0] += 1 environment.events.test_start.add_listener(on_test_start) runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) runner.spawning_greenlet.get(timeout=3) self.assertEqual(1, test_start_run[0]) self.assertEqual(3, MyUser.task_run_count) def test_stop_event(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass environment = Environment(user_classes=[MyUser]) @environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): self.runner_stopping = True @environment.events.test_stop.add_listener def on_test_stop(*_, **__): self.runner_stopped = True runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) self.assertFalse(self.runner_stopping) self.assertFalse(self.runner_stopped) runner.stop() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) def test_stop_event_quit(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass environment = Environment(user_classes=[MyUser]) @environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): self.runner_stopping = True @environment.events.test_stop.add_listener def on_test_stop(*_, **__): self.runner_stopped = True runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) self.assertFalse(self.runner_stopping) self.assertFalse(self.runner_stopped) runner.quit() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) def test_stop_event_stop_and_quit(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass environment = Environment(user_classes=[MyUser]) @environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): self.runner_stopping = True @environment.events.test_stop.add_listener def on_test_stop(*_, **__): self.runner_stopped = True runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) self.assertFalse(self.runner_stopping) self.assertFalse(self.runner_stopped) runner.stop() runner.quit() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) def test_stopping_event(self): on_stop_called = [False] class MyUser(User): on_stop_called = False wait_time = constant(1) @task def my_task(self): pass def on_stop(self): MyUser.on_stop_called = True environment = Environment(user_classes=[MyUser]) @environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): on_stop_called[0] = MyUser.on_stop_called self.runner_stopping = True runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) runner.quit() self.assertTrue(self.runner_stopping) self.assertFalse(on_stop_called[0]) def test_change_user_count_during_spawning(self): class MyUser(User): wait_time = constant(1) 
@task def my_task(self): pass environment = Environment(user_classes=[MyUser]) runner = LocalRunner(environment) runner.start(user_count=10, spawn_rate=5, wait=False) sleep(0.6) runner.start(user_count=5, spawn_rate=5, wait=False) runner.spawning_greenlet.join() self.assertEqual(5, len(runner.user_greenlets)) runner.quit() def test_reset_stats(self): class MyUser(User): @task class task_set(TaskSet): @task def my_task(self): self.user.environment.events.request.fire( request_type="GET", name="/test", response_time=666, response_length=1337, exception=None, context={}, ) # Make sure each user only run this task once during the test sleep(30) environment = Environment(user_classes=[MyUser], reset_stats=True) runner = LocalRunner(environment) runner.start(user_count=6, spawn_rate=1, wait=False) sleep(3) self.assertGreaterEqual(runner.stats.get("/test", "GET").num_requests, 3) sleep(3.25) self.assertLessEqual(runner.stats.get("/test", "GET").num_requests, 1) runner.quit() def test_no_reset_stats(self): class MyUser(User): @task class task_set(TaskSet): @task def my_task(self): self.user.environment.events.request.fire( request_type="GET", name="/test", response_time=666, response_length=1337, exception=None, context={}, ) sleep(2) environment = Environment(reset_stats=False, user_classes=[MyUser]) runner = LocalRunner(environment) runner.start(user_count=6, spawn_rate=12, wait=False) sleep(0.25) self.assertGreaterEqual(runner.stats.get("/test", "GET").num_requests, 3) sleep(0.3) self.assertEqual(6, runner.stats.get("/test", "GET").num_requests) runner.quit() def test_runner_reference_on_environment(self): env = Environment() runner = env.create_local_runner() self.assertEqual(env, runner.environment) self.assertEqual(runner, env.runner) def test_users_can_call_runner_quit_without_deadlocking(self): class BaseUser(User): stop_triggered = False @task def trigger(self): self.environment.runner.quit() def on_stop(self): BaseUser.stop_triggered = True runner = Environment(user_classes=[BaseUser]).create_local_runner() users = runner.spawn_users({BaseUser.__name__: 1}, wait=False) self.assertEqual(1, len(users)) timeout = gevent.Timeout(0.5) timeout.start() try: runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception, runner must have hung somehow.") finally: timeout.cancel() self.assertTrue(BaseUser.stop_triggered) def test_runner_quit_can_run_on_stop_for_multiple_users_concurrently(self): class BaseUser(User): stop_count = 0 @task def trigger(self): pass def on_stop(self): gevent.sleep(0.1) BaseUser.stop_count += 1 runner = Environment(user_classes=[BaseUser]).create_local_runner() users = runner.spawn_users({BaseUser.__name__: 10}, wait=False) self.assertEqual(10, len(users)) timeout = gevent.Timeout(0.3) timeout.start() try: runner.quit() except gevent.Timeout: self.fail("Got Timeout exception, runner must have hung somehow.") finally: timeout.cancel() self.assertEqual(10, BaseUser.stop_count) # verify that all users executed on_stop def test_stop_users_with_spawn_rate(self): """ The spawn rate does not have an effect on the rate at which the users are stopped. It is expected that the excess users will be stopped as soon as possible in parallel (while respecting the stop_timeout). 
""" class MyUser(User): wait_time = constant(1) @task def my_task(self): pass environment = Environment(user_classes=[MyUser]) runner = LocalRunner(environment) # Start load test, wait for users to start, then trigger ramp down ts = time.time() runner.start(10, 10, wait=False) runner.spawning_greenlet.join() delta = time.time() - ts self.assertTrue( 0 <= delta <= 0.05, f"Expected user count to increase to 10 instantaneously, instead it took {delta:f}" ) self.assertTrue( runner.user_count == 10, "User count has not decreased correctly to 2, it is : %i" % runner.user_count ) ts = time.time() runner.start(2, 4, wait=False) runner.spawning_greenlet.join() delta = time.time() - ts self.assertTrue(0 <= delta <= 1.05, f"Expected user count to decrease to 2 in 1s, instead it took {delta:f}") self.assertTrue( runner.user_count == 2, "User count has not decreased correctly to 2, it is : %i" % runner.user_count ) def test_attributes_populated_when_calling_start(self): class MyUser1(User): wait_time = constant(0) @task def my_task(self): pass class MyUser2(User): wait_time = constant(0) @task def my_task(self): pass environment = Environment(user_classes=[MyUser1, MyUser2]) runner = LocalRunner(environment) runner.start(user_count=10, spawn_rate=5, wait=False) runner.spawning_greenlet.join() self.assertDictEqual({"MyUser1": 5, "MyUser2": 5}, runner.user_classes_count) runner.start(user_count=5, spawn_rate=5, wait=False) runner.spawning_greenlet.join() self.assertDictEqual({"MyUser1": 3, "MyUser2": 2}, runner.user_classes_count) runner.quit() def test_user_classes_count(self): class MyUser1(User): wait_time = constant(0) @task def my_task(self): pass class MyUser2(User): wait_time = constant(0) @task def my_task(self): pass environment = Environment(user_classes=[MyUser1, MyUser2]) runner = LocalRunner(environment) runner.start(user_count=10, spawn_rate=5, wait=False) runner.spawning_greenlet.join() self.assertDictEqual({"MyUser1": 5, "MyUser2": 5}, runner.user_classes_count) runner.start(user_count=5, spawn_rate=5, wait=False) runner.spawning_greenlet.join() self.assertDictEqual({"MyUser1": 3, "MyUser2": 2}, runner.user_classes_count) runner.quit() def test_host_class_attribute_from_web(self): """If host is left empty from the webUI, we should not use it""" class MyUser1(User): host = "https://host1.com" @task def my_task(self): pass class MyUser2(User): host = "https://host2.com" @task def my_task(self): pass opts = mocked_options() # If left empty on the web, we get an empty string as host opts.host = "" environment = create_environment([MyUser1, MyUser2], opts) runner = LocalRunner(environment) # Start the runner to trigger problematic code runner.start(user_count=2, spawn_rate=1, wait=False) runner.spawning_greenlet.join() # Make sure we did not overwrite the host variable self.assertEqual(MyUser1.host, "https://host1.com") self.assertEqual(MyUser2.host, "https://host2.com") runner.quit() def test_custom_message(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass test_custom_msg = [False] test_custom_msg_data = [{}] def on_custom_msg(msg, **kw): test_custom_msg[0] = True test_custom_msg_data[0] = msg.data environment = Environment(user_classes=[MyUser]) runner = LocalRunner(environment) runner.register_message("test_custom_msg", on_custom_msg) runner.send_message("test_custom_msg", {"test_data": 123}) self.assertTrue(test_custom_msg[0]) self.assertEqual(123, test_custom_msg_data[0]["test_data"]) def test_undefined_custom_message(self): class MyUser(User): wait_time = 
constant(1) @task def my_task(self): pass test_custom_msg = [False] def on_custom_msg(msg, **kw): test_custom_msg[0] = True environment = Environment(user_classes=[MyUser]) runner = LocalRunner(environment) runner.register_message("test_custom_msg", on_custom_msg) runner.send_message("test_different_custom_msg") self.assertFalse(test_custom_msg[0]) self.assertEqual(1, len(self.mocked_log.warning)) msg = self.mocked_log.warning[0] self.assertIn("Unknown message type received", msg) def test_swarm_endpoint_is_non_blocking(self): class TestUser1(User): @task def my_task(self): gevent.sleep(600) class TestUser2(User): @task def my_task(self): gevent.sleep(600) stop_timeout = 0 env = Environment(user_classes=[TestUser1, TestUser2], stop_timeout=stop_timeout) local_runner = env.create_local_runner() web_ui = env.create_web_ui("127.0.0.1", 0) gevent.sleep(0.1) ts = time.perf_counter() response = requests.post( f"http://127.0.0.1:{web_ui.server.server_port}/swarm", data={"user_count": 20, "spawn_rate": 5, "host": "https://localhost"}, ) self.assertEqual(200, response.status_code) self.assertTrue(0 <= time.perf_counter() - ts <= 1, "swarm endpoint is blocking") ts = time.perf_counter() while local_runner.state != STATE_RUNNING: self.assertTrue(time.perf_counter() - ts <= 4, local_runner.state) gevent.sleep(0.1) self.assertTrue(3 <= time.perf_counter() - ts <= 5) self.assertEqual(local_runner.user_count, 20) local_runner.stop() web_ui.stop() def test_can_call_stop_endpoint_if_currently_swarming(self): class TestUser1(User): @task def my_task(self): gevent.sleep(600) class TestUser2(User): @task def my_task(self): gevent.sleep(600) stop_timeout = 5 env = Environment(user_classes=[TestUser1, TestUser2], stop_timeout=stop_timeout) local_runner = env.create_local_runner() web_ui = env.create_web_ui("127.0.0.1", 0) gevent.sleep(0.1) ts = time.perf_counter() response = requests.post( f"http://127.0.0.1:{web_ui.server.server_port}/swarm", data={"user_count": 20, "spawn_rate": 1, "host": "https://localhost"}, ) self.assertEqual(200, response.status_code) self.assertTrue(0 <= time.perf_counter() - ts <= 1, "swarm endpoint is blocking") gevent.sleep(5) self.assertEqual(local_runner.state, STATE_SPAWNING) self.assertLessEqual(local_runner.user_count, 10) ts = time.perf_counter() response = requests.get( f"http://127.0.0.1:{web_ui.server.server_port}/stop", ) self.assertEqual(200, response.status_code) self.assertTrue(stop_timeout <= time.perf_counter() - ts <= stop_timeout + 5, "stop endpoint took too long") ts = time.perf_counter() while local_runner.state != STATE_STOPPED: self.assertTrue(time.perf_counter() - ts <= 2) gevent.sleep(0.1) self.assertLessEqual(local_runner.user_count, 0) local_runner.stop() web_ui.stop() def test_target_user_count_is_set_before_ramp_up(self): """Test for https://github.com/locustio/locust/issues/1883""" class MyUser1(User): wait_time = constant(0) @task def my_task(self): pass environment = Environment(user_classes=[MyUser1]) runner = LocalRunner(environment) test_start_event_fired = [False] @environment.events.test_start.add_listener def on_test_start(*args, **kwargs): test_start_event_fired[0] = True self.assertEqual(runner.target_user_count, 3) runner.start(user_count=3, spawn_rate=1, wait=False) gevent.sleep(1) self.assertEqual(runner.target_user_count, 3) self.assertEqual(runner.user_count, 1) # However, target_user_classes_count is only updated at the end of the ramp-up/ramp-down # due to the way it is implemented. 
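        # Descriptive note (added): the assertions below confirm this behaviour — the per-class
        # dict stays empty until spawning_greenlet has finished, after which {"MyUser1": 3} is reported.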
self.assertDictEqual({}, runner.target_user_classes_count) runner.spawning_greenlet.join() self.assertEqual(runner.target_user_count, 3) self.assertEqual(runner.user_count, 3) self.assertDictEqual({"MyUser1": 3}, runner.target_user_classes_count) runner.quit() self.assertTrue(test_start_event_fired[0]) def test_stop_users_count(self): user_count = 10 class BaseUser1(User): wait_time = constant(1) @task def task_a(self): pass class BaseUser2(BaseUser1): wait_time = constant(1) runner = Environment(user_classes=[BaseUser1, BaseUser2]).create_local_runner() runner.start(user_count=user_count, spawn_rate=10) sleep(1) self.assertEqual(user_count, runner.user_count) runner.stop() sleep(1) self.assertEqual(0, runner.user_count) class TestMasterWorkerRunners(LocustTestCase): def test_distributed_integration_run(self): """ Full integration test that starts both a MasterRunner and three WorkerRunner instances and makes sure that their stats is sent to the Master. """ class TestUser(User): wait_time = constant(0.1) @task def incr_stats(self): self.environment.events.request.fire( request_type="GET", name="/", response_time=1337, response_length=666, exception=None, context={}, ) with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): # start a Master runner master_env = Environment(user_classes=[TestUser]) master = master_env.create_master_runner("*", 0) sleep(0) # start 3 Worker runners workers = [] for i in range(3): worker_env = Environment(user_classes=[TestUser]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # give workers time to connect sleep(0.1) # issue start command that should trigger TestUsers to be spawned in the Workers master.start(6, spawn_rate=1000) sleep(0.1) # check that worker nodes have started locusts for worker in workers: self.assertEqual(2, worker.user_count) # give time for users to generate stats, and stats to be sent to master sleep(1) master.quit() # make sure users are killed for worker in workers: self.assertEqual(0, worker.user_count) # check that stats are present in master self.assertGreater( master_env.runner.stats.total.num_requests, 20, "For some reason the master node's stats has not come in", ) def test_distributed_rebalanced_integration_run(self): """ Full integration test that starts both a MasterRunner and three WorkerRunner instances and makes sure that their stats is sent to the Master. 
""" class TestUser(User): wait_time = constant(0.1) @task def incr_stats(self): self.environment.events.request.fire( request_type="GET", name="/", response_time=1337, response_length=666, exception=None, context={}, ) with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3), patch_env( "LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "0.1" ): # start a Master runner options = parse_options(["--enable-rebalancing"]) master_env = Environment(user_classes=[TestUser], parsed_options=options) master = master_env.create_master_runner("*", 0) sleep(0) # start 3 Worker runners workers = [] def add_worker(): worker_env = Environment(user_classes=[TestUser]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) for i in range(3): add_worker() # give workers time to connect sleep(0.1) # issue start command that should trigger TestUsers to be spawned in the Workers master.start(6, spawn_rate=1000) sleep(0.1) # check that worker nodes have started locusts for worker in workers: self.assertEqual(2, worker.user_count) # give time for users to generate stats, and stats to be sent to master # Add 1 more workers (should be 4 now) add_worker() @retry(AssertionError, tries=10, delay=0.5) def check_rebalanced_true(): for worker in workers: self.assertTrue(worker.user_count > 0) # Check that all workers have a user count > 0 at least check_rebalanced_true() # Add 2 more workers (should be 6 now) add_worker() add_worker() @retry(AssertionError, tries=10, delay=0.5) def check_rebalanced_equals(): for worker in workers: self.assertEqual(1, worker.user_count) # Check that all workers have a user count = 1 now check_rebalanced_equals() # Simulate that some workers are missing by "killing" them abrutly for i in range(3): workers[i].greenlet.kill(block=True) @retry(AssertionError, tries=10, delay=1) def check_master_worker_missing_count(): self.assertEqual(3, len(master.clients.missing)) # Check that master detected the missing workers check_master_worker_missing_count() @retry(AssertionError, tries=10, delay=1) def check_remaing_worker_new_user_count(): for i in range(3, 6): self.assertEqual(2, workers[i].user_count) # Check that remaining workers have a new count of user due to rebalancing. check_remaing_worker_new_user_count() sleep(1) # Finally quit and check states of remaining workers. master.quit() # make sure users are killed on remaining workers for i in range(3, 6): self.assertEqual(0, workers[i].user_count) # check that stats are present in master self.assertGreater( master_env.runner.stats.total.num_requests, 20, "For some reason the master node's stats has not come in", ) def test_distributed_run_with_custom_args(self): """ Full integration test that starts both a MasterRunner and three WorkerRunner instances and makes sure that their stats is sent to the Master. 
""" class TestUser(User): wait_time = constant(0.1) @task def incr_stats(self): self.environment.events.request.fire( request_type="GET", name=self.environment.parsed_options.my_str_argument, response_time=self.environment.parsed_options.my_int_argument, response_length=666, exception=None, context={}, ) @locust.events.init_command_line_parser.add_listener def _(parser, **kw): parser.add_argument("--my-int-argument", type=int) parser.add_argument("--my-str-argument", type=str, default="NOOOO") with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): # start a Master runner master_env = Environment(user_classes=[TestUser]) master = master_env.create_master_runner("*", 0) master_env.parsed_options = parse_options( [ "--my-int-argument", "42", "--my-str-argument", "cool-string", ] ) sleep(0) # start 3 Worker runners workers = [] for i in range(3): worker_env = Environment(user_classes=[TestUser]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # give workers time to connect sleep(0.1) # issue start command that should trigger TestUsers to be spawned in the Workers master.start(6, spawn_rate=1000) sleep(0.1) # check that worker nodes have started locusts for worker in workers: self.assertEqual(2, worker.user_count) # give time for users to generate stats, and stats to be sent to master sleep(1) master.quit() # make sure users are killed for worker in workers: self.assertEqual(0, worker.user_count) self.assertEqual(master_env.runner.stats.total.max_response_time, 42) self.assertEqual(master_env.runner.stats.get("cool-string", "GET").avg_response_time, 42) def test_test_stop_event(self): class TestUser(User): wait_time = constant(0.1) @task def my_task(l): pass with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): # start a Master runner master_env = Environment(user_classes=[TestUser]) test_stop_count = {"master": 0, "worker": 0} @master_env.events.test_stop.add_listener def _(*args, **kwargs): test_stop_count["master"] += 1 master = master_env.create_master_runner("*", 0) sleep(0) # start a Worker runner worker_env = Environment(user_classes=[TestUser]) @worker_env.events.test_stop.add_listener def _(*args, **kwargs): test_stop_count["worker"] += 1 worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) # give worker time to connect sleep(0.1) # issue start command that should trigger TestUsers to be spawned in the Workers master.start(2, spawn_rate=1000) sleep(0.1) # check that worker nodes have started locusts self.assertEqual(2, worker.user_count) # give time for users to generate stats, and stats to be sent to master sleep(0.1) master_env.events.quitting.fire(environment=master_env, reverse=True) master.quit() sleep(0.1) # make sure users are killed self.assertEqual(0, worker.user_count) # check the test_stop event was called one time in master and one time in worker self.assertEqual( 1, test_stop_count["master"], "The test_stop event was not called exactly one time in the master node", ) self.assertEqual( 1, test_stop_count["worker"], "The test_stop event was not called exactly one time in the worker node", ) def test_distributed_shape(self): """ Full integration test that starts both a MasterRunner and three WorkerRunner instances and tests a basic LoadTestShape with scaling up and down users """ class TestUser(User): @task def my_task(self): pass class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 2: return 9, 9 elif run_time < 4: return 21, 21 elif run_time 
< 6: return 3, 21 else: return None with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): test_shape = TestShape() master_env = Environment(user_classes=[TestUser], shape_class=test_shape) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for i in range(3): worker_env = Environment(user_classes=[TestUser]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) # Start a shape test master.start_shape() sleep(1) # Ensure workers have connected and started the correct amount of users for worker in workers: self.assertEqual(3, worker.user_count, "Shape test has not reached stage 1") self.assertEqual( 9, test_shape.get_current_user_count(), "Shape is not seeing stage 1 runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"TestUser": 9}) # Ensure new stage with more users has been reached sleep(2) for worker in workers: self.assertEqual(7, worker.user_count, "Shape test has not reached stage 2") self.assertEqual( 21, test_shape.get_current_user_count(), "Shape is not seeing stage 2 runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"TestUser": 21}) # Ensure new stage with less users has been reached sleep(2) for worker in workers: self.assertEqual(1, worker.user_count, "Shape test has not reached stage 3") self.assertEqual( 3, test_shape.get_current_user_count(), "Shape is not seeing stage 3 runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"TestUser": 3}) # Ensure test stops at the end sleep(2) for worker in workers: self.assertEqual(0, worker.user_count, "Shape test has not stopped") self.assertEqual( 0, test_shape.get_current_user_count(), "Shape is not seeing stopped runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"TestUser": 0}) self.assertEqual("stopped", master.state) def test_distributed_shape_with_fixed_users(self): """ Full integration test that starts both a MasterRunner and three WorkerRunner instances and tests a basic LoadTestShape with scaling up and down users with 'fixed count' users """ class TestUser(User): @task def my_task(self): pass class FixedUser1(User): fixed_count = 1 @task def my_task(self): pass class FixedUser2(User): fixed_count = 11 @task def my_task(self): pass class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 1: return 12, 12 elif run_time < 2: return 36, 24 elif run_time < 3: return 12, 24 else: return None with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): test_shape = TestShape() master_env = Environment(user_classes=[TestUser, FixedUser1, FixedUser2], shape_class=test_shape) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for _ in range(3): worker_env = Environment(user_classes=[TestUser, FixedUser1, FixedUser2]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) # Start a shape test master.start_shape() sleep(1) # Ensure workers have connected and started the correct amount of users (fixed is spawn first) for worker in workers: self.assertEqual(4, worker.user_count, "Shape test has not reached stage 1") self.assertEqual( 12, test_shape.get_current_user_count(), "Shape is not seeing stage 1 runner user count correctly" ) 
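            # Descriptive note (added): fixed_count users are spawned before the weighted TestUser,
            # so at stage 1 only FixedUser1/FixedUser2 are running and TestUser is still at 0.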
self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 0}) # Ensure new stage with more users has been reached sleep(1) for worker in workers: self.assertEqual(12, worker.user_count, "Shape test has not reached stage 2") self.assertEqual( 36, test_shape.get_current_user_count(), "Shape is not seeing stage 2 runner user count correctly" ) self.assertDictEqual( master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 24} ) # Ensure new stage with less users has been reached # and expected count of the fixed users is present sleep(1) for worker in workers: self.assertEqual(4, worker.user_count, "Shape test has not reached stage 3") self.assertEqual( 12, test_shape.get_current_user_count(), "Shape is not seeing stage 3 runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 0}) # Ensure test stops at the end sleep(0.5) for worker in workers: self.assertEqual(0, worker.user_count, "Shape test has not stopped") self.assertEqual( 0, test_shape.get_current_user_count(), "Shape is not seeing stopped runner user count correctly" ) self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 0, "FixedUser2": 0, "TestUser": 0}) try: with gevent.Timeout(3.0): while master.state != STATE_STOPPED: sleep(0.1) finally: self.assertEqual(STATE_STOPPED, master.state) def test_distributed_shape_with_stop_timeout(self): """ Full integration test that starts both a MasterRunner and five WorkerRunner instances and tests a basic LoadTestShape with scaling up and down users """ class TestUser1(User): def start(self, group: Group): gevent.sleep(0.5) return super().start(group) @task def my_task(self): gevent.sleep(0) class TestUser2(User): def start(self, group: Group): gevent.sleep(0.5) return super().start(group) @task def my_task(self): gevent.sleep(600) class TestUser3(User): def start(self, group: Group): gevent.sleep(0.5) return super().start(group) @task def my_task(self): gevent.sleep(600) class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 10: return 15, 3 elif run_time < 30: return 5, 10 else: return None locust_worker_additional_wait_before_ready_after_stop = 5 with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3), patch_env( "LOCUST_WORKER_ADDITIONAL_WAIT_BEFORE_READY_AFTER_STOP", str(locust_worker_additional_wait_before_ready_after_stop), ): stop_timeout = 5 master_env = Environment( user_classes=[TestUser1, TestUser2, TestUser3], shape_class=TestShape(), stop_timeout=stop_timeout ) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for i in range(5): worker_env = Environment(user_classes=[TestUser1, TestUser2, TestUser3]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) self.assertEqual(STATE_INIT, master.state) self.assertEqual(5, len(master.clients.ready)) # Re-order `workers` so that it is sorted by `id`. # This is required because the dispatch is done # on the sorted workers. 
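            # Descriptive note (added): sorting by client_id keeps this local list aligned with the
            # order the master dispatches users in, so the per-worker assertions below line up.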
workers = sorted(workers, key=lambda w: w.client_id) # Start a shape test master.start_shape() # First stage ts = time.time() while master.state != STATE_SPAWNING: self.assertTrue(time.time() - ts <= 1, master.state) sleep() sleep(5 - (time.time() - ts)) # runtime = 5s ts = time.time() while master.state != STATE_RUNNING: self.assertTrue(time.time() - ts <= 1, master.state) sleep() self.assertEqual(STATE_RUNNING, master.state) w1 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w2 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w3 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w4 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w5 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} self.assertDictEqual(w1, workers[0].user_classes_count) self.assertDictEqual(w2, workers[1].user_classes_count) self.assertDictEqual(w3, workers[2].user_classes_count) self.assertDictEqual(w4, workers[3].user_classes_count) self.assertDictEqual(w5, workers[4].user_classes_count) self.assertDictEqual(w1, master.clients[workers[0].client_id].user_classes_count) self.assertDictEqual(w2, master.clients[workers[1].client_id].user_classes_count) self.assertDictEqual(w3, master.clients[workers[2].client_id].user_classes_count) self.assertDictEqual(w4, master.clients[workers[3].client_id].user_classes_count) self.assertDictEqual(w5, master.clients[workers[4].client_id].user_classes_count) sleep(5 - (time.time() - ts)) # runtime = 10s # Fourth stage ts = time.time() while master.state != STATE_SPAWNING: self.assertTrue(time.time() - ts <= 1, master.state) sleep() sleep(5 - (time.time() - ts)) # runtime = 15s # Fourth stage - Excess TestUser1 have been stopped but # TestUser2/TestUser3 have not reached stop timeout yet, so # their number are unchanged ts = time.time() while master.state != STATE_RUNNING: self.assertTrue(time.time() - ts <= 1, master.state) sleep() delta = time.time() - ts w1 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w2 = {"TestUser1": 0, "TestUser2": 1, "TestUser3": 1} w3 = {"TestUser1": 0, "TestUser2": 1, "TestUser3": 1} w4 = {"TestUser1": 1, "TestUser2": 1, "TestUser3": 1} w5 = {"TestUser1": 0, "TestUser2": 1, "TestUser3": 1} self.assertDictEqual(w1, workers[0].user_classes_count) self.assertDictEqual(w2, workers[1].user_classes_count) self.assertDictEqual(w3, workers[2].user_classes_count) self.assertDictEqual(w4, workers[3].user_classes_count) self.assertDictEqual(w5, workers[4].user_classes_count) self.assertDictEqual(w1, master.clients[workers[0].client_id].user_classes_count) self.assertDictEqual(w2, master.clients[workers[1].client_id].user_classes_count) self.assertDictEqual(w3, master.clients[workers[2].client_id].user_classes_count) self.assertDictEqual(w4, master.clients[workers[3].client_id].user_classes_count) self.assertDictEqual(w5, master.clients[workers[4].client_id].user_classes_count) sleep(1 - delta) # runtime = 16s # Fourth stage - All users are now at the desired number ts = time.time() while master.state != STATE_RUNNING: self.assertTrue(time.time() - ts <= 1, master.state) sleep() delta = time.time() - ts w1 = {"TestUser1": 1, "TestUser2": 0, "TestUser3": 0} w2 = {"TestUser1": 0, "TestUser2": 1, "TestUser3": 0} w3 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 1} w4 = {"TestUser1": 1, "TestUser2": 0, "TestUser3": 0} w5 = {"TestUser1": 0, "TestUser2": 1, "TestUser3": 0} self.assertDictEqual(w1, workers[0].user_classes_count) self.assertDictEqual(w2, workers[1].user_classes_count) self.assertDictEqual(w3, workers[2].user_classes_count) self.assertDictEqual(w4, 
workers[3].user_classes_count) self.assertDictEqual(w5, workers[4].user_classes_count) self.assertDictEqual(w1, master.clients[workers[0].client_id].user_classes_count) self.assertDictEqual(w2, master.clients[workers[1].client_id].user_classes_count) self.assertDictEqual(w3, master.clients[workers[2].client_id].user_classes_count) self.assertDictEqual(w4, master.clients[workers[3].client_id].user_classes_count) self.assertDictEqual(w5, master.clients[workers[4].client_id].user_classes_count) sleep(10 - delta) # runtime = 26s # Sleep stop_timeout and make sure the test has stopped sleep(5) # runtime = 31s self.assertEqual(STATE_STOPPING, master.state) sleep(stop_timeout) # runtime = 36s # We wait for "stop_timeout" seconds to let the workers reconnect as "ready" with the master. # The reason for waiting an additional "stop_timeout" when we already waited for "stop_timeout" # above is that when a worker receives the stop message, it can take up to "stop_timeout" # for the worker to send the "client_stopped" message then an additional "stop_timeout" seconds # to send the "client_ready" message. ts = time.time() while len(master.clients.ready) != len(workers): self.assertTrue( time.time() - ts <= stop_timeout + locust_worker_additional_wait_before_ready_after_stop, f"expected {len(workers)} workers to be ready but only {len(master.clients.ready)} workers are", ) sleep() sleep(1) # Check that no users are running w1 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 0} w2 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 0} w3 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 0} w4 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 0} w5 = {"TestUser1": 0, "TestUser2": 0, "TestUser3": 0} self.assertDictEqual(w1, workers[0].user_classes_count) self.assertDictEqual(w2, workers[1].user_classes_count) self.assertDictEqual(w3, workers[2].user_classes_count) self.assertDictEqual(w4, workers[3].user_classes_count) self.assertDictEqual(w5, workers[4].user_classes_count) self.assertDictEqual(w1, master.clients[workers[0].client_id].user_classes_count) self.assertDictEqual(w2, master.clients[workers[1].client_id].user_classes_count) self.assertDictEqual(w3, master.clients[workers[2].client_id].user_classes_count) self.assertDictEqual(w4, master.clients[workers[3].client_id].user_classes_count) self.assertDictEqual(w5, master.clients[workers[4].client_id].user_classes_count) ts = time.time() while master.state != STATE_STOPPED: self.assertTrue(time.time() - ts <= 5, master.state) sleep() master.stop() @unittest.skip(reason="takes a lot of time and has randomness to it") def test_distributed_shape_fuzzy_test(self): """ Incredibility useful test to find issues with dispatch logic. This test allowed to find multiple small corner cases with the new dispatch logic of locust v2. The test is disabled by default because it takes a lot of time to run and has randomness to it. However, it is advised to run it a few times (you can run it in parallel) when modifying the dispatch logic. 
""" class BaseUser(User): @task def my_task(self): gevent.sleep(600) class TestUser01(BaseUser): pass class TestUser02(BaseUser): pass class TestUser03(BaseUser): pass class TestUser04(BaseUser): pass class TestUser05(BaseUser): pass class TestUser06(BaseUser): pass class TestUser07(BaseUser): pass class TestUser08(BaseUser): pass class TestUser09(BaseUser): pass class TestUser10(BaseUser): pass class TestUser11(BaseUser): pass class TestUser12(BaseUser): pass class TestUser13(BaseUser): pass class TestUser14(BaseUser): pass class TestUser15(BaseUser): pass class TestShape(LoadTestShape): def __init__(self): super().__init__() self.stages = [] runtime = 0 for _ in range(100): runtime += random.uniform(3, 15) self.stages.append((runtime, random.randint(1, 100), random.uniform(0.1, 10))) def tick(self): run_time = self.get_run_time() for stage in self.stages: if run_time < stage[0]: return stage[1], stage[2] user_classes = [ TestUser01, TestUser02, TestUser03, TestUser04, TestUser05, TestUser06, TestUser07, TestUser08, TestUser09, TestUser10, TestUser11, TestUser12, TestUser13, TestUser14, TestUser15, ] chosen_user_classes = random.sample(user_classes, k=random.randint(1, len(user_classes))) for user_class in chosen_user_classes: user_class.weight = random.uniform(1, 20) locust_worker_additional_wait_before_ready_after_stop = 5 with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3), patch_env( "LOCUST_WORKER_ADDITIONAL_WAIT_BEFORE_READY_AFTER_STOP", str(locust_worker_additional_wait_before_ready_after_stop), ): stop_timeout = 5 master_env = Environment( user_classes=chosen_user_classes, shape_class=TestShape(), stop_timeout=stop_timeout ) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for i in range(random.randint(1, 30)): worker_env = Environment(user_classes=chosen_user_classes) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) self.assertEqual(STATE_INIT, master.state) self.assertEqual(len(workers), len(master.clients.ready)) # Start a shape test master.start_shape() ts = time.time() while master.state != STATE_STOPPED: self.assertTrue(time.time() - ts <= master_env.shape_class.stages[-1][0] + 60, master.state) print( "{:.2f}/{:.2f} | {} | {:.0f} | ".format( time.time() - ts, master_env.shape_class.stages[-1][0], master.state, sum(master.reported_user_classes_count.values()), ) + json.dumps(dict(sorted(master.reported_user_classes_count.items(), key=itemgetter(0)))) ) sleep(1) master.stop() def test_distributed_shape_stop_and_restart(self): """ Test stopping and then restarting a LoadTestShape """ class TestUser(User): @task def my_task(self): pass class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 10: return 4, 4 else: return None with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): master_env = Environment(user_classes=[TestUser], shape_class=TestShape()) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for i in range(2): worker_env = Environment(user_classes=[TestUser]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) # Start a shape test and ensure workers have connected and started the correct amount of users master.start_shape() sleep(1) for worker in workers: self.assertEqual(2, worker.user_count, "Shape test has not started correctly") # 
Stop the test and ensure all user count is 0 master.stop() sleep(1) for worker in workers: self.assertEqual(0, worker.user_count, "Shape test has not stopped") # Then restart the test again and ensure workers have connected and started the correct amount of users master.start_shape() sleep(1) for worker in workers: self.assertEqual(2, worker.user_count, "Shape test has not started again correctly") master.stop() def test_distributed_shape_statuses_transition(self): """ Full integration test that starts both a MasterRunner and five WorkerRunner instances The goal of this test is to validate the status on the master is correctly transitioned for each of the test phases. """ class TestUser1(User): @task def my_task(self): gevent.sleep(600) class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 5: return 5, 2.5 elif run_time < 10: return 10, 2.5 elif run_time < 15: return 15, 2.5 else: return None locust_worker_additional_wait_before_ready_after_stop = 2 with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3), patch_env( "LOCUST_WORKER_ADDITIONAL_WAIT_BEFORE_READY_AFTER_STOP", str(locust_worker_additional_wait_before_ready_after_stop), ): stop_timeout = 0 master_env = Environment(user_classes=[TestUser1], shape_class=TestShape(), stop_timeout=stop_timeout) master_env.shape_class.reset_time() master = master_env.create_master_runner("*", 0) workers = [] for i in range(5): worker_env = Environment(user_classes=[TestUser1]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) self.assertEqual(STATE_INIT, master.state) self.assertEqual(5, len(master.clients.ready)) statuses = [] ts = time.perf_counter() master.start_shape() while master.state != STATE_STOPPED: # +5s buffer to let master stop self.assertTrue( time.perf_counter() - ts <= 30 + locust_worker_additional_wait_before_ready_after_stop + 5, master.state, ) statuses.append((time.perf_counter() - ts, master.state, master.user_count)) sleep(0.1) self.assertEqual(statuses[0][1], STATE_INIT) stage = 1 tolerance = 1 # in s for (t1, state1, user_count1), (t2, state2, user_count2) in zip(statuses[:-1], statuses[1:]): if state1 == STATE_SPAWNING and state2 == STATE_RUNNING and stage == 1: self.assertTrue(2.5 - tolerance <= t2 <= 2.5 + tolerance) elif state1 == STATE_RUNNING and state2 == STATE_SPAWNING and stage == 1: self.assertTrue(5 - tolerance <= t2 <= 5 + tolerance) stage += 1 elif state1 == STATE_SPAWNING and state2 == STATE_RUNNING and stage == 2: self.assertTrue(7.5 - tolerance <= t2 <= 7.5 + tolerance) elif state1 == STATE_RUNNING and state2 == STATE_SPAWNING and stage == 2: self.assertTrue(10 - tolerance <= t2 <= 10 + tolerance) stage += 1 elif state1 == STATE_SPAWNING and state2 == STATE_RUNNING and stage == 3: self.assertTrue(12.5 - tolerance <= t2 <= 12.5 + tolerance) elif state1 == STATE_RUNNING and state2 == STATE_SPAWNING and stage == 3: self.assertTrue(15 - tolerance <= t2 <= 15 + tolerance) stage += 1 elif state1 == STATE_RUNNING and state2 == STATE_STOPPED and stage == 3: self.assertTrue(15 - tolerance <= t2 <= 15 + tolerance) def test_swarm_endpoint_is_non_blocking(self): class TestUser1(User): @task def my_task(self): gevent.sleep(600) class TestUser2(User): @task def my_task(self): gevent.sleep(600) with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): stop_timeout = 0 master_env = Environment(user_classes=[TestUser1, TestUser2], stop_timeout=stop_timeout) master = 
master_env.create_master_runner("*", 0) web_ui = master_env.create_web_ui("127.0.0.1", 0) workers = [] for i in range(2): worker_env = Environment(user_classes=[TestUser1, TestUser2]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) self.assertEqual(STATE_INIT, master.state) self.assertEqual(len(master.clients.ready), len(workers)) ts = time.perf_counter() response = requests.post( f"http://127.0.0.1:{web_ui.server.server_port}/swarm", data={"user_count": 20, "spawn_rate": 5, "host": "https://localhost"}, ) self.assertEqual(200, response.status_code) self.assertTrue(0 <= time.perf_counter() - ts <= 1, "swarm endpoint is blocking") ts = time.perf_counter() while master.state != STATE_RUNNING: self.assertTrue(time.perf_counter() - ts <= 4, master.state) gevent.sleep(0.1) self.assertTrue(3 <= time.perf_counter() - ts <= 5) self.assertEqual(master.user_count, 20) master.stop() web_ui.stop() def test_can_call_stop_endpoint_if_currently_swarming(self): class TestUser1(User): @task def my_task(self): gevent.sleep(600) class TestUser2(User): @task def my_task(self): gevent.sleep(600) with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): stop_timeout = 5 master_env = Environment(user_classes=[TestUser1, TestUser2], stop_timeout=stop_timeout) master = master_env.create_master_runner("*", 0) web_ui = master_env.create_web_ui("127.0.0.1", 0) workers = [] for i in range(2): worker_env = Environment(user_classes=[TestUser1, TestUser2]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) workers.append(worker) # Give workers time to connect sleep(0.1) self.assertEqual(STATE_INIT, master.state) self.assertEqual(len(master.clients.ready), len(workers)) ts = time.perf_counter() response = requests.post( f"http://127.0.0.1:{web_ui.server.server_port}/swarm", data={"user_count": 20, "spawn_rate": 1, "host": "https://localhost"}, ) self.assertEqual(200, response.status_code) self.assertTrue(0 <= time.perf_counter() - ts <= 1, "swarm endpoint is blocking") gevent.sleep(5) self.assertEqual(master.state, STATE_SPAWNING) self.assertLessEqual(master.user_count, 10) ts = time.perf_counter() response = requests.get( f"http://127.0.0.1:{web_ui.server.server_port}/stop", ) self.assertEqual(200, response.status_code) self.assertTrue(stop_timeout <= time.perf_counter() - ts <= stop_timeout + 5, "stop endpoint took too long") ts = time.perf_counter() while master.state != STATE_STOPPED: self.assertTrue(time.perf_counter() - ts <= 2) gevent.sleep(0.1) self.assertLessEqual(master.user_count, 0) master.stop() web_ui.stop() def test_target_user_count_is_set_before_ramp_up(self): """Test for https://github.com/locustio/locust/issues/1883""" class MyUser1(User): wait_time = constant(0) @task def my_task(self): pass with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): # start a Master runner master_env = Environment(user_classes=[MyUser1]) master = master_env.create_master_runner("*", 0) test_start_event_fired = [False] @master_env.events.test_start.add_listener def on_test_start(*args, **kwargs): test_start_event_fired[0] = True self.assertEqual(master.target_user_count, 3) sleep(0) # start 1 worker runner worker_env = Environment(user_classes=[MyUser1]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) # give worker time to connect sleep(0.1) gevent.spawn(master.start, 3, spawn_rate=1) sleep(1) self.assertEqual(master.target_user_count, 3) 
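            # Descriptive note (added): with spawn_rate=1 only a single user has started after ~1s,
            # even though the target of 3 users is already set on the master.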
self.assertEqual(master.user_count, 1) # However, target_user_classes_count is only updated at the end of the ramp-up/ramp-down # due to the way it is implemented. self.assertDictEqual({}, master.target_user_classes_count) sleep(2) self.assertEqual(master.target_user_count, 3) self.assertEqual(master.user_count, 3) self.assertDictEqual({"MyUser1": 3}, master.target_user_classes_count) master.quit() # make sure users are killed self.assertEqual(0, worker.user_count) self.assertTrue(test_start_event_fired[0]) def test_long_running_test_start_is_run_to_completion_on_worker(self): """Test for https://github.com/locustio/locust/issues/1986""" class MyUser1(User): wait_time = constant(0) @task def my_task(self): pass with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): master_env = Environment(user_classes=[MyUser1]) master = master_env.create_master_runner("*", 0) sleep(0) # start 1 worker runner worker_env = Environment(user_classes=[MyUser1]) worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) test_start_exec_count = 0 @worker_env.events.test_start.add_listener def on_test_start(*_, **__): nonlocal test_start_exec_count test_start_exec_count += 1 sleep(3) # give worker time to connect sleep(0.1) gevent.spawn(master.start, 3, spawn_rate=1) t0 = time.perf_counter() while master.user_count != 3: self.assertLessEqual(time.perf_counter() - t0, 5, "Expected 3 users to be spawned") sleep(0.1) master.quit() # make sure users are killed self.assertEqual(0, worker.user_count) self.assertEqual(test_start_exec_count, 1) class TestMasterRunner(LocustRunnerTestCase): def setUp(self): super().setUp() self.environment = Environment(events=locust.events, catch_exceptions=False) def tearDown(self): super().tearDown() def get_runner(self, user_classes=None): if user_classes is not None: self.environment.user_classes = user_classes return self.environment.create_master_runner("*", 5557) def test_worker_connect(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "zeh_fake_client1")) self.assertEqual(1, len(master.clients)) self.assertTrue( "zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict" ) server.mocked_send(Message("client_ready", __version__, "zeh_fake_client2")) server.mocked_send(Message("client_ready", __version__, "zeh_fake_client3")) server.mocked_send(Message("client_ready", __version__, "zeh_fake_client4")) self.assertEqual(4, len(master.clients)) server.mocked_send(Message("quit", None, "zeh_fake_client3")) self.assertEqual(3, len(master.clients)) def test_worker_connect_with_special_versions(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", None, "1.x_style_client_should_not_be_allowed")) self.assertEqual(1, len(self.mocked_log.error)) self.assertEqual(0, len(master.clients)) server.mocked_send(Message("client_ready", "abcd", "other_version_mismatch_should_just_give_a_warning")) self.assertEqual(1, len(self.mocked_log.warning)) self.assertEqual(1, len(master.clients)) server.mocked_send(Message("client_ready", -1, "version_check_bypass_should_not_warn")) self.assertEqual(1, len(self.mocked_log.warning)) self.assertEqual(2, len(master.clients)) server.mocked_send( Message("client_ready", __version__ + "1", "difference_in_patch_version_should_not_warn") ) self.assertEqual(3, len(master.clients)) self.assertEqual(1, 
len(self.mocked_log.warning)) def test_worker_stats_report_median(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) master.stats.get("/", "GET").log(100, 23455) master.stats.get("/", "GET").log(800, 23455) master.stats.get("/", "GET").log(700, 23455) data = {"user_count": 1} self.environment.events.report_to_master.fire(client_id="fake_client", data=data) master.stats.clear_all() server.mocked_send(Message("stats", data, "fake_client")) s = master.stats.get("/", "GET") self.assertEqual(700, s.median_response_time) def test_worker_stats_report_with_none_response_times(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) master.stats.get("/mixed", "GET").log(0, 23455) master.stats.get("/mixed", "GET").log(800, 23455) master.stats.get("/mixed", "GET").log(700, 23455) master.stats.get("/mixed", "GET").log(None, 23455) master.stats.get("/mixed", "GET").log(None, 23455) master.stats.get("/mixed", "GET").log(None, 23455) master.stats.get("/mixed", "GET").log(None, 23455) master.stats.get("/onlyNone", "GET").log(None, 23455) data = {"user_count": 1} self.environment.events.report_to_master.fire(client_id="fake_client", data=data) master.stats.clear_all() server.mocked_send(Message("stats", data, "fake_client")) s1 = master.stats.get("/mixed", "GET") self.assertEqual(700, s1.median_response_time) self.assertEqual(500, s1.avg_response_time) s2 = master.stats.get("/onlyNone", "GET") self.assertEqual(0, s2.median_response_time) self.assertEqual(0, s2.avg_response_time) def test_master_marks_downed_workers_as_missing(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) sleep(6) # print(master.clients['fake_client'].__dict__) assert master.clients["fake_client"].state == STATE_MISSING def test_last_worker_quitting_stops_test(self): class TestUser(User): @task def my_task(self): gevent.sleep(600) with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server, patch_env( "LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "0.1" ): master = self.get_runner(user_classes=[TestUser]) server.mocked_send(Message("client_ready", __version__, "fake_client1")) server.mocked_send(Message("client_ready", __version__, "fake_client2")) master.start(1, 2) server.mocked_send(Message("spawning", None, "fake_client1")) server.mocked_send(Message("spawning", None, "fake_client2")) server.mocked_send(Message("quit", None, "fake_client1")) sleep(0.1) self.assertEqual(1, len(master.clients.all)) self.assertNotEqual(STATE_STOPPED, master.state, "Not all workers quit but test stopped anyway.") server.mocked_send(Message("quit", None, "fake_client2")) sleep(0.1) self.assertEqual(0, len(master.clients.all)) self.assertEqual(STATE_STOPPED, master.state, "All workers quit but test didn't stop.") @mock.patch("locust.runners.HEARTBEAT_INTERVAL", new=0.1) def test_last_worker_missing_stops_test(self): class TestUser(User): @task def my_task(self): gevent.sleep(600) with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server, patch_env( "LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "0.1" ): master = self.get_runner(user_classes=[TestUser]) server.mocked_send(Message("client_ready", __version__, "fake_client1")) server.mocked_send(Message("client_ready", __version__, 
"fake_client2")) server.mocked_send(Message("client_ready", __version__, "fake_client3")) master.start(3, 3) server.mocked_send(Message("spawning", None, "fake_client1")) server.mocked_send(Message("spawning", None, "fake_client2")) server.mocked_send(Message("spawning", None, "fake_client3")) sleep(0.2) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client1", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client2", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client3", ) ) sleep(0.2) self.assertEqual(0, len(master.clients.missing)) self.assertEqual(3, master.worker_count) self.assertNotIn( master.state, [STATE_STOPPED, STATE_STOPPING], "Not all workers went missing but test stopped anyway." ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client1", ) ) sleep(0.4) self.assertEqual(2, len(master.clients.missing)) self.assertEqual(1, master.worker_count) self.assertNotIn( master.state, [STATE_STOPPED, STATE_STOPPING], "Not all workers went missing but test stopped anyway." ) sleep(0.2) self.assertEqual(3, len(master.clients.missing)) self.assertEqual(0, master.worker_count) self.assertEqual(STATE_STOPPED, master.state, "All workers went missing but test didn't stop.") @mock.patch("locust.runners.HEARTBEAT_INTERVAL", new=0.1) @mock.patch("locust.runners.HEARTBEAT_DEAD_INTERNAL", new=-3) def test_worker_missing_after_heartbeat_dead_interval(self): class TestUser(User): @task def my_task(self): gevent.sleep(600) with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server, patch_env( "LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "0.1" ): master = self.get_runner(user_classes=[TestUser]) server.mocked_send(Message("client_ready", __version__, "fake_client1")) server.mocked_send(Message("client_ready", __version__, "fake_client2")) server.mocked_send(Message("client_ready", __version__, "fake_client3")) master.start(3, 3) server.mocked_send(Message("spawning", None, "fake_client1")) server.mocked_send(Message("spawning", None, "fake_client2")) server.mocked_send(Message("spawning", None, "fake_client3")) sleep(0.1) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client1", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client2", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client3", ) ) sleep(0.1) # initially all workers are in active state self.assertEqual(0, len(master.clients.missing)) self.assertEqual(3, master.worker_count) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client1", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client2", ) ) sleep(0.6) # 4 intervals are passed since all 3 heart beats all workers are in missing state self.assertEqual(3, len(master.clients.missing)) self.assertEqual(0, master.worker_count) server.mocked_send( 
Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client1", ) ) server.mocked_send( Message( "heartbeat", {"state": STATE_RUNNING, "current_cpu_usage": 50, "current_memory_usage": 200, "count": 1}, "fake_client2", ) ) sleep(0.2) # hearbeat received from two workers so they are active, for fake_client3 HEARTBEAT_DEAD_INTERNAL has been breached, so it will be removed from worker list self.assertEqual(0, len(master.clients.missing)) self.assertEqual(2, master.worker_count) master.stop() def test_master_total_stats(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) stats = RequestStats() stats.log_request("GET", "/1", 100, 3546) stats.log_request("GET", "/1", 800, 56743) stats2 = RequestStats() stats2.log_request("GET", "/2", 700, 2201) server.mocked_send( Message( "stats", { "stats": stats.serialize_stats(), "stats_total": stats.total.serialize(), "errors": stats.serialize_errors(), "user_count": 1, }, "fake_client", ) ) server.mocked_send( Message( "stats", { "stats": stats2.serialize_stats(), "stats_total": stats2.total.serialize(), "errors": stats2.serialize_errors(), "user_count": 2, }, "fake_client", ) ) self.assertEqual(700, master.stats.total.median_response_time) def test_master_total_stats_with_none_response_times(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) stats = RequestStats() stats.log_request("GET", "/1", 100, 3546) stats.log_request("GET", "/1", 800, 56743) stats.log_request("GET", "/1", None, 56743) stats2 = RequestStats() stats2.log_request("GET", "/2", 700, 2201) stats2.log_request("GET", "/2", None, 2201) stats3 = RequestStats() stats3.log_request("GET", "/3", None, 2201) server.mocked_send( Message( "stats", { "stats": stats.serialize_stats(), "stats_total": stats.total.serialize(), "errors": stats.serialize_errors(), "user_count": 1, }, "fake_client", ) ) server.mocked_send( Message( "stats", { "stats": stats2.serialize_stats(), "stats_total": stats2.total.serialize(), "errors": stats2.serialize_errors(), "user_count": 2, }, "fake_client", ) ) server.mocked_send( Message( "stats", { "stats": stats3.serialize_stats(), "stats_total": stats3.total.serialize(), "errors": stats3.serialize_errors(), "user_count": 2, }, "fake_client", ) ) self.assertEqual(700, master.stats.total.median_response_time) def test_master_current_response_times(self): start_time = 1 with mock.patch("time.time") as mocked_time: mocked_time.return_value = start_time with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() self.environment.stats.reset_all() mocked_time.return_value += 1.0234 server.mocked_send(Message("client_ready", __version__, "fake_client")) stats = RequestStats() stats.log_request("GET", "/1", 100, 3546) stats.log_request("GET", "/1", 800, 56743) server.mocked_send( Message( "stats", { "stats": stats.serialize_stats(), "stats_total": stats.total.get_stripped_report(), "errors": stats.serialize_errors(), "user_count": 1, }, "fake_client", ) ) mocked_time.return_value += 1 stats2 = RequestStats() stats2.log_request("GET", "/2", 400, 2201) server.mocked_send( Message( "stats", { "stats": stats2.serialize_stats(), "stats_total": stats2.total.get_stripped_report(), "errors": stats2.serialize_errors(), "user_count": 2, }, "fake_client", ) ) 
mocked_time.return_value += 4 self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5)) self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95)) # let 10 second pass, do some more requests, send it to the master and make # sure the current response time percentiles only accounts for these new requests mocked_time.return_value += 10.10023 stats.log_request("GET", "/1", 20, 1) stats.log_request("GET", "/1", 30, 1) stats.log_request("GET", "/1", 3000, 1) server.mocked_send( Message( "stats", { "stats": stats.serialize_stats(), "stats_total": stats.total.get_stripped_report(), "errors": stats.serialize_errors(), "user_count": 2, }, "fake_client", ) ) self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5)) self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95)) @mock.patch("locust.runners.HEARTBEAT_INTERVAL", new=600) def test_rebalance_locust_users_on_worker_connect(self): class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) server.mocked_send(Message("client_ready", __version__, "zeh_fake_client1")) self.assertEqual(1, len(master.clients)) self.assertTrue( "zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict" ) master.start(100, 20) self.assertEqual(5, len(server.outbox)) for i, (_, msg) in enumerate(server.outbox.copy()): self.assertDictEqual({"TestUser": int((i + 1) * 20)}, msg.data["user_classes_count"]) server.outbox.pop() # Normally, this attribute would be updated when the # master receives the report from the worker. master.clients["zeh_fake_client1"].user_classes_count = {"TestUser": 100} # let another worker connect server.mocked_send(Message("client_ready", __version__, "zeh_fake_client2")) self.assertEqual(2, len(master.clients)) sleep(0.1) # give time for messages to be sent to clients self.assertEqual(2, len(server.outbox)) client_id, msg = server.outbox.pop() self.assertEqual({"TestUser": 50}, msg.data["user_classes_count"]) client_id, msg = server.outbox.pop() self.assertEqual({"TestUser": 50}, msg.data["user_classes_count"]) def test_sends_spawn_data_to_ready_running_spawning_workers(self): """Sends spawn job to running, ready, or spawning workers""" class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) master.clients[1] = WorkerNode("1") master.clients[2] = WorkerNode("2") master.clients[3] = WorkerNode("3") master.clients[1].state = STATE_INIT master.clients[2].state = STATE_SPAWNING master.clients[3].state = STATE_RUNNING master.start(user_count=5, spawn_rate=5) self.assertEqual(3, len(server.outbox)) def test_start_event(self): """ Tests that test_start event is fired """ class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) run_count = [0] @self.environment.events.test_start.add_listener def on_test_start(*a, **kw): run_count[0] += 1 for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(7, 7) self.assertEqual(5, len(server.outbox)) self.assertEqual(1, run_count[0]) # change number of users and check that test_start isn't fired again master.start(7, 7) self.assertEqual(1, run_count[0]) # stop and start to make sure test_start 
is fired again master.stop() master.start(3, 3) self.assertEqual(2, run_count[0]) master.quit() def test_stop_event(self): """ Tests that test_stop event is fired """ class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) @self.environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): self.runner_stopping = True @self.environment.events.test_stop.add_listener def on_test_stop(*_, **__): self.runner_stopped = True for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(7, 7) self.assertEqual(5, len(server.outbox)) master.stop() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) self.reset_state() for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(7, 7) master.stop() master.quit() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) def test_stop_event_quit(self): """ Tests that test_stop event is fired when quit() is called directly """ class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) @self.environment.events.test_stopping.add_listener def on_test_stopping(*_, **__): self.runner_stopping = True @self.environment.events.test_stop.add_listener def on_test_stop(*_, **__): self.runner_stopped = True for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(7, 7) self.assertEqual(5, len(server.outbox)) master.quit() self.assertTrue(self.runner_stopping) self.assertTrue(self.runner_stopped) def test_spawn_zero_locusts(self): class MyTaskSet(TaskSet): @task def my_task(self): pass class MyTestUser(User): tasks = [MyTaskSet] wait_time = constant(0.1) environment = Environment(user_classes=[MyTestUser]) runner = LocalRunner(environment) timeout = gevent.Timeout(2.0) timeout.start() try: runner.start(0, 1, wait=True) runner.spawning_greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. 
A locust seems to have been spawned, even though 0 was specified.") finally: timeout.cancel() def test_spawn_uneven_locusts(self): """ Tests that we can accurately spawn a certain number of locusts, even if it's not an even number of the connected workers """ class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(7, 7) self.assertEqual(5, len(server.outbox)) num_users = sum(sum(msg.data["user_classes_count"].values()) for _, msg in server.outbox if msg.data) self.assertEqual(7, num_users, "Total number of locusts that would have been spawned is not 7") def test_spawn_fewer_locusts_than_workers(self): class TestUser(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[TestUser]) for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) master.start(2, 2) self.assertEqual(5, len(server.outbox)) num_users = sum(sum(msg.data["user_classes_count"].values()) for _, msg in server.outbox if msg.data) self.assertEqual(2, num_users, "Total number of locusts that would have been spawned is not 2") def test_custom_shape_scale_up(self): class MyUser(User): @task def my_task(self): pass class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 2: return 1, 1 elif run_time < 4: return 2, 2 else: return None self.environment.shape_class = TestShape() with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[MyUser]) for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) # Start the shape_worker self.environment.shape_class.reset_time() master.start_shape() sleep(0.5) # Wait for shape_worker to update user_count num_users = sum(sum(msg.data["user_classes_count"].values()) for _, msg in server.outbox if msg.data) self.assertEqual( 1, num_users, "Total number of users in first stage of shape test is not 1: %i" % num_users ) # Wait for shape_worker to update user_count again sleep(2) num_users = sum(sum(msg.data["user_classes_count"].values()) for _, msg in server.outbox if msg.data) self.assertEqual( 3, num_users, "Total number of users in second stage of shape test is not 3: %i" % num_users ) # Wait to ensure shape_worker has stopped the test sleep(3) self.assertEqual("stopped", master.state, "The test has not been stopped by the shape class") def test_custom_shape_scale_down(self): class MyUser(User): @task def my_task(self): pass class TestShape(LoadTestShape): def tick(self): run_time = self.get_run_time() if run_time < 2: return 5, 5 elif run_time < 4: return 1, 5 else: return None self.environment.shape_class = TestShape() with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[MyUser]) for i in range(5): server.mocked_send(Message("client_ready", __version__, "fake_client%i" % i)) # Start the shape_worker self.environment.shape_class.reset_time() master.start_shape() sleep(0.5) # Wait for shape_worker to update user_count num_users = sum(sum(msg.data["user_classes_count"].values()) for _, msg in server.outbox if msg.data) self.assertEqual( 5, num_users, "Total number of users in first stage of shape test is not 5: %i" % num_users ) # Wait for shape_worker to update user_count again 
sleep(2) msgs = defaultdict(dict) for _, msg in server.outbox: if not msg.data: continue msgs[msg.node_id][msg.data["timestamp"]] = sum(msg.data["user_classes_count"].values()) # Count users for the last received messages num_users = sum(v[max(v.keys())] for v in msgs.values()) self.assertEqual( 1, num_users, "Total number of users in second stage of shape test is not 1: %i" % num_users ) # Wait to ensure shape_worker has stopped the test sleep(3) self.assertEqual("stopped", master.state, "The test has not been stopped by the shape class") def test_exception_in_task(self): class MyUser(User): @task def will_error(self): raise HeyAnException(":(") self.environment.user_classes = [MyUser] runner = self.environment.create_local_runner() l = MyUser(self.environment) self.assertRaises(HeyAnException, l.run) self.assertRaises(HeyAnException, l.run) self.assertEqual(1, len(runner.exceptions)) hash_key, exception = runner.exceptions.popitem() self.assertTrue("traceback" in exception) self.assertTrue("HeyAnException" in exception["traceback"]) self.assertEqual(2, exception["count"]) def test_exception_is_caught(self): """Test that exceptions are stored, and execution continues""" class MyTaskSet(TaskSet): def __init__(self, *a, **kw): super().__init__(*a, **kw) self._task_queue = [self.will_error, self.will_stop] @task(1) def will_error(self): raise HeyAnException(":(") @task(1) def will_stop(self): raise StopUser() class MyUser(User): wait_time = constant(0.01) tasks = [MyTaskSet] # set config to catch exceptions in locust users self.environment.catch_exceptions = True self.environment.user_classes = [MyUser] runner = LocalRunner(self.environment) l = MyUser(self.environment) # make sure HeyAnException isn't raised l.run() l.run() # make sure we got two entries in the error log self.assertEqual(2, len(self.mocked_log.error)) # make sure exception was stored self.assertEqual(1, len(runner.exceptions)) hash_key, exception = runner.exceptions.popitem() self.assertTrue("traceback" in exception) self.assertTrue("HeyAnException" in exception["traceback"]) self.assertEqual(2, exception["count"]) def test_master_reset_connection(self): """Test that connection will be reset when network issues found""" with mock.patch("locust.runners.FALLBACK_INTERVAL", new=0.1): with mock.patch("locust.rpc.rpc.Server", mocked_rpc(raise_on_close=False)) as server: master = self.get_runner() self.assertEqual(0, len(master.clients)) server.mocked_send(Message("client_ready", NETWORK_BROKEN, "fake_client")) self.assertTrue(master.connection_broken) server.mocked_send(Message("client_ready", __version__, "fake_client")) sleep(1) self.assertFalse(master.connection_broken) self.assertEqual(1, len(master.clients)) master.quit() def test_reset_connection_after_RPCError(self): with mock.patch("locust.rpc.rpc.Server", mocked_rpc(raise_on_close=False)) as server: master = self.get_runner() server.mocked_send(Message("client_ready", __version__, "fake_client")) sleep(0.2) self.assertFalse(master.connection_broken) self.assertEqual(1, len(master.clients)) # Trigger RPCError server.mocked_send(Message("lets_trigger_RPCError", NETWORK_BROKEN, "fake_client")) self.assertTrue(master.connection_broken) sleep(1) self.assertFalse(master.connection_broken) master.quit() def test_attributes_populated_when_calling_start(self): class MyUser1(User): @task def my_task(self): pass class MyUser2(User): @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner(user_classes=[MyUser1, 
MyUser2]) server.mocked_send(Message("client_ready", __version__, "fake_client1")) master.start(7, 7) self.assertEqual({"MyUser1": 4, "MyUser2": 3}, master.target_user_classes_count) self.assertEqual(7, master.target_user_count) self.assertEqual(7, master.spawn_rate) master.start(10, 10) self.assertEqual({"MyUser1": 5, "MyUser2": 5}, master.target_user_classes_count) self.assertEqual(10, master.target_user_count) self.assertEqual(10, master.spawn_rate) master.start(1, 3) self.assertEqual({"MyUser1": 1, "MyUser2": 0}, master.target_user_classes_count) self.assertEqual(1, master.target_user_count) self.assertEqual(3, master.spawn_rate) def test_custom_message_send(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: master = self.get_runner() for i in range(5): master.clients[i] = WorkerNode(str(i)) master.send_message("test_custom_msg", {"test_data": 123}) self.assertEqual(5, len(server.outbox)) for _, msg in server.outbox: self.assertEqual("test_custom_msg", msg.type) self.assertEqual(123, msg.data["test_data"]) def test_custom_message_receive(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: test_custom_msg = [False] test_custom_msg_data = [{}] def on_custom_msg(msg, **kw): test_custom_msg[0] = True test_custom_msg_data[0] = msg.data master = self.get_runner() master.register_message("test_custom_msg", on_custom_msg) server.mocked_send(Message("test_custom_msg", {"test_data": 123}, "dummy_id")) self.assertTrue(test_custom_msg[0]) self.assertEqual(123, test_custom_msg_data[0]["test_data"]) def test_undefined_custom_message_receive(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: test_custom_msg = [False] def on_custom_msg(msg, **kw): test_custom_msg[0] = True master = self.get_runner() master.register_message("test_custom_msg", on_custom_msg) server.mocked_send(Message("unregistered_custom_msg", {}, "dummy_id")) self.assertFalse(test_custom_msg[0]) self.assertEqual(1, len(self.mocked_log.warning)) msg = self.mocked_log.warning[0] self.assertIn("Unknown message type received from worker", msg) def test_wait_for_workers_report_after_ramp_up(self): def assert_cache_hits(): self.assertEqual(master._wait_for_workers_report_after_ramp_up.cache_info().hits, 0) master._wait_for_workers_report_after_ramp_up() self.assertEqual(master._wait_for_workers_report_after_ramp_up.cache_info().hits, 1) master = self.get_runner() master._wait_for_workers_report_after_ramp_up.cache_clear() self.assertEqual(master._wait_for_workers_report_after_ramp_up(), 1.0) assert_cache_hits() master._wait_for_workers_report_after_ramp_up.cache_clear() with patch_env("LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "5.7"): self.assertEqual(master._wait_for_workers_report_after_ramp_up(), 5.7) assert_cache_hits() master._wait_for_workers_report_after_ramp_up.cache_clear() with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=1.5), patch_env( "LOCUST_WAIT_FOR_WORKERS_REPORT_AFTER_RAMP_UP", "5.7 * WORKER_REPORT_INTERVAL" ): self.assertEqual(master._wait_for_workers_report_after_ramp_up(), 5.7 * 1.5) assert_cache_hits() master._wait_for_workers_report_after_ramp_up.cache_clear() class TestWorkerRunner(LocustTestCase): def setUp(self): super().setUp() # self._report_to_master_event_handlers = [h for h in events.report_to_master._handlers] 
def tearDown(self): # events.report_to_master._handlers = self._report_to_master_event_handlers super().tearDown() def get_runner(self, environment=None, user_classes=None): if environment is None: environment = self.environment user_classes = user_classes or [] environment.user_classes = user_classes return WorkerRunner(environment, master_host="localhost", master_port=5557) def test_worker_stop_timeout(self): class MyTestUser(User): _test_state = 0 @task def the_task(self): MyTestUser._test_state = 1 gevent.sleep(0.2) MyTestUser._test_state = 2 with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyTestUser]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", client.outbox[0].type) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyTestUser": 1}, "host": "", "stop_timeout": 1, "parsed_options": {}, }, "dummy_client_id", ) ) # wait for worker to spawn locusts self.assertIn("spawning", [m.type for m in client.outbox]) worker.spawning_greenlet.join() self.assertEqual(1, len(worker.user_greenlets)) # check that locust has started running gevent.sleep(0.01) self.assertEqual(1, MyTestUser._test_state) # send stop message client.mocked_send(Message("stop", None, "dummy_client_id")) worker.user_greenlets.join() # check that locust user got to finish self.assertEqual(2, MyTestUser._test_state) def test_worker_without_stop_timeout(self): class MyTestUser(User): _test_state = 0 @task def the_task(self): MyTestUser._test_state = 1 gevent.sleep(0.2) MyTestUser._test_state = 2 with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment(stop_timeout=None) worker = self.get_runner(environment=environment, user_classes=[MyTestUser]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", client.outbox[0].type) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyTestUser": 1}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) # print("outbox:", client.outbox) # wait for worker to spawn locusts self.assertIn("spawning", [m.type for m in client.outbox]) worker.spawning_greenlet.join() self.assertEqual(1, len(worker.user_greenlets)) # check that locust has started running gevent.sleep(0.01) self.assertEqual(1, MyTestUser._test_state) # send stop message client.mocked_send(Message("stop", None, "dummy_client_id")) worker.user_greenlets.join() # check that locust user did not get to finish self.assertEqual(1, MyTestUser._test_state) def test_spawn_message_with_older_timestamp_is_rejected(self): class MyUser(User): wait_time = constant(1) def start(self, group: Group): # We do this so that the spawning does not finish # too quickly gevent.sleep(0.1) return super().start(group) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser]) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyUser": 10}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) sleep(0.6) self.assertEqual(STATE_SPAWNING, worker.state) worker.spawning_greenlet.join() self.assertEqual(10, worker.user_count) # Send same timestamp as the first message client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyUser": 9}, "host": 
"", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) worker.spawning_greenlet.join() # Still 10 users self.assertEqual(10, worker.user_count) # Send older timestamp than the first message client.mocked_send( Message( "spawn", { "timestamp": 1605538583, "user_classes_count": {"MyUser": 2}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) worker.spawning_greenlet.join() # Still 10 users self.assertEqual(10, worker.user_count) # Send newer timestamp than the first message client.mocked_send( Message( "spawn", { "timestamp": 1605538585, "user_classes_count": {"MyUser": 2}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) worker.spawning_greenlet.join() self.assertEqual(2, worker.user_count) worker.quit() def test_worker_messages_sent_to_master(self): """ Ensure that worker includes both "user_count" and "user_classes_count" when reporting to the master. """ class MyUser(User): wait_time = constant(1) def start(self, group: Group): # We do this so that the spawning does not finish # too quickly gevent.sleep(0.1) return super().start(group) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser]) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyUser": 10}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) sleep(0.6) self.assertEqual(STATE_SPAWNING, worker.state) worker.spawning_greenlet.join() self.assertEqual(10, worker.user_count) sleep(2) message = next((m for m in reversed(client.outbox) if m.type == "stats"), None) self.assertIsNotNone(message) self.assertIn("user_count", message.data) self.assertIn("user_classes_count", message.data) self.assertEqual(message.data["user_count"], 10) self.assertEqual(message.data["user_classes_count"]["MyUser"], 10) message = next((m for m in client.outbox if m.type == "spawning_complete"), None) self.assertIsNotNone(message) self.assertIn("user_count", message.data) self.assertIn("user_classes_count", message.data) self.assertEqual(message.data["user_count"], 10) self.assertEqual(message.data["user_classes_count"]["MyUser"], 10) worker.quit() def test_worker_heartbeat_messages_sent_to_master(self): """ Validate content of the heartbeat payload sent to the master. 
""" class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser]) t0 = time.perf_counter() while len([m for m in client.outbox if m.type == "heartbeat"]) == 0: self.assertLessEqual(time.perf_counter() - t0, 3) sleep(0.1) message = next(m for m in reversed(client.outbox) if m.type == "heartbeat") self.assertEqual(len(message.data), 3) self.assertIn("state", message.data) self.assertIn("current_cpu_usage", message.data) self.assertIn("current_memory_usage", message.data) worker.quit() def test_change_user_count_during_spawning(self): class MyUser(User): wait_time = constant(1) def start(self, group: Group): # We do this so that the spawning does not finish # too quickly gevent.sleep(0.1) return super().start(group) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser]) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyUser": 10}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) sleep(0.6) self.assertEqual(STATE_SPAWNING, worker.state) client.mocked_send( Message( "spawn", { "timestamp": 1605538585, "user_classes_count": {"MyUser": 9}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) sleep(0) worker.spawning_greenlet.join() self.assertEqual(9, len(worker.user_greenlets)) worker.quit() def test_computed_properties(self): class MyUser1(User): wait_time = constant(1) @task def my_task(self): pass class MyUser2(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser1, MyUser2]) client.mocked_send( Message( "spawn", { "timestamp": 1605538584, "user_classes_count": {"MyUser1": 10, "MyUser2": 10}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) worker.spawning_greenlet.join() self.assertDictEqual(worker.user_classes_count, {"MyUser1": 10, "MyUser2": 10}) self.assertDictEqual(worker.target_user_classes_count, {"MyUser1": 10, "MyUser2": 10}) self.assertEqual(worker.target_user_count, 20) client.mocked_send( Message( "spawn", { "timestamp": 1605538585, "user_classes_count": {"MyUser1": 1, "MyUser2": 2}, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) worker.spawning_greenlet.join() self.assertDictEqual(worker.user_classes_count, {"MyUser1": 1, "MyUser2": 2}) self.assertDictEqual(worker.target_user_classes_count, {"MyUser1": 1, "MyUser2": 2}) self.assertEqual(worker.target_user_count, 3) worker.quit() def test_custom_message_send(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() worker = self.get_runner(environment=environment, user_classes=[MyUser]) client.outbox.clear() worker.send_message("test_custom_msg", {"test_data": 123}) self.assertEqual("test_custom_msg", client.outbox[0].type) self.assertEqual(123, client.outbox[0].data["test_data"]) worker.quit() def test_custom_message_receive(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as 
client: environment = Environment() test_custom_msg = [False] test_custom_msg_data = [{}] def on_custom_msg(msg, **kw): test_custom_msg[0] = True test_custom_msg_data[0] = msg.data worker = self.get_runner(environment=environment, user_classes=[MyUser]) worker.register_message("test_custom_msg", on_custom_msg) client.mocked_send(Message("test_custom_msg", {"test_data": 123}, "dummy_client_id")) self.assertTrue(test_custom_msg[0]) self.assertEqual(123, test_custom_msg_data[0]["test_data"]) worker.quit() def test_undefined_custom_message_receive(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() test_custom_msg = [False] def on_custom_msg(msg, **kw): test_custom_msg[0] = True worker = self.get_runner(environment=environment, user_classes=[MyUser]) worker.register_message("test_custom_msg", on_custom_msg) client.mocked_send(Message("unregistered_custom_msg", {}, "dummy_id")) self.assertFalse(test_custom_msg[0]) self.assertEqual(1, len(self.mocked_log.warning)) msg = self.mocked_log.warning[0] self.assertIn("Unknown message type received", msg) def test_start_event(self): class MyTestUser(User): _test_state = 0 @task def the_task(self): MyTestUser._test_state = 1 gevent.sleep(0.2) MyTestUser._test_state = 2 with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() run_count = [0] @environment.events.test_start.add_listener def on_test_start(*args, **kw): run_count[0] += 1 worker = self.get_runner(environment=environment, user_classes=[MyTestUser]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", client.outbox[0].type) client.mocked_send( Message( "spawn", { "timestamp": 1605538585, "user_classes_count": {"MyTestUser": 1}, "spawn_rate": 1, "num_users": 1, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) # wait for worker to spawn locusts self.assertIn("spawning", [m.type for m in client.outbox]) worker.spawning_greenlet.join() self.assertEqual(1, len(worker.user_greenlets)) self.assertEqual(1, run_count[0]) # check that locust has started running gevent.sleep(0.01) self.assertEqual(1, MyTestUser._test_state) # change number of users and check that test_start isn't fired again client.mocked_send( Message( "spawn", { "timestamp": 1605538586, "user_classes_count": {"MyTestUser": 1}, "spawn_rate": 1, "num_users": 1, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) self.assertEqual(1, run_count[0]) # stop and start to make sure test_start is fired again client.mocked_send(Message("stop", None, "dummy_client_id")) client.mocked_send( Message( "spawn", { "timestamp": 1605538587, "user_classes_count": {"MyTestUser": 1}, "spawn_rate": 1, "num_users": 1, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) gevent.sleep(0.01) self.assertEqual(2, run_count[0]) client.mocked_send(Message("stop", None, "dummy_client_id")) def test_stop_event(self): class MyTestUser(User): _test_state = 0 @task def the_task(self): MyTestUser._test_state = 1 gevent.sleep(0.2) MyTestUser._test_state = 2 with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: environment = Environment() run_count = [0] @environment.events.test_stop.add_listener def on_test_stop(*args, **kw): run_count[0] += 1 worker = self.get_runner(environment=environment, user_classes=[MyTestUser]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", 
client.outbox[0].type) client.mocked_send( Message( "spawn", { "timestamp": 1605538585, "user_classes_count": {"MyTestUser": 1}, "spawn_rate": 1, "num_users": 1, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) # wait for worker to spawn locusts self.assertIn("spawning", [m.type for m in client.outbox]) worker.spawning_greenlet.join() self.assertEqual(1, len(worker.user_greenlets)) # check that locust has started running gevent.sleep(0.01) self.assertEqual(1, MyTestUser._test_state) # stop and make sure test_stop is fired client.mocked_send(Message("stop", None, "dummy_client_id")) gevent.sleep(0.01) self.assertEqual(1, run_count[0]) # stop while stopped and make sure the event isn't fired again client.mocked_send(Message("stop", None, "dummy_client_id")) gevent.sleep(0.01) self.assertEqual(1, run_count[0]) # start and stop to check that the event is fired again client.mocked_send( Message( "spawn", { "timestamp": 1605538586, "user_classes_count": {"MyTestUser": 1}, "spawn_rate": 1, "num_users": 1, "host": "", "stop_timeout": None, "parsed_options": {}, }, "dummy_client_id", ) ) client.mocked_send(Message("stop", None, "dummy_client_id")) gevent.sleep(0.01) self.assertEqual(2, run_count[0]) class TestMessageSerializing(unittest.TestCase): def test_message_serialize(self): msg = Message("client_ready", __version__, "my_id") rebuilt = Message.unserialize(msg.serialize()) self.assertEqual(msg.type, rebuilt.type) self.assertEqual(msg.data, rebuilt.data) self.assertEqual(msg.node_id, rebuilt.node_id) class TestStopTimeout(LocustTestCase): def test_stop_timeout(self): short_time = 0.05 class MyTaskSet(TaskSet): @task def my_task(self): MyTaskSet.state = "first" gevent.sleep(short_time) MyTaskSet.state = "second" # should only run when run time + stop_timeout is > short_time gevent.sleep(short_time) MyTaskSet.state = "third" # should only run when run time + stop_timeout is > short_time * 2 class MyTestUser(User): tasks = [MyTaskSet] environment = Environment(user_classes=[MyTestUser]) runner = environment.create_local_runner() runner.start(1, 1, wait=False) gevent.sleep(short_time / 2) runner.quit() self.assertEqual("first", MyTaskSet.state) # exit with timeout environment = Environment(user_classes=[MyTestUser], stop_timeout=short_time / 2) runner = environment.create_local_runner() runner.start(1, 1, wait=False) gevent.sleep(short_time) runner.quit() self.assertEqual("second", MyTaskSet.state) # allow task iteration to complete, with some margin environment = Environment(user_classes=[MyTestUser], stop_timeout=short_time * 3) runner = environment.create_local_runner() runner.start(1, 1, wait=False) gevent.sleep(short_time) timeout = gevent.Timeout(short_time * 2) timeout.start() try: runner.quit() runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. 
Some locusts must have kept running after iteration finish") finally: timeout.cancel() self.assertEqual("third", MyTaskSet.state) def test_stop_timeout_during_on_start(self): short_time = 0.05 class MyTaskSet(TaskSet): finished_on_start = False my_task_run = False def on_start(self): gevent.sleep(short_time) MyTaskSet.finished_on_start = True @task def my_task(self): MyTaskSet.my_task_run = True class MyTestUser(User): tasks = [MyTaskSet] environment = create_environment([MyTestUser], mocked_options()) environment.stop_timeout = short_time runner = environment.create_local_runner() runner.start(1, 1) gevent.sleep(short_time / 2) runner.quit() self.assertTrue(MyTaskSet.finished_on_start) self.assertFalse(MyTaskSet.my_task_run) def test_stop_timeout_exit_during_wait(self): short_time = 0.05 class MyTaskSet(TaskSet): @task def my_task(self): pass class MyTestUser(User): tasks = [MyTaskSet] wait_time = constant(1) environment = Environment(user_classes=[MyTestUser], stop_timeout=short_time) runner = environment.create_local_runner() runner.start(1, 1) gevent.sleep(short_time) # sleep to make sure locust has had time to start waiting timeout = gevent.Timeout(short_time) timeout.start() try: runner.quit() runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. Waiting locusts should stop immediately, even when using stop_timeout.") finally: timeout.cancel() def test_stop_timeout_with_interrupt(self): short_time = 0.05 class MySubTaskSet(TaskSet): @task def a_task(self): gevent.sleep(0) self.interrupt(reschedule=True) class MyTaskSet(TaskSet): tasks = [MySubTaskSet] class MyTestUser(User): tasks = [MyTaskSet] environment = create_environment([MyTestUser], mocked_options()) environment.stop_timeout = short_time runner = environment.create_local_runner() runner.start(1, 1, wait=True) gevent.sleep(0) timeout = gevent.Timeout(short_time) timeout.start() try: runner.quit() runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. Interrupted locusts should exit immediately during stop_timeout.") finally: timeout.cancel() def test_stop_timeout_with_interrupt_no_reschedule(self): state = [0] class MySubTaskSet(TaskSet): @task def a_task(self): gevent.sleep(0.1) state[0] = 1 self.interrupt(reschedule=False) class MyTestUser(User): tasks = [MySubTaskSet] wait_time = constant(3) environment = create_environment([MyTestUser], mocked_options()) environment.stop_timeout = 0.3 runner = environment.create_local_runner() runner.start(1, 1, wait=True) gevent.sleep(0) timeout = gevent.Timeout(0.11) timeout.start() try: runner.quit() runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. 
Interrupted locusts should exit immediately during stop_timeout.") finally: timeout.cancel() self.assertEqual(1, state[0]) def test_kill_locusts_with_stop_timeout(self): short_time = 0.05 class MyTaskSet(TaskSet): @task def my_task(self): MyTaskSet.state = "first" gevent.sleep(short_time) MyTaskSet.state = "second" # should only run when run time + stop_timeout is > short_time gevent.sleep(short_time) MyTaskSet.state = "third" # should only run when run time + stop_timeout is > short_time * 2 class MyTestUser(User): tasks = [MyTaskSet] environment = create_environment([MyTestUser], mocked_options()) runner = environment.create_local_runner() runner.start(1, 1) gevent.sleep(short_time / 2) runner.stop_users({MyTestUser.__name__: 1}) self.assertEqual("first", MyTaskSet.state) runner.quit() environment.runner = None environment.stop_timeout = short_time / 2 # exit with timeout runner = environment.create_local_runner() runner.start(1, 1) gevent.sleep(short_time) runner.stop_users({MyTestUser.__name__: 1}) self.assertEqual("second", MyTaskSet.state) runner.quit() environment.runner = None environment.stop_timeout = short_time * 3 # allow task iteration to complete, with some margin runner = environment.create_local_runner() runner.start(1, 1) gevent.sleep(short_time) timeout = gevent.Timeout(short_time * 2) timeout.start() try: runner.stop_users({MyTestUser.__name__: 1}) runner.user_greenlets.join() except gevent.Timeout: self.fail("Got Timeout exception. Some locusts must have kept running after iteration finish") finally: timeout.cancel() self.assertEqual("third", MyTaskSet.state) def test_users_can_call_runner_quit_with_stop_timeout(self): class BaseUser(User): wait_time = constant(1) @task def trigger(self): self.environment.runner.quit() runner = Environment(user_classes=[BaseUser]).create_local_runner() runner.environment.stop_timeout = 1 runner.spawn_users({BaseUser.__name__: 1}, wait=False) timeout = gevent.Timeout(0.5) timeout.start() try: runner.greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception, runner must have hung somehow.") finally: timeout.cancel() def test_gracefully_handle_exceptions_in_listener(self): class MyUser(User): wait_time = constant(1) @task def my_task(self): pass test_stop_run = [0] environment = Environment(user_classes=[MyUser]) def on_test_stop_ok(*args, **kwargs): test_stop_run[0] += 1 def on_test_stop_fail(*args, **kwargs): assert False environment.events.test_stop.add_listener(on_test_stop_ok) environment.events.test_stop.add_listener(on_test_stop_fail) environment.events.test_stop.add_listener(on_test_stop_ok) runner = LocalRunner(environment) runner.start(user_count=3, spawn_rate=3, wait=False) self.assertEqual(0, test_stop_run[0]) runner.stop() self.assertEqual(2, test_stop_run[0]) def test_stop_timeout_with_ramp_down(self): """ The spawn rate does not have an effect on the rate at which the users are stopped. It is expected that the excess users will be stopped as soon as possible in parallel (while respecting the stop_timeout). 
""" class MyTaskSet(TaskSet): @task def my_task(self): gevent.sleep(1) class MyTestUser(User): tasks = [MyTaskSet] environment = Environment(user_classes=[MyTestUser], stop_timeout=2) runner = environment.create_local_runner() # Start load test, wait for users to start, then trigger ramp down ts = time.perf_counter() runner.start(10, 10, wait=False) runner.spawning_greenlet.join() delta = time.perf_counter() - ts self.assertTrue( 0 <= delta <= 0.05, f"Expected user count to increase to 10 instantaneously, instead it took {delta:f}" ) self.assertTrue( runner.user_count == 10, "User count has not decreased correctly to 2, it is : %i" % runner.user_count ) ts = time.perf_counter() runner.start(2, 4, wait=False) runner.spawning_greenlet.join() delta = time.perf_counter() - ts self.assertTrue(2 <= delta <= 2.05, f"Expected user count to decrease to 2 in 2s, instead it took {delta:f}") self.assertTrue( runner.user_count == 2, "User count has not decreased correctly to 2, it is : %i" % runner.user_count )
{ "content_hash": "ebdf71a4418a04464adfc6ba0c93f556", "timestamp": "", "source": "github", "line_count": 3806, "max_line_length": 167, "avg_line_length": 37.48003152916448, "alnum_prop": 0.5385877223114077, "repo_name": "mbeacom/locust", "id": "204726c86f311efc700ebfe6e6b323d72272fdbe", "size": "142649", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "locust/test/test_runners.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "732" }, { "name": "HTML", "bytes": "30187" }, { "name": "JavaScript", "bytes": "17229" }, { "name": "Makefile", "bytes": "436" }, { "name": "Python", "bytes": "809070" }, { "name": "Sass", "bytes": "10379" }, { "name": "Shell", "bytes": "3452" } ], "symlink_target": "" }
import xml.etree.ElementTree as ET import requests from requests.exceptions import ConnectionError from xml.dom import minidom from . import get_resources_dir, get_user_dir import os from glob import glob from tqdm import tqdm import logging CHUNK_SIZE = 1 * 1024 * 1024 # 5 MB backend_name = "GMQL-PythonAPI" BACKEND_URL = "https://github.com/DEIB-GECO/PyGMQL/releases/download/backend-stable/GMQL-PythonAPI-1.0-main-SNAPSHOT-jackofall.jar" class DependencyManager: def __init__(self): backend_jar_path = glob(os.path.join(get_user_dir(), "{}*.jar".format(backend_name))) backend_jar_path = backend_jar_path[0] if len(backend_jar_path) == 1 else None self.backend_jar_path = backend_jar_path def is_backend_present(self): return self.backend_jar_path is not None def resolve_dependencies(self): if not self.is_backend_present(): backend_jar_path = os.path.join(get_user_dir(), BACKEND_URL.split("/")[-1]) self.download_from_location(BACKEND_URL, backend_jar_path) self.backend_jar_path = backend_jar_path return self.backend_jar_path @staticmethod def download_from_location(location, output_path): r = requests.get(location, stream=True) total_size = int(r.headers.get("content-length", 0)) with open(output_path, "wb") as f: for data in tqdm(r.iter_content(chunk_size=CHUNK_SIZE), total=int(total_size/CHUNK_SIZE), unit="M"): f.write(data) @staticmethod def find_package(repo, repo_name, groupId, artifactId, version, classifier=None): first_part_url = "/".join(repo.split("/")[:3]) + "/" query_url = first_part_url + "service/local/artifact/maven/resolve?" query_url += "g={}&".format(groupId) query_url += "a={}&".format(artifactId) query_url += "v={}&".format(version) query_url += "r={}".format(repo_name) if classifier is not None: query_url += "&c={}".format(classifier) resp_text = requests.get(query_url).text resp = DependencyManager._parse_dependency_info_fromstring(resp_text) location = repo + resp['repositoryPath'] return location @staticmethod def _parse_dependency_info_fromstring(s): tree = ET.ElementTree(ET.fromstring(s)) return DependencyManager.__parse_dependency_info_from_tree(tree) @staticmethod def __parse_dependency_info_from_tree(tree): root = tree.getroot() data = root.find("data") res = {} for d in data: tag = d.tag text = d.text try: n = int(text) text = n except ValueError: pass res[tag] = text return res # class DependencyManager_OLD: # def __init__(self): # self.logger = logging.getLogger() # self.dependency_file_path = os.path.join(get_resources_dir(), "dependencies.xml") # self.repo_name, self.repo_url, self.deps = self._parse_dependency_file(self.dependency_file_path) # self.backend_info_file = os.path.join(get_user_dir(), "dependencies_info.xml") # backend_info = None # if os.path.isfile(self.backend_info_file): # backend_info = self._parse_dependency_info(self.backend_info_file) # self.backend_info = backend_info # backend_jar_path = glob(os.path.join(get_user_dir(), "{}*.jar".format(backend_name))) # backend_jar_path = backend_jar_path[0] if len(backend_jar_path) == 1 else None # self.backend_jar_path = backend_jar_path # def is_backend_present(self): # return self.backend_info is not None # def is_connection_on(self): # pass # @staticmethod # def _parse_dependency_file(path): # tree = ET.parse(path) # root = tree.getroot() # repository = root.find("repository") # repo_name = repository.find("name").text # repo_url = repository.find("url").text # if repo_url[-1] == '/': # repo_url = repo_url[:-1] # deps = [] # dependencies = root.find("dependencies") # if dependencies is not None: # 
for d in dependencies: # groupId = d.find("groupId").text # mandatory # artifactId = d.find("artifactId").text # mandatory # version = d.find("version").text # mandatory # dd = { # 'groupId': groupId, # "artifactId": artifactId, # "version": version # } # classifier = d.find("classifier") # optional # if classifier is not None: # classifier = classifier.text # dd['classifier'] = classifier # deps.append(dd) # return repo_name, repo_url, deps # @staticmethod # def _parse_dependency_info(path): # tree = ET.parse(path) # return DependencyManager.__parse_dependency_info_from_tree(tree) # @staticmethod # def _parse_dependency_info_fromstring(s): # tree = ET.ElementTree(ET.fromstring(s)) # return DependencyManager.__parse_dependency_info_from_tree(tree) # @staticmethod # def __parse_dependency_info_from_tree(tree): # root = tree.getroot() # data = root.find("data") # res = {} # for d in data: # tag = d.tag # text = d.text # try: # n = int(text) # text = n # except ValueError: # pass # res[tag] = text # return res # @staticmethod # def find_package(repo, repo_name, groupId, artifactId, version, classifier=None): # first_part_url = "/".join(repo.split("/")[:3]) + "/" # query_url = first_part_url + "service/local/artifact/maven/resolve?" # query_url += "g={}&".format(groupId) # query_url += "a={}&".format(artifactId) # query_url += "v={}&".format(version) # query_url += "r={}".format(repo_name) # if classifier is not None: # query_url += "&c={}".format(classifier) # resp_text = requests.get(query_url).text # resp = DependencyManager._parse_dependency_info_fromstring(resp_text) # location = repo + resp['repositoryPath'] # return location # def resolve_dependencies(self): # first_part_url = "/".join(self.repo_url.split("/")[:3]) # query_url = first_part_url + "/service/local/artifact/maven/resolve?" # for d in self.deps: # query_url += "g={}&".format(d['groupId']) # query_url += "a={}&".format(d['artifactId']) # query_url += "v={}&".format(d['version']) # query_url += "r={}".format(self.repo_name) # if "classifier" in d.keys(): # query_url += "&c={}".format(d['classifier']) # try: # resp_text = requests.get(query_url).text # except ConnectionError: # if self.is_backend_present(): # return self.backend_jar_path # else: # raise ValueError("Unable to connect to repository to retrieve GMQL backend. 
Check your connection") # resp = self._parse_dependency_info_fromstring(resp_text) # location = self.repo_url + resp['repositoryPath'] # output_path = os.path.join(get_user_dir(), resp['repositoryPath'].split("/")[-1]) # if not self.is_backend_present(): # self.logger.info("Downloading backend") # # there is no backend (first start) # self.download_from_location(location, output_path) # self._save_dependency(resp_text) # elif self.repo_name == 'snapshots' and \ # self.backend_info['snapshot'] == 'true': # # we have a snapshot backend and we are pulling a snapshot # current_timestamp = self.backend_info['snapshotTimeStamp'] # retrieved_timestamp = resp['snapshotTimeStamp'] # if current_timestamp < retrieved_timestamp: # # we are using an outdated backend # self.logger.info("Updating backend to latest version") # self.__set_backend(location, output_path, resp_text) # elif self.repo_name == 'releases': # # If we need a release, it always wins # if self.backend_info['snapshot'] == 'true': # self.logger.info("Updating backend") # self.__set_backend(location, output_path, resp_text) # else: # current_version = float(self.backend_info['version']) # retrieved_version = float(resp['version']) # current_classifier = self.backend_info.get("classifier") # retrieved_classifier = resp.get("classifier") # if (current_version != retrieved_version) or (current_classifier != retrieved_classifier): # # the versions do not match # self.logger.info("Updating backend") # self.__set_backend(location, output_path, resp_text) # else: # raise NotImplementedError("Need to implement the backend download in the case" # " of releases!!!!") # return output_path # def __set_backend(self, location, output_path, resp_text): # self._delete_current_backend() # self.download_from_location(location, output_path) # self._save_dependency(resp_text) # @staticmethod # def _save_dependency(resp): # resp_nice = minidom.parseString(resp) # resp_nice = resp_nice.toprettyxml() # with open(os.path.join(get_user_dir(), "dependencies_info.xml"), "w") as f: # f.write(resp_nice) # @staticmethod # def download_from_location(location, output_path): # r = requests.get(location, stream=True) # total_size = int(r.headers.get("content-length", 0)) # with open(output_path, "wb") as f: # for data in tqdm(r.iter_content(chunk_size=CHUNK_SIZE), total=int(total_size/CHUNK_SIZE), unit="M"): # f.write(data) # def _delete_current_backend(self): # # search for the only jar file in the resources path # os.remove(self.backend_jar_path) # os.remove(self.backend_info_file)
{ "content_hash": "e742b7f5df6649a9bf8c17a493818dfb", "timestamp": "", "source": "github", "line_count": 248, "max_line_length": 131, "avg_line_length": 43.04435483870968, "alnum_prop": 0.5613114754098361, "repo_name": "DEIB-GECO/PyGMQL", "id": "66614daa84459e58ad021a1033a567366eb452af", "size": "10675", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gmql/FileManagment/DependencyManager.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "40" }, { "name": "Dockerfile", "bytes": "777" }, { "name": "Python", "bytes": "283267" }, { "name": "Shell", "bytes": "1272" } ], "symlink_target": "" }
from test_support import verify, vereq, verbose, TestFailed from types import ModuleType as module # An uninitialized module has no __dict__ or __name__, and __doc__ is None foo = module.__new__(module) verify(foo.__dict__ is None) try: s = foo.__name__ except AttributeError: pass else: raise TestFailed, "__name__ = %s" % repr(s) # __doc__ is None by default in CPython but not in Jython. # We're not worrying about that now. #vereq(foo.__doc__, module.__doc__) try: foo_dir = dir(foo) except TypeError: pass else: raise TestFailed, "__dict__ = %s" % repr(foo_dir) try: del foo.somename except AttributeError: pass else: raise TestFailed, "del foo.somename" try: del foo.__dict__ except TypeError: pass else: raise TestFailed, "del foo.__dict__" try: foo.__dict__ = {} except TypeError: pass else: raise TestFailed, "foo.__dict__ = {}" verify(foo.__dict__ is None) # Regularly initialized module, no docstring foo = module("foo") vereq(foo.__name__, "foo") vereq(foo.__doc__, None) vereq(foo.__dict__, {"__name__": "foo", "__doc__": None}) # ASCII docstring foo = module("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc"}) # Unicode docstring foo = module("foo", u"foodoc\u1234") vereq(foo.__name__, "foo") vereq(foo.__doc__, u"foodoc\u1234") vereq(foo.__dict__, {"__name__": "foo", "__doc__": u"foodoc\u1234"}) # Reinitialization should not replace the __dict__ foo.bar = 42 d = foo.__dict__ foo.__init__("foo", "foodoc") vereq(foo.__name__, "foo") vereq(foo.__doc__, "foodoc") vereq(foo.bar, 42) vereq(foo.__dict__, {"__name__": "foo", "__doc__": "foodoc", "bar": 42}) verify(foo.__dict__ is d) if verbose: print "All OK"
{ "content_hash": "9f6e47d466e73493d58fd849bb743eb5", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 74, "avg_line_length": 23.786666666666665, "alnum_prop": 0.6104260089686099, "repo_name": "mariaantoanelam/Licenta", "id": "8a86ffb542a7d2594544d5e5a3d82bd4a51fdb6b", "size": "1808", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "Lib/test/test_module.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "31035" }, { "name": "HTML", "bytes": "134311" }, { "name": "Java", "bytes": "161404" }, { "name": "JavaScript", "bytes": "11470" }, { "name": "Python", "bytes": "4053763" } ], "symlink_target": "" }
from functools import partial from Util.constant import KickForce from Util.role import Role from ai.STA.Strategy.strategy import Strategy from ai.STA.Tactic.go_kick import GoKick from ai.STA.Tactic.goalkeeper import GoalKeeper from ai.STA.Tactic.stop import Stop from ai.STA.Tactic.tactic_constants import Flags # noinspection PyMethodMayBeStatic,PyMethodMayBeStatic class TestGoalKeeper(Strategy): def __init__(self, p_game_state): super().__init__(p_game_state) our_goal = self.game_state.field.our_goal_pose self.create_node(Role.GOALKEEPER, GoalKeeper(self.game_state, self.assigned_roles[Role.GOALKEEPER])) attacker = self.assigned_roles[Role.FIRST_ATTACK] node_idle = self.create_node(Role.FIRST_ATTACK, Stop(self.game_state, attacker)) node_go_kick = self.create_node(Role.FIRST_ATTACK, GoKick(self.game_state, attacker, target=our_goal, kick_force=KickForce.HIGH, forbidden_areas=self.game_state.field.border_limits)) player_has_kicked = partial(self.has_kicked, Role.FIRST_ATTACK) node_idle.connect_to(node_go_kick, when=self.ball_is_outside_goal) node_go_kick.connect_to(node_idle, when=self.ball_is_inside_goal) node_go_kick.connect_to(node_go_kick, when=player_has_kicked) @classmethod def required_roles(cls): return [Role.GOALKEEPER, Role.FIRST_ATTACK] def has_kicked(self, role): return self.roles_graph[role].current_tactic.status_flag == Flags.SUCCESS def ball_is_outside_goal(self): return not self.ball_is_inside_goal() def ball_is_inside_goal(self): return self.game_state.field.our_goal_area.point_inside(self.game_state.ball_position) \ or self.game_state.field.field_length / 2 < self.game_state.ball_position.x
{ "content_hash": "9cc58498f03e446bac59a71aca9cf022", "timestamp": "", "source": "github", "line_count": 51, "max_line_length": 119, "avg_line_length": 41.23529411764706, "alnum_prop": 0.6153114598193058, "repo_name": "RoboCupULaval/StrategyIA", "id": "3cb7eac907a999e932c54f37a27c866f5965d537", "size": "2140", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "ai/STA/Strategy/test_goal_keeper.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "528666" }, { "name": "Shell", "bytes": "2438" } ], "symlink_target": "" }
""" Copyright 2013 Steven Diamond Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cvxpy.constraints.exponential import ExpCone, OpRelConeQuad, RelEntrQuad from cvxpy.constraints.finite_set import FiniteSet from cvxpy.constraints.nonpos import Inequality, NonNeg, NonPos from cvxpy.constraints.power import PowCone3D, PowConeND from cvxpy.constraints.psd import PSD from cvxpy.constraints.second_order import SOC from cvxpy.constraints.zero import Equality, Zero
{ "content_hash": "85f3834a9ebb887b646b3dca1b86ae54", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 77, "avg_line_length": 41.26086956521739, "alnum_prop": 0.8124341412012644, "repo_name": "merraksh/cvxpy", "id": "7fb86b5170565991c5b1b6a46caf06502408c6a1", "size": "949", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cvxpy/constraints/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "120010" }, { "name": "C++", "bytes": "5687983" }, { "name": "CMake", "bytes": "694" }, { "name": "Makefile", "bytes": "6320" }, { "name": "Python", "bytes": "2149670" }, { "name": "SWIG", "bytes": "2403" }, { "name": "Shell", "bytes": "3117" } ], "symlink_target": "" }
import functools
import hashlib
import logging
import os
import subprocess

import requests

from toolbox.config import CRP_DEPLOY_HOOK, CRP_DEPLOY_TOKEN, DOWNLOADABLE_FILES_BUCKET, REPO_OWNER

from .utils import fatal_error, run_cmd


def _hash_directory(path):
    """Return a single SHA-1 digest covering the contents of every file under path."""
    digest = hashlib.sha1()
    for root, _, files in os.walk(path):
        for names in files:
            file_path = os.path.join(root, names)
            if os.path.isfile(file_path):
                with open(file_path, 'rb') as f:
                    while True:
                        buf = f.read(1024 * 1024)
                        if not buf:
                            break
                        digest.update(buf)
    return digest.hexdigest()


@functools.lru_cache(maxsize=8)
def _get_downloads_path_prefix(repo_path: str, repo_name: str, repo_branch: str) -> str:
    """Build the '<challenge key hash>/<downloads content hash>' prefix for published files."""
    challenge_key_hash = hashlib.sha1('/'.join((REPO_OWNER, repo_name, repo_branch)).encode('utf-8')).hexdigest()
    downloads_hash = _hash_directory(os.path.join(repo_path, 'downloads'))
    return '/'.join((challenge_key_hash, downloads_hash))


def list_auto_downloadable_files(repo_path: str, repo_name: str, repo_branch: str) -> list:
    """List the bucket-relative paths of every file under the repository's downloads directory."""
    downloads_path = os.path.join(repo_path, 'downloads')
    result = []
    for root, _, files in os.walk(downloads_path):
        prefix = _get_downloads_path_prefix(repo_path, repo_name, repo_branch)
        for filename in files:
            relpath = os.path.relpath(os.path.join(root, filename), downloads_path)
            result.append('/'.join((prefix, relpath)))

    return result


def upload_files(repo_path: str, repo_name: str, repo_branch: str):
    """Upload the downloads directory to the bucket and remove previously uploaded versions."""
    downloads_path = os.path.join(repo_path, 'downloads')
    if not list(os.walk(downloads_path)):
        return

    challenge_key_prefix, downloads_hash = _get_downloads_path_prefix(repo_path, repo_name, repo_branch).split('/')
    gs_parent_path = '/'.join((DOWNLOADABLE_FILES_BUCKET, challenge_key_prefix))
    gs_target_path = '/'.join((gs_parent_path, downloads_hash))

    try:
        ls_current = run_cmd(['gsutil', 'ls', gs_parent_path], raise_errors=True, check_output=True)
        current_content = list(map(lambda s: s.rstrip('/'), ls_current.decode('utf-8').rstrip().split('\n')))
    except subprocess.CalledProcessError:
        current_content = []

    if gs_target_path not in current_content:
        run_cmd(['gsutil', '-h', 'Content-Disposition: attachment', 'cp', '-r', downloads_path, gs_target_path])

    if current_content:
        run_cmd(['gsutil', 'rm', '-r', *current_content])


def update_hook(repo_path: str, repo_name: str, repo_branch: str, config: dict):
    """POST the challenge configuration and downloadable file list to the CRP deploy hook."""
    if not CRP_DEPLOY_HOOK or not CRP_DEPLOY_TOKEN:
        fatal_error('CRP_DEPLOY_HOOK/CRP_DEPLOY_TOKEN must be set!')

    # config['downloads'] lists files added by hand, while list_auto_downloadable_files covers the ones uploaded to Avatao.
    files = config.get('downloads', []) + list_auto_downloadable_files(repo_path, repo_name, repo_branch)
    payload = {
        # Challenge Key
        'repo_owner': REPO_OWNER,
        'repo_name': repo_name,
        'version': repo_branch,
        # Challenge Config
        'config': config,
        'files': files,
    }

    logging.debug('Sending update hook to %s ...', CRP_DEPLOY_HOOK)
    logging.debug(payload)

    response = requests.post(
        url=CRP_DEPLOY_HOOK,
        json=payload,
        headers={
            'Content-Type': 'application/json',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
            'X-Avatao-Token': CRP_DEPLOY_TOKEN,
        })

    if response.status_code not in (200, 204):
        fatal_error('%d %s: %s', response.status_code, response.reason, response.content)

    if response.status_code == 200:
        logging.debug(response.content)
{ "content_hash": "a038a2cd581b49d02eb5c954ffe38b87", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 115, "avg_line_length": 37.83168316831683, "alnum_prop": 0.6241821512693012, "repo_name": "avatao-content/challenge-toolbox", "id": "a0c71b5cc1ca728700f010006c0479e2336c75bc", "size": "3821", "binary": false, "copies": "1", "ref": "refs/heads/v3", "path": "toolbox/utils/deploy.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "59085" }, { "name": "C#", "bytes": "3300" }, { "name": "C++", "bytes": "3059" }, { "name": "CMake", "bytes": "10789" }, { "name": "CSS", "bytes": "3865" }, { "name": "Dockerfile", "bytes": "8735" }, { "name": "HTML", "bytes": "4518" }, { "name": "Hack", "bytes": "1479" }, { "name": "Java", "bytes": "1896" }, { "name": "JavaScript", "bytes": "640" }, { "name": "PHP", "bytes": "14005" }, { "name": "Python", "bytes": "81584" }, { "name": "Shell", "bytes": "2420" } ], "symlink_target": "" }
import numpy as np import operator import matplotlib import matplotlib.pyplot as plt from dircache import listdir def createDataSet(): """ create test case """ group = np.array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]]) labels = ['A', 'B', 'A', 'B'] return group, labels def classify0(inX, dataSet, labels, k): """ k-nn clasification inX: the input vector to classify, e.g. [1, 2, 3] dataSet: full matrix of training examples labels: a vector of labels """ dataSetSize = dataSet.shape[0] # compute distances # create n * 1 matrix with inX diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet sqDiffMat = diffMat ** 2 # sum all column values together sqDistances = sqDiffMat.sum(axis=1) distances = sqDistances ** 0.5 # sort array and return sorted indexes sortedDistIndicies = distances.argsort() classCount = {} # choose k nodes with minimum distance for i in range(k): voteIlabel = labels[sortedDistIndicies[i]] classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1 # sort on dict.iteritems by [1] sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True) return sortedClassCount[0][0] def file2matrix(filename): """ parsing data from a text """ fr = open(filename) arraryOlines = fr.readlines() numberOfLines = len(arraryOlines) # create n * 3 array returnMat = np.zeros((numberOfLines, 3)) classLabelVector = [] index = 0 for line in arraryOlines: line = line.strip() listFromLine = line.split('\t') # read the first 3 columns returnMat[index, :] = listFromLine[0:3] # read the labels classLabelVector.append(int(listFromLine[-1])) index += 1 return returnMat, classLabelVector def autoNorm(dataSet): """ normalized the values based on the total range """ minVals = dataSet.min(0) maxVals = dataSet.max(0) ranges = maxVals - minVals normDataSet = np.zeros(np.shape(dataSet)) m = dataSet.shape[0] normDataSet = dataSet - np.tile(minVals, (m, 1)) normDataSet = normDataSet / np.tile(ranges, (m, 1)) return normDataSet, ranges, minVals def dataingClassTest(): """ knn on 'datingTestSet2.txt' """ hoRatio = 0.10 datingDataMat, datingLabels = file2matrix('datingTestSet2.txt') # normalized data normMat, ranges, minVals = autoNorm(datingDataMat) m = normMat.shape[0] # compute size numTestVecs = int(m * hoRatio) errorCount = 0.0 # train with 90% and test with 10% for i in range(numTestVecs): classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3) print "the classifer came back with: %d, the real answer is: %d " % (classifierResult, datingLabels[i]) if (classifierResult != datingLabels[i]): errorCount += 1.0 print "the total error rate is: %f" % (errorCount / float(numTestVecs)) print '#' * 30 def img2vector(filename): returnVect = np.zeros((1, 1024)) fr = open(filename) for i in range(32): lineStr = fr.readline() for j in range(32): returnVect[0, 32 * i + j] = int(lineStr[j]) return returnVect def handwritingClassTest(): """ knn on digits """ hwLabels = [] trainingFileList = listdir('trainingDigits') m = len(trainingFileList) trainMat = np.zeros((m, 1024)) for i in range(m): fileNameStr = trainingFileList[i] fileStr = fileNameStr.split('.')[0] classNumStr = int(fileStr.split('_')[0]) hwLabels.append(classNumStr) trainMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr) testFileList = listdir('testDigits') errorCount = 0.0 mTest = len(testFileList) for i in range(mTest): fileNameStr = testFileList[i] fileStr = fileNameStr.split('.')[0] classNumStr = int(fileStr.split('_')[0]) vectorUnderTest = img2vector('testDigits/%s' % fileNameStr) 
classifierResult = classify0(vectorUnderTest, trainMat, hwLabels, 3) print "the classifier came back with: %d, the real answer is: %d"\ % (classifierResult, classNumStr) if classifierResult != classNumStr: errorCount += 1.0 print "the total error rate is: %f" % (errorCount / float(mTest)) print '#' * 30 def plot_result(datingDataMat, datingLabels): """ plot the dataset with labels on x=[1] y=[2] """ fig = plt.figure() ax = fig.add_subplot(111) # ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2]) # plot with different size and color for diferent labes # 15 * means multiple 15 to all label values ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * np.array(datingLabels), 15.0 * np.array(datingLabels)) plt.show() if __name__ == '__main__': # example 1 # group, labels = createDataSet() # print classify0([0, 0], group, labels, 3) # example 2 datingDataMat, datingLabels = file2matrix('datingTestSet2.txt') plot_result(datingDataMat, datingLabels) # dataingClassTest() # handwritingClassTest()
{ "content_hash": "389756b0015a09472d31a1f5cb86ddc7", "timestamp": "", "source": "github", "line_count": 168, "max_line_length": 111, "avg_line_length": 31.68452380952381, "alnum_prop": 0.6210783392823596, "repo_name": "qiyuangong/Machine_Learning_in_Action_QYG", "id": "38d4827d6dbd11097fa1e19f52b5613e0dc6101e", "size": "5323", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "02_kNN/kNN.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "446692" }, { "name": "Python", "bytes": "116471" } ], "symlink_target": "" }
from django.conf.urls import patterns, url urlpatterns = patterns('mysite.apps.base.views', (r'^/?$', 'homepage'), )
{ "content_hash": "d38fe50dabba756d6bf9f091bac04152", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 48, "avg_line_length": 28.4, "alnum_prop": 0.5774647887323944, "repo_name": "trueship/oauth2app", "id": "48ac82400ccd41d0ac031c5f21899d35380d5685", "size": "165", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "examples/mysite/apps/base/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "42633" }, { "name": "HTML", "bytes": "12381" }, { "name": "JavaScript", "bytes": "27490" }, { "name": "Python", "bytes": "128432" } ], "symlink_target": "" }
from collections import defaultdict import logging from multiprocessing import Manager, Lock, Process, Queue import signal import time class Machine(object): """ Implements a state machine The life cycle of a machine can be described as follows:: 1. A machine instance is created and configured:: a_bot = ShellBot(...) machine = Machine(bot=a_bot) machine.set(states=states, transitions=transitions, ... 2. The machine is switched on and ticked at regular intervals:: machine.start() 3. Machine can process more events than ticks:: machine.execute('hello world') 4. When a machine is expecting data from the chat space, it listens from the ``fan`` queue used by the shell:: engine.fan.put('special command') 5. When the machine is coming end of life, resources can be disposed:: machine.stop() credit: Alex Bertsch <[email protected]> securitybot/state_machine.py """ DEFER_DURATION = 0.0 # time to pause before working, in seconds TICK_DURATION = 0.2 # time to wait between ticks, in seconds def __init__(self, bot=None, states=None, transitions=None, initial=None, during=None, on_enter=None, on_exit=None, **kwargs): """ Implements a state machine :param bot: the bot linked to this machine :type : ShellBot :param states: All states supported by this machine :type states: list of str :param transitions: Transitions between states. Each transition is a dictionary. Each dictionary must feature following keys: source (str): The source state of the transition target (str): The target state of the transition Each dictionary may contain following keys: condition (function): A condition that must be true for the transition to occur. If no condition is provided then the state machine will transition on a step. action (function): A function to be executed while the transition occurs. :type transitions: list of dict :param initial: The initial state :type initial: str :param during: A mapping of states to functions to execute while in that state. Each key should map to a callable function. :type during: dict :param on_enter: A mapping of states to functions to execute when entering that state. Each key should map to a callable function. :type on_enter: dict :param on_exit: A mapping of states to functions to execute when exiting that state. Each key should map to a callable function. :type on_exit: dict Example:: machine = Machine(bot=bot) """ self.bot = bot self.lock = Lock() # prevent Manager() process to be interrupted handler = signal.signal(signal.SIGINT, signal.SIG_IGN) self.mutables = Manager().dict() # restore current handler for the rest of the program signal.signal(signal.SIGINT, handler) self.mixer = Queue() self.on_init(**kwargs) if states: self.build(states, transitions, initial, during, on_enter, on_exit) def on_init(self, **kwargs): """ Adds to machine initialisation This function should be expanded in sub-class, where necessary. Example:: def on_init(self, prefix='my.machine', **kwargs): ... """ pass def get(self, key, default=None): """ Retrieves the value of one key :param key: one attribute of this state machine instance :type key: str :param default: default value is the attribute has not been set yet :type default: an type that can be serialized This function can be used across multiple processes, so that a consistent view of the state machine is provided. 
""" with self.lock: value = self.mutables.get(key, default) if value is not None: return value return default def set(self, key, value): """ Remembers the value of one key :param key: one attribute of this state machine instance :type key: str :param value: new value of the attribute :type value: an type that can be serialized This function can be used across multiple processes, so that a consistent view of the state machine is provided. """ with self.lock: self.mutables[key] = value def build(self, states, transitions, initial, during=None, on_enter=None, on_exit=None): """ Builds a complete state machine :param states: All states supported by this machine :type states: list of str :param transitions: Transitions between states. Each transition is a dictionary. Each dictionary must feature following keys: source (str): The source state of the transition target (str): The target state of the transition Each dictionary may contain following keys: condition (function): A condition that must be true for the transition to occur. If no condition is provided then the state machine will transition on a step. action (function): A function to be executed while the transition occurs. :type transitions: list of dict :param initial: The initial state :type initial: str :param during: A mapping of states to functions to execute while in that state. Each key should map to a callable function. :type during: dict :param on_enter: A mapping of states to functions to execute when entering that state. Each key should map to a callable function. :type on_enter: dict :param on_exit: A mapping of states to functions to execute when exiting that state. Each key should map to a callable function. :type on_exit: dict """ if during is None: during = {} if on_enter is None: on_enter = {} if on_exit is None: on_exit = {} states = sorted(list(set(states))) self._states = dict() for state in states: self._states[state] = State(state, during.get(state, None), on_enter.get(state, None), on_exit.get(state, None)) try: self.mutables['initial_state'] = self._states[initial].name self.mutables['state'] = self.mutables['initial_state'] except KeyError: raise ValueError(u'Invalid initial state {}'.format(initial)) self._transitions = defaultdict(list) for transition in transitions: try: source_state = self._states[transition['source']] except KeyError: if 'source' not in transition: raise ValueError(u'Missing source state') else: raise ValueError(u'Invalid source state {}'.format( transition['source'])) try: target_state = self._states[transition['target']] except KeyError: if 'target' not in transition: raise ValueError(u'Missing target state') else: raise ValueError(u'Invalid target state {}'.format( transition['target'])) item = Transition(source_state, target_state, transition.get('condition', None), transition.get('action', None)) self._transitions[transition['source']].append(item) def state(self, name): """ Provides a state by name :param name: The label of the target state :type name: str :return: State This function raises KeyError if an unknown name is provided. """ return self._states[name] @property def current_state(self): """ Provides current state :return: State This function raises AttributeError if it is called before ``build()``. 
""" try: name = self.mutables['state'] except KeyError: raise AttributeError('Machine has not been built') return self._states[name] def reset(self): """ Resets a state machine before it is restarted :return: True if the machine has been actually reset, else False This function moves a state machine back to its initial state. A typical use case is when you have to recycle a state machine multiple times, like in the following example:: if new_cycle(): machine.reset() machine.start() If the machine is running, calling ``reset()`` will have no effect and you will get False in return. Therefore, if you have to force a reset, you may have to stop the machine first. Example of forced reset:: machine.stop() machine.reset() """ if self.is_running: logging.warning(u"Cannot reset a running state machine") return False # purge the mixer queue while not self.mixer.empty(): self.mixer.get() # restore initial state self.set('state', self.get('initial_state')) logging.warning(u"Resetting machine to '{}'".format( self.current_state.name)) # do the rest self.on_reset() return True def on_reset(self): """ Adds processing to machine reset This function should be expanded in sub-class, where necessary. Example:: def on_reset(self): self.sub_machine.reset() """ pass def step(self, **kwargs): """ Brings some life to the state machine Thanks to ``**kwargs``, it is easy to transmit parameters to underlying functions: - ``current_state.during(**kwargs)`` - ``transition.condition(**kwargs)`` Since parameters can vary on complex state machines, you are advised to pay specific attention to the signatures of related functions. If you expect some parameter in a function, use ``kwargs.get()``to get its value safely. For example, to inject the value of a gauge in the state machine on each tick:: def remember(**kwargs): gauge = kwargs.get('gauge') if gauge: db.save(gauge) during = { 'measuring', remember } ... machine.build(during=during, ... ) while machine.is_running: machine.step(gauge=get_measurement()) Or, if you have to transition on a specific threshold for a gauge, you could do:: def if_threshold(**kwargs): gauge = kwargs.get('gauge') if gauge > 20: return True return False def raise_alarm(): mail.post_message() transitions = [ {'source': 'normal', 'target': 'alarm', 'condition': if_threshold, 'action': raise_alarm}, ... ] ... machine.build(transitions=transitions, ... ) while machine.is_running: machine.step(gauge=get_measurement()) Shellbot is using this mechanism for itself, and the function can be called at various occasions: - machine tick - This is done at regular intervals in time - input from the chat - Typically, in response to a question - inbound message - Received from subscription, over the network Following parameters are used for machine ticks: - event='tick' - fixed value Following parameters are used for chat input: - event='input' - fixed value - arguments - the text that is submitted from the chat Following parameters are used for subscriptions: - event='inbound' - fixed value - message - the object that has been transmitted This machine should report on progress by sending messages with one or multiple ``self.bot.say("Whatever message")``. 
""" self.current_state.during(**kwargs) for transition in self._transitions[self.current_state.name]: if transition.condition(**kwargs): logging.debug('Transitioning: {0}'.format(transition)) transition.action() self.current_state.on_exit() self.mutables['state'] = transition.target.name self.current_state.on_enter() break def start(self, tick=None, defer=None): """ Starts the machine :param tick: The duration set for each tick (optional) :type tick: positive number :param defer: wait some seconds before the actual work (optional) :type defer: positive number :return: either the process that has been started, or None This function starts a separate thread to tick the machine in the background. """ if tick: assert tick > 0.0 # number of seconds self.TICK_DURATION = tick if defer is not None: assert defer >= 0.0 # number of seconds self.DEFER_DURATION = defer process = Process(target=self.run) # do not daemonize process.start() while not self.is_running: # prevent race condition on stop() time.sleep(0.001) return process def restart(self, **kwargs): """ Restarts the machine This function is very similar to reset(), except that it also starts the machine on successful reset. Parameters given to it are those that are expected by start(). Note: this function has no effect on a running machine. """ if not self.reset(): return False self.start(**kwargs) return True def stop(self): """ Stops the machine This function sends a poison pill to the queue that is read on each tick. """ if self.is_running: self.mixer.put(None) time.sleep(self.TICK_DURATION+0.05) def run(self): """ Continuously ticks the machine This function is looping in the background, and calls ``step(event='tick')`` at regular intervals. The recommended way for stopping the process is to call the function ``stop()``. For example:: machine.stop() The loop is also stopped when the parameter ``general.switch`` is changed in the context. For example:: engine.set('general.switch', 'off') """ logging.info(u"Starting machine") self.set('is_running', True) self.on_start() time.sleep(self.DEFER_DURATION) try: while self.bot.engine.get('general.switch', 'on') == 'on': try: if self.mixer.empty(): self.on_tick() time.sleep(self.TICK_DURATION) continue item = self.mixer.get(True, self.TICK_DURATION) if item is None: logging.debug('Stopping machine on poison pill') break logging.debug('Processing item') self.execute(arguments=item) except Exception as feedback: logging.exception(feedback) break except KeyboardInterrupt: pass self.on_stop() self.set('is_running', False) logging.info(u"Machine has been stopped") def on_start(self): """ Adds to machine start This function is invoked when the machine is started or restarted. It can be expanded in sub-classes where required. Example:: def on_start(self): # clear bot store on machine start self.bot.forget() """ pass def on_stop(self): """ Adds to machine stop This function is invoked when the machine is stopped. It can be expanded in sub-classes where required. 
Example:: def on_stop(self): # dump bot store on machine stop self.bot.publisher.put( self.bot.id, self.bot.recall('input')) """ pass def on_tick(self): """ Processes one tick """ self.step(event='tick') message = self.bot.subscriber.get() if message: self.step(event='inbound', message=message) def execute(self, arguments=None, **kwargs): """ Processes data received from the chat :param arguments: input to be injected into the state machine :type arguments: str is recommended This function can be used to feed the machine asynchronously """ self.step(event='input', arguments=arguments, **kwargs) @property def is_running(self): """ Determines if this machine is runnning :return: True or False """ return self.get('is_running', False) class State(object): """ Represents a state of the machine Each state has a function to perform while it's active, when it's entered into, and when it's exited. These functions may be None. """ def __init__(self, name, during=None, on_enter=None, on_exit=None): """ Represents a state in the machine :param name: name of the state :type name: str ;param during: A function to call while this state is active. :type during: function :param on_enter: A function to call when transitioning into this state. :type on_enter: function :param on_exit: Function to call when transitioning out of this state. :type on_exit: function """ self.name = name self._during = during self._on_enter = on_enter self._on_exit = on_exit def __repr__(self): """ Provides a representation of this state :rtype: str """ return u"State({0}, {1}, {2}, {3})".format(self.name, self._during, self._on_enter, self._on_exit ) def __str__(self): """ Provides a string handle to this state :rtype: str """ return self.name def during(self, **kwargs): """ Does some stuff while in this state """ if self._during is not None: self._during(**kwargs) def on_enter(self): """ Does some stuf while transitioning into this state """ if self._on_enter is not None: self._on_enter() def on_exit(self): """ Does some stuff while transitioning out of this state """ if self._on_exit is not None: self._on_exit() class Transition(object): """ Represents a transition between two states Each transition object holds a reference to its source and destination states, as well as the condition function it requires for transitioning and the action to perform upon transitioning. """ def __init__(self, source, target, condition=None, action=None): """ Represents a transition between two states Args: source (State): The source State for this transition. target (State): The destination State for this transition. condition (function): The transitioning condition callback. action (function): An action to perform upon transitioning. """ self.source = source self.target = target self._condition = condition self._action = action def __repr__(self): """ Provides a representation of this transition :rtype: str """ return u"Transition({0}, {1}, {2}, {3})".format(repr(self.source), repr(self.target), self._condition, self._action ) def __str__(self): """ Provides a string handle to this transition :rtype: str """ return "{0} => {1}".format(self.source, self.target) def condition(self, **kwargs): """ Checks if transition can be triggered :return: True or False Condition default to True if none is provided """ return True if self._condition is None else self._condition(**kwargs) def action(self): """ Does some stuff while transitioning """ if self._action is not None: self._action()
{ "content_hash": "a3cf959aa84f2a8e0e62933754f90709", "timestamp": "", "source": "github", "line_count": 749, "max_line_length": 79, "avg_line_length": 29.85580774365821, "alnum_prop": 0.547893748323048, "repo_name": "bernard357/shellbot", "id": "65a3d7a222ef2d79df2069838f446fb130682932", "size": "23169", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "shellbot/machines/base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "2233" }, { "name": "Python", "bytes": "807558" } ], "symlink_target": "" }
COMMAND = "0" SYSTEM = "1" PEER = "2" CLIENT = "4" # Command based messages. COMMAND_SHUT_DOWN_REQUEST = "001" COMMAND_SET_MASTER_REQUEST = "002" COMMAND_REMOTE_CONNECT_REQUEST = "010" COMMAND_REMOTE_DISCONNECT_REQUEST = "011" COMMAND_REMOTE_CONNECT_PIKA_REQUEST = "012" COMMAND_REMOTE_DISCONNECT_PIKA_REQUEST = "013" COMMAND_REPLY = "020" COMMAND_GET_QUEUE_SIZE_REQUEST = "021" COMMAND_GET_QUEUE_SIZE_REPLY_MESSAGE = "022" COMMAND_PURGE_QUEUES_REQUEST = "023" COMMAND_DELETE_QUEUES_REQUEST = "024" COMMAND_UNLOCK_QUEUE_REQUEST = "025" COMMAND_FREEZE_QUEUE_REQUEST = "026" COMMAND_LIST_QUEUES_REQUEST = "030" COMMAND_LIST_QUEUES_REPLY = "031" COMMAND_ADD_WORKERS_REQUEST = "040" COMMAND_REMOVE_WORKERS_REQUEST = "041" COMMAND_GET_PECKING_ORDER_REQUEST = "050" COMMAND_GET_PECKING_ORDER_REPLY = "051" COMMAND_GET_SETUP_DATA_REQUEST = "052" COMMAND_GET_SETUP_DATA_REPLY_MESSAGE = "053" COMMAND_GET_STATISTICS_REQUEST = "060" COMMAND_GET_STATISTICS_REPLY = "061" # System based messages (internal). SYSTEM_NOTIFICATION_MESSAGE = "100" SYSTEM_ERROR_MESSAGE = "101" SYSTEM_STATS_MESSAGE = "102" SYSTEM_THREAD_STATE = "110" SYSTEM_STOP_THREAD = "111" SYSTEM_SOCKET_STATE = "112" SYSTEM_PEER_CONNECTION_UDPATE = "120" SYSTEM_CONNECT_PEER_REQUEST_SOCKET = "130" SYSTEM_DISCONNECT_PEER_REQUEST_SOCKET = "131" SYSTEM_BRIDGE_WORKER_TIMED_OUT = "170" SYSTEM_ORDERED_QUEUE_OWNERS_EXHAUSTED = "171" SYSTEM_UPDATE_SHARED_MEMORY_CONNECTIONS = "181" SYSTEM_SET_PQM_QUEUE_ACCESS_DATA = "182" SYSTEM_DATA_WORKER_STATUS_REPORT = "190" SYSTEM_UPDATE_DATA_WORKER_CONTROL_DATA = "191" SYSTEM_UPDATE_DATA_WORKER_SETUP_DATA = "192" SYSTEM_UPDATE_QM_QUEUE_SIZE_DICTIONARY = "195" SYSTEM_UPDATE_PQM_QUEUE_SIZE_DICTIONARIES = "196" SYSTEM_PUSH_LOCAL_QUEUE_DATA = "197" # Peer based messages (networked). PEER_ONLINE_HANDSHAKE_REQUEST = "200" PEER_ONLINE_HANDSHAKE_REPLY = "201" PEER_OFFLINE = "202" PEER_HEART_BEAT = "210" PEER_HEART_BEAT_FAILURE = "211" PEER_DISCONNECTING_DUE_TO_HEART_BEAT_FAILURE = "212" PEER_CLIENT_DECLARE_EXCHANGES_REQUEST = "230" PEER_CLIENT_DECLARE_QUEUES_REQUEST = "231" PEER_CLIENT_DELETE_QUEUES_REQUEST = "232" PEER_CLIENT_LOCK_QUEUES_REQUEST = "233" PEER_CLIENT_UNLOCK_QUEUES_REQUEST = "234" PEER_MASTER_SETUP_DATA = "240" PEER_MASTER_CONTROL_DATA = "241" PEER_REQUEST_MASTER_DATA = "242" PEER_ORDERED_QUEUES_OWNERS_EXHAUSTED = "250" PEER_FORWARDED_COMMAND_MESSAGE = "260" # Client based messages (networked). CLIENT_DECLARE_EXCHANGES_REQUEST = "400" CLIENT_DECLARE_EXCHANGES_REPLY = "401" CLIENT_DECLARE_QUEUES_REQUEST = "402" CLIENT_DECLARE_QUEUES_REPLY = "403" CLIENT_DELETE_QUEUES_REQUEST = "404" CLIENT_DELETE_QUEUES_REPLY = "405" CLIENT_DATA_PUSH_REQUEST = "410" CLIENT_DATA_PUSH_REPLY = "411" CLIENT_DATA_PULL_REQUEST = "412" CLIENT_DATA_PULL_REPLY = "413" CLIENT_REQUEUE_DATA_REQUEST = "414" CLIENT_REQUEUE_DATA_REPLY = "415" CLIENT_LOCK_QUEUES_REQUEST = "420" CLIENT_LOCK_QUEUES_REPLY = "421" CLIENT_UNLOCK_QUEUES_REQUEST = "422" CLIENT_UNLOCK_QUEUES_REPLY = "423" CLIENT_DATA_STORE_REQUEST = "430" CLIENT_DATA_STORE_REPLY = "431" CLIENT_DATA_RETRIEVE_REQUEST = "432" CLIENT_DATA_RETRIEVE_REPLY = "433" CLIENT_GET_DATA_STORES_REQUEST = "434" CLIENT_GET_DATA_STORES_REPLY = "435" CLIENT_GET_PECKING_ORDER_REQUEST = "436" CLIENT_GET_PECKING_ORDER_REPLY = "437"
{ "content_hash": "6f75ac258802cf5a9153203b9730b7e2", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 53, "avg_line_length": 34.63157894736842, "alnum_prop": 0.743161094224924, "repo_name": "appfirst/distributed_queue_manager", "id": "c375b48ce27eb5d70b6552db66de199d2174097a", "size": "3314", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "afqueue/messages/message_types.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "851775" } ], "symlink_target": "" }
from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class AvailableSkusOperations(object): """AvailableSkusOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.databoxedge.v2020_05_01_preview.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.DataBoxEdgeSkuList"] """List all the available Skus and information related to them. List all the available Skus and information related to them. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either DataBoxEdgeSkuList or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataBoxEdgeSkuList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.DataBoxEdgeSkuList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('DataBoxEdgeSkuList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code 
not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBoxEdge/availableSkus'} # type: ignore
{ "content_hash": "5f885c6925193b3f83e665f55dbe2486", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 133, "avg_line_length": 43.342592592592595, "alnum_prop": 0.651997436445204, "repo_name": "Azure/azure-sdk-for-python", "id": "01d14b89253e663ccb080839eaad1faf48152add", "size": "5148", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_05_01_preview/operations/_available_skus_operations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
"""SCons.Platform.os2 Platform-specific initialization for OS/2 systems. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Platform.Platform() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Platform/os2.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" import win32 def generate(env): if 'ENV' not in env: env['ENV'] = {} env['OBJPREFIX'] = '' env['OBJSUFFIX'] = '.obj' env['SHOBJPREFIX'] = '$OBJPREFIX' env['SHOBJSUFFIX'] = '$OBJSUFFIX' env['PROGPREFIX'] = '' env['PROGSUFFIX'] = '.exe' env['LIBPREFIX'] = '' env['LIBSUFFIX'] = '.lib' env['SHLIBPREFIX'] = '' env['SHLIBSUFFIX'] = '.dll' env['LIBPREFIXES'] = '$LIBPREFIX' env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ] env['HOST_OS'] = 'os2' env['HOST_ARCH'] = win32.get_architecture().arch # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
{ "content_hash": "d6eb8a578a5f6fb4e56ab8c0659bdfae", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 107, "avg_line_length": 38.91379310344828, "alnum_prop": 0.689853788214444, "repo_name": "angad/libjingle-mac", "id": "0576abdd65a45161009dbbfa755e5226734bee3c", "size": "2257", "binary": false, "copies": "14", "ref": "refs/heads/master", "path": "scons-2.2.0/build/lib/SCons/Platform/os2.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "2015946" }, { "name": "C++", "bytes": "9306077" }, { "name": "Objective-C", "bytes": "28091" }, { "name": "Perl", "bytes": "50523" }, { "name": "Python", "bytes": "4283804" }, { "name": "Shell", "bytes": "1445083" } ], "symlink_target": "" }
import re from PIL import Image, ImageOps import urllib2 import oembed from recaptcha.client import captcha from akismet import Akismet import pifilter import logging import time import uuid from time import gmtime, strftime import random from datetime import datetime import urllib import simplejson as json from math import log import rest def startakismet(akismetcfg): return Akismet(key=akismetcfg['apikey'], agent=akismetcfg['clientname']) def getembedcode(url, **kwargs): '''Oembed + Oohembed. Accept a URL and one of the arguments below, return dict with JSON. From sample on http://api.embed.ly/docs/oembed''' api_url = 'http://api.embed.ly/1/oembed?' ACCEPTED_ARGS = ['maxwidth', 'maxheight', 'format'] params = {'url':url } for key, value in kwargs.items(): if key not in ACCEPTED_ARGS: raise ValueError("Invalid Argument %s" % key) params[key] = value oembed_call = "%s%s" % (api_url, urllib.urlencode(params)) result = json.loads(urllib2.urlopen(oembed_call).read()) if 'html' in result: return result['html'] else: return None def buildtree(master,messagedb): '''Recieve a message, return a consolidated tree of all comments''' tree = [] def expandchildren(commenttoexpand): '''Expand child comments, adding to tree''' childcommentids = commenttoexpand['comments'] for childcommentid in childcommentids: childcomment = messagedb.find_one({'_id':childcommentid}) # Make sure DB actually returned some comments if childcomment is not None: tree.append(childcomment) if len(childcomment['comments']): tree.append('in') expandchildren(childcomment) tree.append('out') expandchildren(master) return tree class Message(object): '''Submitted message''' def __init__(self,config=None,antispam=None,_id=None,parentid=None,localfile=None,handler=None,messagedata=None,dehydrated=None): '''Create message''' if dehydrated: # Recreate from a dict # See http://stackoverflow.com/questions/1305532/convert-python-dict-to-object self.__dict__.update(dehydrated) return # FIXME: combine if config is None: logging.error('Please provide config when creating new messages') return if antispam is None: logging.error('Please provide antispam info when creating new messages') return if _id is None: logging.error('Please provide _id info when creating new messages') return # Info that's common across all messages # Note we store a list as it's JSON serializable. A native datetime object isn't. 
now = datetime.utcnow() self.posttime = { 'year':now.year, 'month':now.month, 'day':now.day, 'hour':now.hour, 'minute':now.minute, 'second':now.second } self.prettydate = self.getprettydate() self._id = _id self.localfile = localfile self.preview, self.embedcode, self.headline, self.intro, self.thread, self.availavatars, self.likecount = None, None, None, None, None, None, None self.treecount = 0 self.useralerts, self.comments = [], [] self.score = 1 if messagedata: # Preconfigured data from injector self.author = messagedata['author'] self.posttext = messagedata['posttext'] self.challenge = messagedata['challenge'] self.response = messagedata['response'] self.ip = messagedata['ip'] self.useragent = messagedata['useragent'] self.referer = messagedata['referer'] self.imagedata = messagedata['imagedata'] self.host = messagedata['host'] self.article = messagedata['article'] self.sessionid = '1' elif handler: # Create message from handler data self.author = 'Anonymous' self.posttext = handler.get_argument('posttext') self.useragent = handler.request.headers['User-Agent'] # nginx real IP if 'X-Real-Ip' in handler.request.headers: self.ip = handler.request.headers['X-Real-Ip'] else: self.ip = handler.request.remote_ip self.referer = handler.request.headers['Referer'] self.host = handler.request.headers['Host'] # Add capctha info if enabled if handler.application.config['captcha'].as_bool('enabled'): self.challenge = handler.get_argument('recaptcha_challenge_field') self.response = handler.get_argument('recaptcha_response_field') else: self.challenge,self.response = None, None # Add image data if enabled if 'image' in handler.request.files: self.imagedata = handler.request.files['image'] else: self.imagedata = None self.sessionid = handler.getorsetsessionid() else: logging.error('No handler specified, and not dehydrated! Cannot create message') # Are we an article or a reply self.parentid = parentid if self.istop(): # We're a top-level article logging.info('Creating new article '+str(self._id)) # Available avatars for sessions - copy of config. self.availavatars = config['posting']['avatars'] logging.info('DEBUG:'+str(len(self.availavatars))+' created in this article') random.shuffle(self.availavatars) # Create dict of session / avatar matchings self.sessionavatars = {} # Thread is myself self.thread = self._id # Grab an avatar from my own list! 
self.sessionavatars[self.sessionid] = self.availavatars.pop() logging.info('DEBUG:'+str(len(self.availavatars))+' left in this article') # Currently only top level messages can have links, pictures or embeds # Process embeds (FIXME - must come before saveimages due to check for existing embeds in saveimages) self.checklinksandembeds(config) # If there's no local image file, save image from web url if self.localfile is None: self.saveimages(config) # Make preview if self.localfile is not None and config['images'].as_bool('enabled'): self.makepreviewpic(self.localfile,config['images']) logging.info('Made preview picture.') else: logging.warn('Not making image as local file not specified or images disabled.') logging.info('Preview pic is: '+str(self.preview)) else: # We're a reply logging.info('Creating new reply '+str(self._id)) # Add our new comment ID as a child of parent, increment parents score parent = handler.application.dbconnect.messages.find_one({'_id':int(parentid)}) if parentid: # We're a reply parent['comments'].append(_id) # Increment parent score for message parent['score'] += config['scoring'].as_int('comment') # Every reply copies its 'thread' from its parent, which points back to the original post self.thread = parent['thread'] logging.info('Thread is '+str(self.thread)) # Save parent now # Note that 'ancestor' (later) may be the same comment, so we need to save this now. logging.info('Adding comment '+str(self._id)+' as child of parent '+str(parentid)) handler.application.dbconnect.messages.save(parent) # Take an avatar from the sessions avatar/dict in ancestor ancestor = handler.application.dbconnect.messages.find_one({'_id':int(self.thread)}) if self.sessionid not in ancestor['sessionavatars']: # This is the first time this sessionid has commented # Grab an available avatar from the ancestor to use for this sessionid myavatar = ancestor['availavatars'].pop() logging.info('DEBUG:'+str(len(ancestor['availavatars']))+' left in the parent') ancestor['sessionavatars'][self.sessionid] = myavatar logging.info('Sessionid '+self.sessionid+' has commented in this thread for the first time. Assigned '+myavatar+' for message '+str(self._id)) ancestor['treecount'] += 1 handler.application.dbconnect.messages.save(ancestor) # Here ancestor is saved to db with correct info, but it gets overridden later else: logging.info('This sessionid '+self.sessionid+' has commented in this thread before, using existing avatar '+ancestor['sessionavatars'][self.sessionid]) else: logging.warn('Error! Could not find parent with parentid '+str(parentid)+' in DB') # Validate the users input #self.getimagetext(self.localfile,config['images']) self.checktext(config) self.checkcaptcha(config) self.checkspam(config,antispam) self.checkporn(config) self.makeintro(config['posting']) # Override existing links self.link = '/discuss/'+str(self._id) def saveimages(self,config): '''Save images for original posts''' if config['images'].as_bool('enabled'): if self.imagedata is None and self.embedcode is None: self.useralerts.append(config['alerts']['noimageorembed']) elif self.imagedata is not None: # Save image data to local file imagefile = self.imagedata[0] logging.info('Saving image: '+imagefile['filename']) self.localfile = config['images']['cachedir']+str(self._id)+'.'+imagefile['filename'].split('.')[-1] open(self.localfile,'wb').write(imagefile['body']) # Set self.imagedata to None now we've saved our image data to a file. # We need this as leaving the unencoded messagedata around will screw mongo up. 
self.imagedata = None else: # We have no image data as we're an embed only post return return def makepreviewpic(self,imagefile,imageconfig): '''Reduce images larger than a certain size''' myimage = Image.open(imagefile) width,height = myimage.size aspect = float(width) / float(height) maxwidth = imageconfig.as_int('maxwidth') maxheight = imageconfig.as_int('maxheight') maxsize = (maxwidth,maxheight) # Don't bother if image is already smaller if width < maxwidth and height < maxheight: logging.info('Small image, using existing pic at: '+imagefile) self.preview = imagefile # Resize, save, return preview file name else: myimage.thumbnail(maxsize,Image.ANTIALIAS) newfilename = imagefile.replace('cache','thumbs').split('.')[-2]+'_preview.'+imagefile.split('.')[-1] try: myimage.save(newfilename) logging.info('Saved preview pic to: '+newfilename) self.preview = newfilename except: pass return def checktext(self,config): '''Ensure they're ranting enough, but not too much!''' postwords = self.posttext.strip() wordlist = postwords.split() uniquewords = set(wordlist) # Zero sized posts if len(postwords) == 0: self.useralerts.append(config['alerts']['zero']) # Overlong posts elif len(wordlist) > config['posting'].as_float('longpost'): self.useralerts.append(config['alerts']['overlong']) else: # Check text isn't full of dupes totalwords = len(wordlist) totaluniquewords = len(uniquewords) # Float so our answer is a float if totaluniquewords / float(totalwords) < config['posting'].as_float('threshhold'): self.useralerts.append(config['alerts']['notunique']) # Check post doesnt mention banned words for bannedword in config['posting']['bannedwords']: if bannedword in uniquewords: self.useralerts.append(config['alerts']['bannedwords']) return def checkcaptcha(self,config): '''Check for correct CAPTCHA answer''' if config['captcha'].as_bool('enabled'): recaptcha_response = captcha.submit(self.challenge, self.response, config['captcha']['privkey'], self.ip) if not recaptcha_response.is_valid: self.useralerts.append(config['alerts']['nothuman']) self.nothuman = True return def checkspam(self,config,antispam): '''Check for spam using Akismet''' if config['posting']['akismet'].as_bool('enabled'): try: spam = antispam.comment_check(self.posttext,data = { 'user_ip':self.ip, 'user_agent':self.useragent, 'referrer':self.referer, 'SERVER_ADDR':self.host }, build_data=True, DEBUG=False) # Python Akismet library can fail on some types of unicode except UnicodeEncodeError: spam = True if spam: self.useralerts.append(config['alerts']['spam']) self.spam = True return def checklinksandembeds(self,config): '''Process any links in the text''' maxwidth = config['images'].as_int('maxwidth') maxheight = config['images'].as_int('maxheight') linkre = re.compile(r"(http://[^ ]+)") for link in linkre.findall(self.posttext): # lopp through links gettng embeds logging.info('Getting embed data for link: '+link) try: self.embedcode = getembedcode(link, maxwidth=maxwidth, maxheight=maxheight) if self.embedcode: logging.info('Embed data found!') else: logging.info('Embed data not found!') except: logging.warn('Getting embed data for link failed - most likely embedly 404') pass self.original = self.posttext self.posttext = linkre.sub('', self.posttext) return def checkporn(self,config): '''Check images for porn''' def savegrayscale(imagefile): '''Convert image to greyscale and save''' adultimage = Image.open(imagefile) adultimage = ImageOps.grayscale(adultimage) adultimage.save(imagefile) return if self.localfile and 
config['images'].as_bool('enabled') and config['images'].as_bool('adult'): count = 1 response = {} while count < 3: try: logging.info('Checking for porn, try '+str(count)) response = pifilter.checkimage( self.localfile, config['posting']['pifilter']['customerid'], aggressive = config['posting']['pifilter'].as_bool('aggressive') ) break except: logging.error('Could not open pifilter URL') time.sleep(5) count = count+1 if 'result' in response: if response['result']: logging.warn('message submission '+str(self._id)+' with image '+self.localfile+' is porn.') # Make a greyscale version and use that instead if config['images']['adultaction'] in ['gray','grey']: savegrayscale(self.localfile) logging.info('Saving greyscale version...') else: self.useralerts.append(config['alerts']['porn']) else: logging.info('image is clean') else: # No response from pifilter pass return def makeintro(self,postingconfig): '''Reduce the headline text in very long posts if needed''' postwords = self.posttext.replace('\n',' NEWLINE ').split() leeway = postingconfig.as_int('leeway') choplen = postingconfig.as_int('choplen') longpost = postingconfig.as_int('longpost') if len(postwords) < leeway: self.headline = self.posttext else: self.headline = ' '.join(postwords[:choplen]).replace('NEWLINE','')+'...' self.intro = '...'+' '.join(postwords[choplen:longpost]) return def getcountry(self,ip): '''Get user country - currently unused''' gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE) print gi.country_code_by_addr(ip) def getposttimedt(self): '''Return a datetime version of 'posttime' style dictionary ''' return datetime(self.posttime['year'], self.posttime['month'], self.posttime['day'], self.posttime['hour'], self.posttime['minute'], self.posttime['second']) def getprettydate(self): '''Return pretty printed date, with suffixes (st, nd) and no leading zeros''' posttimedt = self.getposttimedt() prettytime = str(int(posttimedt.strftime("%I")))+':'+posttimedt.strftime("%M %p") daysuffixes = ['st','nd','rd'] + 17*['th'] + ['st','nd','rd'] + 7*['th'] + ['st'] prettydate = str(int(posttimedt.strftime("%d")))+daysuffixes[int(posttimedt.strftime("%d"))-1]+posttimedt.strftime(" %B %Y") return prettytime+' '+prettydate def getrank(self): '''Get rank for message. 
Based on http://amix.dk/blog/post/19574 (Reddit style) ''' order = log(max(abs(self.score), 1), 10) sign = 1 if self.score > 0 else -1 if self.score < 0 else 0 seconds = time.mktime(self.getposttimedt().timetuple()) - 1134028003 rank = round(order + sign * seconds / 45000, 7) return rank def updatetreecount(self,db): '''Get total of children and grandchildren''' #logging.info('Updating tree count on '+str(self._id)) def addchildrentototal(item): '''Add children of a post recursively''' # Only count down level posts, not top level if item.parentid: self.treecount += 1 if len(item.comments) == 0: return else: for commentid in item.comments: commentdoc = db.messages.find_one(commentid) # Some docs have been deleted from the DB if commentdoc is not None: comment = Message(dehydrated=commentdoc) addchildrentototal(comment) self.treecount = 0 addchildrentototal(self) return def getlikecount(self): '''Return count of Facebook likes for a target URL''' targeturl = 'http://imeveryone.com/discuss/'+str(self._id) endpoint = 'https://api.facebook.com' page = '/method/fql.query' querydict = { 'query':'''SELECT total_count FROM link_stat WHERE url="'''+targeturl+'''"''', 'format':'json', } fqlhelper = rest.RESTHelper(endpoint='https://api.facebook.com') queryresult = fqlhelper.get(page,querydict=querydict,usejson=True) self.likecount = queryresult[0]['total_count'] return def istop(self): '''Check if message is toplevel''' return not self.parentid
{ "content_hash": "bb5744660511a1ec5525bc5f8cf2dc40", "timestamp": "", "source": "github", "line_count": 471, "max_line_length": 180, "avg_line_length": 43.81528662420382, "alnum_prop": 0.5662644764258371, "repo_name": "mikemaccana/imeveryone", "id": "ca4d64adedd9157d01e58d2d7199452483a84e88", "size": "20662", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "usermessages.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "18405" }, { "name": "JavaScript", "bytes": "7631" }, { "name": "Python", "bytes": "66662" } ], "symlink_target": "" }
data_dir = '/home/cn/data/sample_tick/' import os import shutil # higher level file operations - more choices for error handling os.path.join('usr', 'bin', 'spam') # join path cur_dir = os.getcwd() # current working dir os.chdir('/tmp'); os.getcwd() # move around os.chdir('/home/cn/program/python/sandbox'); os.getcwd() if not os.path.exists('/tmp/blah'): os.mkdir('/tmp/blah') os.rmdir('/tmp/blah') # only work if the dir is empty shutil.rmtree('/tmp/blah', ignore_errors=True) # works for most dir - shutils is more adaptable ## ABS and REL paths os.path.abspath('.') os.path.isabs('.') os.path.relpath('/tmp/blah') ## deal with names - split names etc. os.path.basename(os.path.join(os.getcwd(), 'test_file.py')) os.path.dirname(os.path.join(os.getcwd(), 'test_file.py')) os.path.split(os.path.join(os.getcwd(), 'test_file.py')) # zip, unzip, tar, untar etc. shutil.disk_usage('.') # create a new file if not os.path.exists('/tmp/to_arc'): os.mkdir('/tmp/to_arc') to_arc = '/tmp/to_arc/test_arc.txt' with open(to_arc, 'a') as fh: # touch behavior - will throw if no immediate dir available os.utime(to_arc, times=None) fh.writelines('\n'.join(['ha', 'asdfjalsdjadf'])) # writelines does NOT add new lines. Genius! shutil.get_archive_formats() # all supported formats - depending on other tools in the os # make archive needs a dir to archive so you need to move everything into that dir first # syntax is quite tricky shutil.make_archive('/tmp/test_arc.txt', base_dir='to_arc', root_dir='/tmp', format='gztar') # zip or tar work too shutil.unpack_archive(('/tmp/test_arc.txt.tar.gz'), extract_dir='/tmp/unpack/crazy') for root, dirs, files in os.walk('/tmp/unpack/crazy'): ## hmm - need to review os.walk() print(files) # finding directory contents base_dir = os.environ['HOME'] + '/data/sample_tick' # first way: kk = os.listdir(base_dir) # list things in that directory only - level 1 for name in kk: name = os.path.join(base_dir, name) print( name, ", is dir:", os.path.isdir(name), ", is file:", os.path.isfile(name)) # second way: for cur_dir, subdirs, filenames in os.walk(base_dir): """ per iteration, list all subdirs and filenames under cur_dir, then go deeper into subdirs in the next iterations. It basically does a tree_walk """ print( 'the current dir is %s' % cur_dir) for subdir in subdirs: print('\tthe current subdir is %s' % subdir) for filename in filenames: print('\tthe current filename is %s' % filename) # TODO: could use regex to detect if a file is a .gz or .csv file and then do some stuff with it
{ "content_hash": "21eacadcca1fcddf83ec9408a1961a73", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 115, "avg_line_length": 39.985074626865675, "alnum_prop": 0.6655468458379993, "repo_name": "nguyentu1602/pyexp", "id": "79ca99ec9c38c330f283c775275d974d28ec8439", "size": "2679", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyexp/files_os_practice.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "47" }, { "name": "Python", "bytes": "76593" } ], "symlink_target": "" }
import numpy as np
import scipy
import scipy.signal  # needed for scipy.signal.hann in AudioDataset.__init__
import h5py

from pprint import pprint

import os

import util
from util import pad_axis_toN_with_constant

import scipy.io as sio

# NOTE: this Keras backend import is an assumption; mse_of_mag inside
# load_data uses K.mean/K.exp, which would otherwise raise a NameError
# when the metric is evaluated.
from keras import backend as K


def get_mask_value(config):
    if config['transform_x']=='mag':
        return -1.
    elif config['transform_y']=='logmag':
        return -1.
    else:
        return 0.

def load_data(config, dataset='train', downsample=1):
    """Load padded (x, y, mask) data matrices for the 'train', 'valid', or 'test' set."""

    if config['transform_x']=='mag':
        transform_x = (lambda x: np.sqrt(x[:x.shape[0]/2,:]**2 + x[x.shape[0]/2:,:]**2))
    elif config['transform_y']=='logmag':
        transform_x = (lambda x: np.log(np.float32(1.) + np.sqrt(x[:x.shape[0]/2,:]**2 + x[x.shape[0]/2:,:]**2)))
    else:
        transform_x = (lambda x : x)

    mask_value = get_mask_value(config)
    metrics=[]
    if config['transform_y']=='mag':
        transform_y = (lambda y: np.sqrt(y[:y.shape[0]/2,:]**2 + y[y.shape[0]/2:,:]**2))
    elif config['transform_y']=='logmag':
        transform_y = (lambda y: np.log(np.float32(1.) + np.sqrt(y[:y.shape[0]/2,:]**2 + y[y.shape[0]/2:,:]**2)))
        def mse_of_mag(y_true, y_pred):
            # masked MSE between magnitudes, undoing the log(1 + mag) transform
            mask = np.float32(y_true >= 0.)
            mask_inverse_proportion = np.float32(mask.size)/np.sum(mask)
            return K.mean( mask*( (K.exp(y_true) - K.exp(y_pred))**2 ) )*mask_inverse_proportion
        metrics.append(mse_of_mag)
    else:
        transform_y = (lambda y : y)

    if (dataset=='test'):
        D_test=AudioDataset(config['taskfile_x_test'], config['taskfile_y_test'], datafile=config['datafile_test'], params_stft=config['params_stft'], downsample=downsample)
        print "  Loading test data..."
        x_test, y_test, mask_test = D_test.get_padded_data_matrix(transform_x=transform_x, transform_y=transform_y, pad_value=mask_value, maxlen=None)

        print "  Padding data to ensure equal sequence lengths..."
        maxseq=x_test.shape[1]
        x_test = pad_axis_toN_with_constant(x_test, 1, maxseq, mask_value)
        y_test = pad_axis_toN_with_constant(y_test, 1, maxseq, mask_value)
        mask_test = pad_axis_toN_with_constant(mask_test, 1, maxseq, 0.)

        return x_test, y_test, mask_test

    elif (dataset=='valid'):
        # development data
        D_valid=AudioDataset(config['taskfile_x_valid'], config['taskfile_y_valid'], datafile=config['datafile_valid'], params_stft=config['params_stft'], downsample=downsample)
        print "  Loading validation data..."
        x_valid, y_valid, mask_valid = D_valid.get_padded_data_matrix(transform_x=transform_x, transform_y=transform_y, pad_value=mask_value, maxlen=config['maxlen'])

        print "  Padding data to ensure equal sequence lengths..."
        maxseq=x_valid.shape[1]
        x_valid = pad_axis_toN_with_constant(x_valid, 1, maxseq, mask_value)
        y_valid = pad_axis_toN_with_constant(y_valid, 1, maxseq, mask_value)
        mask_valid = pad_axis_toN_with_constant(mask_valid, 1, maxseq, 0.)

        return x_valid, y_valid, mask_valid

    elif (dataset=='train'):
        # training data
        D_train=AudioDataset(config['taskfile_x_train'], config['taskfile_y_train'], datafile=config['datafile_train'], params_stft=config['params_stft'], downsample=downsample)
        print "  Loading training data..."
        x_train, y_train, mask_train = D_train.get_padded_data_matrix(transform_x=transform_x, transform_y=transform_y, pad_value=mask_value, maxlen=config['maxlen'])

        print "  Padding data to ensure equal sequence lengths..."
        maxseq=x_train.shape[1]
        x_train = pad_axis_toN_with_constant(x_train, 1, maxseq, mask_value)
        y_train = pad_axis_toN_with_constant(y_train, 1, maxseq, mask_value)
        mask_train = pad_axis_toN_with_constant(mask_train, 1, maxseq, 0.)

        return x_train, y_train, mask_train

    else:
        raise ValueError("Unsupported dataset '%s'" % dataset)


def clip_x_to_y(x,y,xfidx,yfidx):
    """ Clips the length of x to the length of y """
    xlens=xfidx[:,1]-xfidx[:,0]
    ylens=yfidx[:,1]-yfidx[:,0]
    nutt=xfidx.shape[0]
    idx=0
    for iutt in range(nutt):
        xcur=x[:,xfidx[iutt,0]:xfidx[iutt,1]]
        x[:,idx:idx+ylens[iutt]]=xcur[:,0:ylens[iutt]]
        idx=idx+ylens[iutt]
    yframes=y.shape[1]
    x=x[:,0:yframes]
    return x

def add_to_table(f,data,label,filters):
    # NOTE: expects a PyTables file handle, so it needs "import tables" if it
    # is ever called; the rest of this module stores data with h5py instead.
    try:
        atom = tables.Atom.from_dtype(data.dtype)
        t_data = f.createCArray(f.root,label,atom,data.shape,filters=filters)
        t_data[:] = data
    except:
        f.createArray(f.root,label,data)

def reshape_and_pad_stacks(x_stack,y_stack,fidx,transform_x=(lambda x: x),transform_y=(lambda y: y),pad_value=0., maxlen=None, verbose=False):
    #convert the concatenated STFTs of shape (2(N/2+1), <total STFT frames>)
    #into shape (<total number of wavfiles>, maxseq, 2(N/2+1)). Use a mask to
    #keep track of the padding of the arrays:
    maxseq = np.max(fidx[:,1]-fidx[:,0])
    if maxlen is None or (maxlen > maxseq):
        maxlen = maxseq
    d = transform_x(x_stack[:,0:1]).shape[0]
    if maxlen == maxseq:
        n_sequences=fidx.shape[0]
    else:
        n_sequences=0
        for i in range(fidx.shape[0]):
            t = 0
            while t < (fidx[i,1] - fidx[i,0]):
                n_sequences = n_sequences + 1
                t = t + maxlen

    """
    x_test = pad_value*np.ones((n_sequences, maxlen, d)).astype(x_stack.dtype)
    y_test = pad_value*np.ones((n_sequences, maxlen, d)).astype(y_stack.dtype)
    mask_test = np.zeros((n_sequences, maxlen, 1)).astype(x_stack.dtype)
    for i in range(n_sequences):
        x_test[i, :(fidx[i,1]-fidx[i,0]), :] = transform_x(x_stack[:, fidx[i,0]:fidx[i,1]]).T
        y_test[i, :(fidx[i,1]-fidx[i,0]), :] = transform_y(y_stack[:, fidx[i,0]:fidx[i,1]]).T
        mask_test[i, :(fidx[i,1]-fidx[i,0]), :] = 1.
    """

    x = pad_value*np.ones((n_sequences, maxlen, d)).astype(x_stack.dtype)
    y = pad_value*np.ones((n_sequences, maxlen, d)).astype(y_stack.dtype)
    mask = np.zeros((n_sequences, maxlen, 1)).astype(x_stack.dtype)
    t = 0
    i_wavfile = 0
    for i in range(n_sequences):
        t_end = t + maxlen
        flag_increment_i_wavfile = False
        if t_end >= fidx[i_wavfile,1]:
            t_end = fidx[i_wavfile,1]
            flag_increment_i_wavfile = True
        if verbose:
            print "Sequence %d of %d: t0=%d, t1=%d, duration=%d" % (i+1, n_sequences, t, t_end, t_end-t)
        x[i, :t_end-t, :] = transform_x(x_stack[:, t:t_end]).T
        y[i, :t_end-t, :] = transform_y(y_stack[:, t:t_end]).T
        mask[i, :t_end-t, :] = 1.
        if flag_increment_i_wavfile and (i < (n_sequences-1)):
            i_wavfile += 1
            t = fidx[i_wavfile,0]
        else:
            t += maxlen

    return x, y, mask


class AudioDataset:
    """
    Object for an audio dataset. The load function computes
    the short-time Fourier transform for each wav file. These STFTs are
    returned in a form that can easily be passed into Keras.
    The STFT uses sqrt-Hann analysis and synthesis windows.

    Required inputs:
    taskfile_input:  text file that consists of a list of desired input
                     audio files in .wav format
    taskfile_output: text file that consists of a list of desired output
                     audio files in .wav format. Each line of this file
                     should correspond to a line in taskfile_input.

    Optional inputs:
    datafile:        HDF5 file to save the dataset to.
                     If None, no HDF5 file is created
    params_stft:     parameters of the short-time Fourier transform (STFT)
                     Keys:
                     'N':   STFT window duration in samples (default 320,
                            which is 20ms for fs=16kHz)
                     'hop': STFT window hop in samples (default 160, which
                            is 10ms for fs=16kHz)
                     'nch': number of channels to have in the output; if
                            input is multichannel and nch is less than the
                            number of input channels, the first nch
                            channels are returned (default 1)
    """
    def __init__(self, taskfile_input, taskfile_output, datafile=None, params_stft={'N':320, 'hop': 160, 'nch': 1}, downsample=1):
        self.taskfile_input = taskfile_input
        self.taskfile_output = taskfile_output
        self.datafile = datafile
        # copy the dict so the mutable default argument is not modified
        # across instances when the 'window' key is added below
        self.params_stft = dict(params_stft)
        self.params_stft['window']=np.sqrt(scipy.signal.hann(params_stft['N'],sym=False).astype(np.float32))
        self.downsample = downsample
        self.load_from_wavfiles()

    def load_from_wavfiles(self):
        taskfile_input=self.taskfile_input
        taskfile_output=self.taskfile_output
        datafile=self.datafile
        params_stft=self.params_stft

        if os.path.isfile(datafile):
            #print "Specified data file '%s' already exists. Use 'get_data_stacks' or 'get_padded_data_matrix' to retrieve the data." % datafile
            f = h5py.File(datafile,'r')
        else:
            #read the wavfiles:
            with open(taskfile_input) as f:
                x_wavfiles = f.readlines()
            x_wavfiles = [wavfile.strip() for wavfile in x_wavfiles]
            with open(taskfile_output) as f:
                y_wavfiles = f.readlines()
            y_wavfiles = [wavfile.strip() for wavfile in y_wavfiles]

            x_wavfiles = x_wavfiles[::self.downsample]
            y_wavfiles = y_wavfiles[::self.downsample]

            #Compute the STFTs; input is 'x', output is 'y'. The outputs of
            #util.compute_STFTs are the concatenated STFTs in an array of
            #shape (2(N/2+1), <total number of STFT frames>), and the "fidx"
            #variable is an array of shape (<total number of wavfiles>, 2)
            #that contains the starting and ending indices of the STFT frames
            #for each wavfile. The output dimension of the stack is "2(N/2+1)"
            #because the complex numbers are encoded in real-composite form,
            #which stacks the real part on top of the imaginary part
            print "Computing STFTs..."
            x_stack, x_fidx = util.compute_STFTs(x_wavfiles, params_stft)
            y_stack, y_fidx = util.compute_STFTs(y_wavfiles, params_stft)

            fidx_are_the_same = np.allclose(x_fidx, y_fidx)
            inputs_length_gte_outputs_length = all(x_fidx[:,1]>=y_fidx[:,1])
            if not fidx_are_the_same:
                if inputs_length_gte_outputs_length:
                    #clip the lengths of the input STFTs to the lengths of the output STFTs
                    x_stack = clip_x_to_y(x_stack, y_stack, x_fidx, y_fidx)
                else:
                    raise ValueError("Not all input files have greater than or equal length to all output files!")

            # the indices within the stacks should be the same now:
            fidx = y_fidx

            #save the STFTs to the datafile, if one is specified
            if datafile is not None:
                print "Saving data to file '%s'..." 
% datafile f = h5py.File(datafile, 'w') f.create_dataset("x_stack", data=x_stack) f.create_dataset("y_stack", data=y_stack) f.create_dataset("fidx", data=fidx) f.create_dataset("x_wavfiles", data=x_wavfiles) f.create_dataset("y_wavfiles", data=y_wavfiles) grp_stft=f.create_group("stft") for key in params_stft: grp_stft.attrs[key] = params_stft[key] self.data = f self.x_stack = f['x_stack'] self.y_stack = f['y_stack'] self.fidx = f['fidx'] self.x_wavfiles = f['x_wavfiles'] self.y_wavfiles = f['y_wavfiles'] f.close() def reconstruct_x(self, idx, mask=None): X_stft = self.x_stack[:,self.fidx[idx,0]:self.fidx[idx,1]] if not(mask is None): if mask.shape[0] < X_stft.shape[0]: mask = np.tile(mask,(X_stft.shape[0]/mask.shape[0],1)) X_stft = mask*X_stft if len(X_stft.shape) == 2: X_stft = np.expand_dims(X_stft, 2) X_stft = X_stft[:X_stft.shape[0]/2,:,:] + np.complex64(1j)*X_stft[X_stft.shape[0]/2:,:,:] xr,_=util.istft_mc(X_stft,self.params_stft['hop'],flag_noDiv=1,window=self.params_stft['window']) return xr def reconstruct_y(self, idx, mask=None): Y_stft = self.y_stack[:,self.fidx[idx,0]:self.fidx[idx,1]] if not(mask is None): if mask.shape[0] < Y_stft.shape[0]: mask = np.tile(mask,(Y_stft.shape[0]/mask.shape[0],1)) Y_stft = mask*Y_stft if len(Y_stft.shape) == 2: Y_stft = np.expand_dims(Y_stft, 2) Y_stft = Y_stft[:Y_stft.shape[0]/2,:,:] + np.complex64(1j)*Y_stft[Y_stft.shape[0]/2:,:,:] yr,_=util.istft_mc(Y_stft,self.params_stft['hop'],flag_noDiv=1,window=self.params_stft['window']) return yr def reconstruct_audio(self, description, irm=None, mask=None, idx=None, test=False): n_wavfiles = len(self.x_wavfiles) if idx is None: for j in range(n_wavfiles): if irm is None or mask is None: yest = self.reconstruct_x(j) else: yest = self.reconstruct_x(j, mask=irm[j, :np.sum(mask[j,:]),:].T) y = self.reconstruct_y(j) wavfile_enhanced = self.y_wavfiles[j].replace('scaled', 'enhanced_%s' % description) if not os.path.exists(os.path.dirname(wavfile_enhanced)): os.makedirs(os.path.dirname(wavfile_enhanced)) util.wavwrite(wavfile_enhanced, 16e3, yest) elif isinstance(idx, list): for j in idx: if irm is None or mask is None: yest = self.reconstruct_x(j) else: yest = self.reconstruct_x(j, mask=irm[j, :np.sum(mask[j,:]),:].T) y = self.reconstruct_y(j) if test: y_orig = util.wavread(self.y_wavfiles[j])[0:1,:] x = util.wavread(self.x_wavfiles[j])[0:1,:] if yest.shape[1] > x.shape[1]: yest = yest[:, :x.shape[1]] if y.shape[1] > y_orig.shape[1]: y = y[:, :y_orig.shape[1]] print "For file %d, NMSE between original x and yest is %e" % (j, np.mean( (x-yest)**2)/np.mean(x**2)) print "For file %d, NMSE between original y_orig and y is %e" % (j, np.mean( (y_orig-y)**2)/np.mean(y_orig**2)) else: wavfile_enhanced = self.y_wavfiles[j].replace('scaled', 'enhanced_%s' % description) if not os.path.exists(os.path.dirname(wavfile_enhanced)): os.makedirs(os.path.dirname(wavfile_enhanced)) util.wavwrite(wavfile_enhanced, 16e3, yest) else: if irm is None: yest = self.reconstruct_x(idx) else: yest = self.reconstruct_x(idx, mask=irm) wavfile_enhanced = self.y_wavfiles[idx].replace('scaled', 'enhanced_%s' % description) if not os.path.exists(os.path.dirname(wavfile_enhanced)): os.makedirs(os.path.dirname(wavfile_enhanced)) util.wavwrite(wavfile_enhanced, 16e3, yest) return def get_data_stacks(self): """ Returns the x and y data stacks, along with the frame indices fidx """ datafile=self.datafile if not os.path.isfile(datafile): self.load_from_wavfiles() print "Loading data from file '%s'..." 
% datafile
        #data=tables.open_file(datafile,"r")
        f=h5py.File(datafile,"r")
        for key in ['N', 'hop', 'nch', 'window']:
            if not np.all(self.params_stft[key] == f['stft'].attrs[key]):
                raise ValueError("STFT parameter '%s' of loaded data does not match specified STFT parameter '%s'" % (key,key))
        x_stack = f['x_stack'][:]
        y_stack = f['y_stack'][:]
        fidx = f['fidx'][:]
        x_wavfiles = f['x_wavfiles'][:]
        y_wavfiles = f['y_wavfiles'][:]
        f.close()

        return x_stack, y_stack, fidx

    def get_padded_data_matrix(self, transform_x=(lambda x: x), transform_y=(lambda y: y), pad_value=0., maxlen=None):
        """
        Reshapes the x and y data stacks to shape
        (<total num. wavfiles>, maxseq, 2(N/2+1)) arrays, where 'maxseq' is
        the maximum number of STFT frames for any wavfile. This procedure
        wastes memory, since zero-padding is used to store the
        variable-length sequences.

        Passing 'maxlen' chunks long sequences into multiple shorter
        sequences of max length 'maxlen'.

        Outputs:
        x:    data matrix for input data x
        y:    data matrix for output data y
        mask: a binary matrix, equal to 1. where there is data and 0.
              where there is padding
        """
        x, y, mask = reshape_and_pad_stacks(self.x_stack, self.y_stack, self.fidx, transform_x=transform_x, transform_y=transform_y, pad_value=pad_value, maxlen=maxlen)

        return x, y, mask

    def score_audio_savefile_exists(self, description, snr=None, savefile=None, verbose=False, datadir=""):
        """
        Returns True if scores already exist
        """
        if savefile is None:
            if snr is None:
                savefile = datadir + ("scores/scores_%s.mat" %(description))
            else:
                savefile = datadir + ("scores/scores_%s_%s.mat" %(description,snr))

        return os.path.isfile(savefile)

    def score_audio(self, description, snr=None, savefile=None, verbose=False, datadir="", flag_rescore=False):
        """
        Computes scores for enhanced audio files
        """
        if snr is None:
            enhanced_wavfiles = [wavfile.replace('scaled', 'enhanced_%s' % description) for wavfile in self.y_wavfiles]
            reference_wavfiles = [wavfile for wavfile in self.y_wavfiles]
        else:
            enhanced_wavfiles = [wavfile.replace('scaled', 'enhanced_%s' % description) for wavfile in self.y_wavfiles if ('/' + snr + '/') in wavfile]
            reference_wavfiles = [wavfile for wavfile in self.y_wavfiles if ('/' + snr + '/') in wavfile]

        enhanced_taskfile = "taskfile_enhanced.txt"
        with open(enhanced_taskfile, 'w') as f:
            for wavfile in enhanced_wavfiles:
                f.write("%s\n" % wavfile)

        reference_taskfile = "taskfile_reference.txt"
        with open(reference_taskfile, 'w') as f:
            for wavfile in reference_wavfiles:
                f.write("%s\n" % wavfile)

        if savefile is None:
            if snr is None:
                savefile = datadir + ("scores/scores_%s.mat" %(description))
            else:
                savefile = datadir + ("scores/scores_%s_%s.mat" %(description,snr))

        if (not os.path.isfile(savefile)) or flag_rescore:
            cmd_matlab = "/usr/local/MATLAB/R2017a/bin/matlab -nosplash -nodesktop -nodisplay -r \"score_audio('%s', '%s', '%s', %d); quit();\"" %(enhanced_taskfile, reference_taskfile, savefile, verbose)
            if not verbose:
                cmd_matlab = cmd_matlab + " > /dev/null"
            print("Running Matlab command %s" % cmd_matlab)
            os.system(cmd_matlab)

        print("Loading scores from savefile '%s'..." % (savefile))
        scores = sio.loadmat(open(savefile,'rb'))

        return scores
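
# Hedged usage sketch (not part of the original module): how load_data above
# might be driven. The config keys mirror the ones referenced in load_data;
# every path below is a placeholder assumption.
#
#   config = {
#       'taskfile_x_train': 'tasks/x_train.txt',   # hypothetical path
#       'taskfile_y_train': 'tasks/y_train.txt',   # hypothetical path
#       'datafile_train':   'data/train.h5',       # hypothetical path
#       'params_stft':      {'N': 320, 'hop': 160, 'nch': 1},
#       'maxlen':           500,
#       'transform_x':      'logmag',
#       'transform_y':      'logmag',
#   }
#   x_train, y_train, mask_train = load_data(config, dataset='train')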
{ "content_hash": "b19067d0154a56114687498d1e352e5b", "timestamp": "", "source": "github", "line_count": 435, "max_line_length": 265, "avg_line_length": 45.08505747126437, "alnum_prop": 0.5782684070976953, "repo_name": "stwisdom/dr-nmf", "id": "5c519714862082235aafd7f4d131e6ffeeb68e6f", "size": "19612", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "audio_dataset.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "254483" }, { "name": "Matlab", "bytes": "16671" }, { "name": "Python", "bytes": "136245" }, { "name": "Shell", "bytes": "5152" } ], "symlink_target": "" }
"""Unit tests for artifact conversion to and from Tensorflow SavedModel v2.""" import base64 import glob import json import os import shutil import tempfile import unittest import numpy as np import tensorflow.compat.v2 as tf from tensorflow_decision_forests.keras import GradientBoostedTreesModel from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import variables from tensorflow.python.training.tracking import tracking from tensorflow.python.tools import freeze_graph from tensorflow.python.saved_model.save import save import tensorflow_hub as hub from tensorflowjs import version from tensorflowjs.converters import graph_rewrite_util from tensorflowjs.converters import tf_saved_model_conversion_v2 from tensorflowjs.converters.common import ASSETS_DIRECTORY_NAME SAVED_MODEL_DIR = 'saved_model' HUB_MODULE_DIR = 'hub_module' FROZEN_MODEL_DIR = 'frozen_model' class ConvertTest(tf.test.TestCase): def setUp(self): super(ConvertTest, self).setUp() self._tmp_dir = tempfile.mkdtemp() def tearDown(self): if os.path.isdir(self._tmp_dir): shutil.rmtree(self._tmp_dir) super(ConvertTest, self).tearDown() def _create_saved_model_v1(self): """Create a TensorFlow SavedModel for testing.""" graph = tf.Graph() with graph.as_default(): x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]]) w = tf.compat.v1.get_variable('w', shape=[2, 2]) y = tf.compat.v1.matmul(x, w) output = tf.compat.v1.nn.softmax(y) init_op = w.initializer # Create a builder. save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_dir) with tf.compat.v1.Session() as sess: # Run the initializer on `w`. sess.run(init_op) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map={ 'serving_default': tf.compat.v1.saved_model \ .signature_def_utils.predict_signature_def( inputs={'x': x}, outputs={'output': output}) }, assets_collection=None) builder.save() def _create_saved_model_v1_with_hashtable(self): """Create a TensorFlow SavedModel V1 with unused hash table for testing.""" graph = tf.Graph() with graph.as_default(): x = tf.compat.v1.placeholder('int32', [None, 2, 2]) t = tf.compat.v1.to_float(x) w = tf.compat.v1.get_variable('w', shape=[2, 2]) output = tf.compat.v1.matmul(t, w) init_op = w.initializer # Add a hash table that is not used by the output. keys = tf.constant(['key']) values = tf.constant([1]) initializer = tf.lookup.KeyValueTensorInitializer(keys, values) table = tf.lookup.StaticHashTable(initializer, -1) # Create a builder. save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) builder = tf.compat.v1.saved_model.builder.SavedModelBuilder( save_dir) with tf.compat.v1.Session() as sess: # Run the initializer on `w`. 
sess.run(init_op) table.lookup(keys) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map={ 'serving_default': tf.compat.v1.saved_model \ .signature_def_utils.predict_signature_def( inputs={'t': t}, outputs={'output': output}) }, assets_collection=None) builder.save() def _create_saved_model_v2_with_hashtable(self): """Create a TensorFlow SavedModel V2 with hash table for testing.""" class Table(tf.Module): def __init__(self): super(Table, self).__init__() keys = tf.constant(['a', 'b']) vals= tf.constant([0, 1]) init = tf.lookup.KeyValueTensorInitializer(keys, vals) self.table = tf.lookup.StaticHashTable(init, -1) def initializeTable(self): @tf.function def lookup(input): return self.table.lookup(input) return lookup model = Table() concrete_fn = model.initializeTable().get_concrete_function( input=tf.TensorSpec([None], tf.string)) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir, signatures={"serving_default": concrete_fn}) def _create_saved_model_with_fusable_conv2d(self, use_bias): """Test a basic model with fusable conv2d.""" layers = [ tf.keras.layers.Conv2D( 16, [3, 3], padding='same', use_bias=use_bias), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU() ] model = tf.keras.Sequential(layers) model.predict(tf.ones((1, 224, 224, 3))) tf.keras.backend.set_learning_phase(0) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir) def _create_saved_model_with_fusable_depthwise_conv2d(self): """Test a basic model with fusable depthwise conv2d.""" layers = [ tf.keras.layers.DepthwiseConv2D( 1, use_bias=True, bias_initializer=tf.initializers.constant(0.25)), tf.keras.layers.ReLU() ] model = tf.keras.Sequential(layers) model.predict(tf.ones((1, 2, 2, 3))) tf.keras.backend.set_learning_phase(0) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir) def _create_saved_model_with_prelu(self): """Test a basic model with fusable conv2d.""" layers = [ tf.keras.layers.Conv2D( 16, [3, 3], padding='same', use_bias=True, bias_initializer=tf.initializers.constant(0.25)), tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)), tf.keras.layers.DepthwiseConv2D( 1, use_bias=True, bias_initializer=tf.initializers.constant(0.25)), tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)) ] model = tf.keras.Sequential(layers) model.predict(tf.ones((1, 224, 224, 3))) tf.keras.backend.set_learning_phase(0) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir) def _create_saved_model_with_unfusable_prelu(self): """Test a basic model with unfusable prelu.""" layers = [ tf.keras.layers.ReLU(), tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)) ] model = tf.keras.Sequential(layers) model.predict(tf.ones((1, 224, 3))) tf.keras.backend.set_learning_phase(0) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir) def _create_saved_model(self): """Test a basic model with functions to make sure functions are inlined.""" input_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v1 = variables.Variable(3.) root.v2 = variables.Variable(2.) 
root.f = def_function.function(lambda x: root.v1 * root.v2 * x) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) save(root, save_dir, to_save) def _create_saved_model_with_fusable_matmul(self): """Test a fusable matmul model.""" input_data = constant_op.constant(1., shape=[1, 1]) bias_data = constant_op.constant(1., shape=[1]) root = tracking.AutoTrackable() root.v2 = variables.Variable([[2.]]) root.f = def_function.function( lambda x: tf.nn.relu(tf.nn.bias_add(tf.matmul(x, root.v2), bias_data))) to_save = root.f.get_concrete_function(input_data) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) save(root, save_dir, to_save) def _create_saved_model_with_control_flow(self): """Test a basic model with control flow to inlined.""" @tf.function def find_next_odd(v): v1 = v + 1 while tf.equal(v1 % 2, 0): v1 = v1 + 1 return v1 root = tracking.AutoTrackable() root.f = find_next_odd to_save = root.f.get_concrete_function( tensor_spec.TensorSpec([], dtypes.int32)) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) save(root, save_dir, to_save) def _create_saved_model_with_tfdf(self): """Test a basic TFDF model.""" P = 5 NUM_EXAMPLES = 10 NUM_FEATURES = 4 x_train = np.random.uniform(size=(NUM_EXAMPLES, NUM_FEATURES)) y_train = np.random.uniform(size=NUM_EXAMPLES) > 0.5 w_train = y_train * (P - 1) + 1 # 1 or p depending on the class. model = GradientBoostedTreesModel() model.fit(x=x_train, y=y_train, sample_weight=w_train) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) model.save(save_dir) def _create_unsupported_saved_model(self): root = tracking.AutoTrackable() root.w = variables.Variable(tf.random.uniform([2, 2])) @def_function.function def exported_function(x): root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]]) root.y = tf.matmul(root.x, root.w) # unsupported op: linalg.diag root.z = tf.linalg.diag(root.y) return root.z * x root.f = exported_function to_save = root.f.get_concrete_function( tensor_spec.TensorSpec([], dtypes.float32)) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) save(root, save_dir, to_save) def _create_saved_model_with_debug_ops(self): root = tracking.AutoTrackable() root.w = variables.Variable(tf.random.uniform([2, 2])) @def_function.function def exported_function(x): root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]]) root.y = tf.matmul(root.x, root.w) tf.compat.v1.Print(root.x, [root.x]) tf.compat.v1.Assert(tf.greater(tf.reduce_max(root.x), 0), [root.x]) tf.compat.v1.check_numerics(root.x, 'NaN found') return root.y * x root.f = exported_function to_save = root.f.get_concrete_function( tensor_spec.TensorSpec([], dtypes.float32)) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) save(root, save_dir, to_save) def _create_saved_model_with_structured_outputs(self): def create_input(name): return tf.keras.layers.Input(name=name, shape=(1,), dtype=tf.float32) input1 = create_input("input1") input3 = create_input("input3") input2 = create_input("input2") output1 = tf.keras.layers.Dense(1, name='a') output1 = output1(tf.keras.layers.concatenate([input1, input3], axis=1)) output2 = tf.keras.layers.Dense(1, name='b')(input2) output3 = tf.keras.layers.Multiply(name='c')([output1, output2]) inputs = { "input1": input1, "input3": input3, "input2": input2 } outputs = { "a": output1, "c": output3, "b": output2 } model = tf.keras.Model(inputs=inputs, outputs=outputs) save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf.saved_model.save(model, save_dir) def 
_create_hub_module(self): # Module function that doubles its input. def double_module_fn(): w = tf.Variable([2.0, 4.0]) x = tf.compat.v1.placeholder(dtype=tf.float32) hub.add_signature(inputs=x, outputs=x*w) graph = tf.Graph() with graph.as_default(): spec = hub.create_module_spec(double_module_fn) m = hub.Module(spec) # Export the module. with tf.compat.v1.Session(graph=graph) as sess: sess.run(tf.compat.v1.global_variables_initializer()) m.export(os.path.join(self._tmp_dir, HUB_MODULE_DIR), sess) def create_frozen_model(self): graph = tf.Graph() saved_model_dir = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR) with graph.as_default(): x = tf.constant([[37.0, -23.0], [1.0, 4.0]]) w = tf.Variable(tf.random.uniform([2, 2])) y = tf.matmul(x, w) tf.nn.softmax(y) init_op = w.initializer # Create a builder builder = tf.compat.v1.saved_model.builder.SavedModelBuilder( saved_model_dir) with tf.compat.v1.Session() as sess: # Run the initializer on `w`. sess.run(init_op) builder.add_meta_graph_and_variables( sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map=None, assets_collection=None) builder.save() frozen_file = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, 'model.frozen') freeze_graph.freeze_graph( '', '', True, '', "Softmax", '', '', frozen_file, True, '', saved_model_tags=tf.compat.v1.saved_model.tag_constants.SERVING, input_saved_model_dir=saved_model_dir) def test_convert_saved_model_v1(self): self._create_saved_model_v1() input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) output_dir = os.path.join(input_dir, 'js') tf_saved_model_conversion_v2.convert_tf_saved_model( input_dir, output_dir ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js') # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*'))) def test_convert_saved_model_v1_with_hashtable(self): self._create_saved_model_v1_with_hashtable() input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) output_dir = os.path.join(input_dir, 'js') tf_saved_model_conversion_v2.convert_tf_saved_model( input_dir, output_dir ) expected_weights_manifest = [{ 'paths': ['group1-shard1of1.bin'], 'weights': [ {'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}, {'dtype': 'string', 'name': 'Const', 'shape': [1]}, {'dtype': 'int32', 'name': 'Const_1', 'shape': [1]} ]}] tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js') # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) self.assertTrue(model_json['modelInitializer']) for node in model_json['modelTopology']['node']: if node['name'] == 'ToFloat' and node['op'] == 'Placeholder': self.assertEqual(node['attr']['shape'], {'shape': {'dim': [ {'size': '-1'}, {'size': '2'}, {'size': '2'}]}}) weights_manifest = model_json['weightsManifest'] self.assertEqual(weights_manifest, expected_weights_manifest) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*'))) def test_convert_saved_model_v2_with_hashtable(self): self._create_saved_model_v2_with_hashtable() input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) output_dir = os.path.join(input_dir, 'js') tf_saved_model_conversion_v2.convert_tf_saved_model( input_dir, output_dir ) expected_signature = { 'inputs': { 'input': { 'name': 'input:0', 'dtype': 'DT_STRING', 'tensorShape': {'dim': [{'size': '-1'}]} }, 'unknown:0': { 'name': 'unknown:0', 'dtype': 'DT_RESOURCE', 'tensorShape': {}, 'resourceId': None } }, 'outputs': { 'output_0': { 'name': 'Identity:0', 'dtype': 'DT_INT32', 'tensorShape': {'dim': [{'size': '-1'}]} } } } expected_initializer_signature = { 'outputs': { 'Identity:0': { 'name': 'Identity:0', 'dtype': 'DT_RESOURCE', 'tensorShape': {}, 'resourceId': None } } } expected_weights_manifest = [{ 'paths': ['group1-shard1of1.bin'], 'weights': [ {'name': 'unknown_0', 'shape': [], 'dtype': 'int32'}, {'name': '4609', 'shape': [2], 'dtype': 'string'}, {'name': '4611', 'shape': [2], 'dtype': 'int32'} ]}] tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js') # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) # Check resource ids match which indicates the initializer output is mapped # to the inference input. signature_resource_id = model_json['signature']['inputs']['unknown:0']['resourceId'] initializer_resource_id = model_json['initializerSignature']['outputs']['Identity:0']['resourceId'] self.assertTrue(signature_resource_id) self.assertEqual(signature_resource_id, initializer_resource_id) # Update expected signatures with resourceId since it is a runtime value. 
expected_signature['inputs']['unknown:0']['resourceId'] = signature_resource_id expected_initializer_signature['outputs']['Identity:0']['resourceId'] = signature_resource_id self.assertEqual(model_json['signature'], expected_signature) self.assertEqual(model_json['initializerSignature'], expected_initializer_signature) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) model_ops = [node['op'] for node in model_json['modelTopology']['node']] self.assertIn('LookupTableFindV2', model_ops) self.assertTrue(model_json['modelInitializer']) initializer_ops = [node['op'] for node in model_json['modelInitializer']['node']] self.assertIn('HashTableV2', initializer_ops) self.assertIn('LookupTableImportV2', initializer_ops) weights_manifest = model_json['weightsManifest'] self.assertEqual(weights_manifest, expected_weights_manifest) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*'))) def test_convert_saved_model_v1_with_metadata(self): self._create_saved_model_v1() input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) output_dir = os.path.join(input_dir, 'js') metadata_json = {'a': 1} tf_saved_model_conversion_v2.convert_tf_saved_model( input_dir, output_dir, metadata={'key': metadata_json} ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js') # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key']) def test_convert_saved_model(self): self._create_saved_model() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) def test_convert_saved_model_with_frozen_file(self): self._create_saved_model() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR), frozen_graph_dir=os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) frozen_file_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'model.json.frozen') # Check model.json.frozen exist. self.assertTrue( glob.glob(frozen_file_path)) def test_convert_saved_model_with_metadata(self): self._create_saved_model() metadata_json = {'a': 1} tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR), metadata={'key': metadata_json} ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key']) def test_convert_saved_model_with_fused_conv2d(self): for use_bias in [True, False]: self._create_saved_model_with_fusable_conv2d(use_bias) tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) nodes = model_json['modelTopology']['node'] fused_op = None for node in nodes: self.assertNotIn('BatchNorm', node['op']) self.assertNotIn('Relu', node['op']) self.assertNotIn('BiasAdd', node['op']) if node['op'] == '_FusedConv2D': fused_op = node self.assertIsNot(fused_op, None) self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][0]), b'BiasAdd') self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][1]), b'Relu') # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_fused_matmul(self): self._create_saved_model_with_fusable_matmul() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) nodes = model_json['modelTopology']['node'] fused_op = None for node in nodes: self.assertNotEqual(node['op'], 'MatMul') self.assertNotIn('Relu', node['op']) self.assertNotIn('BiasAdd', node['op']) if node['op'] == graph_rewrite_util.FUSED_MATMUL: fused_op = node self.assertIsNot(fused_op, None) self.assertIsNot(fused_op['attr']['transpose_a'], None) self.assertIsNot(fused_op['attr']['transpose_b'], None) self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][0]), b'BiasAdd') self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][1]), b'Relu') # Check meta-data in the artifact JSON. 
self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_fused_depthwise_conv2d(self): self._create_saved_model_with_fusable_depthwise_conv2d() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) nodes = model_json['modelTopology']['node'] fused_op = None for node in nodes: self.assertNotIn('BatchNorm', node['op']) self.assertNotIn('Relu', node['op']) self.assertNotIn('BiasAdd', node['op']) if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D: fused_op = node self.assertIsNot(fused_op, None) self.assertIsNot(fused_op['attr']['dilations'], None) self.assertIsNot(fused_op['attr']['strides'], None) self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][0]), b'BiasAdd') self.assertEqual( base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][1]), b'Relu') # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_prelu(self): self._create_saved_model_with_prelu() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) nodes = model_json['modelTopology']['node'] prelu_op = None fused_op = None depthwise_fused_op = None for node in nodes: if node['op'] == 'Prelu': prelu_op = node if node['op'] == '_FusedConv2D': fused_op = node if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D: depthwise_fused_op = node self.assertTrue(prelu_op is None) self.assertIsNot(fused_op, None) self.assertIsNot(depthwise_fused_op, None) fused_ops = list(map(base64.b64decode, fused_op['attr']['fused_ops']['list']['s'])) self.assertEqual(fused_ops, [b'BiasAdd', b'Prelu']) self.assertEqual(fused_op['attr']['num_args']['i'], '2') depthwise_fused_ops = list( map(base64.b64decode, depthwise_fused_op['attr']['fused_ops']['list']['s'])) self.assertEqual(depthwise_fused_ops, [b'BiasAdd', b'Prelu']) self.assertEqual(depthwise_fused_op['attr']['num_args']['i'], '2') # Check meta-data in the artifact JSON. 
self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_unfusable_prelu(self): self._create_saved_model_with_unfusable_prelu() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) nodes = model_json['modelTopology']['node'] prelu_op = None for node in nodes: if node['op'] == 'Prelu': prelu_op = node break self.assertTrue(prelu_op) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_control_flow(self): self._create_saved_model_with_control_flow() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_control_flow_v2(self): self._create_saved_model_with_control_flow() tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf_saved_model_conversion_v2.convert_tf_saved_model( tfjs_path, tfjs_path, control_flow_v2=True ) # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) add_y_weight = None for weight in weights_manifest[0]['weights']: if 'add/y' in weight['name']: add_y_weight = weight self.assertIsNot(add_y_weight, None) self.assertFalse(add_y_weight['name'].startswith('add/y')) nodes = model_json['modelTopology']['node'] while_op = None for node in nodes: self.assertNotIn('Merge', node['op']) self.assertNotIn('Switch', node['op']) if node['op'] == 'StatelessWhile': while_op = node self.assertIsNot(while_op, None) # Check meta-data in the artifact JSON. self.assertEqual(model_json['format'], 'graph-model') self.assertEqual( model_json['convertedBy'], 'TensorFlow.js Converter v%s' % version.version) self.assertEqual(model_json['generatedBy'], tf.__version__) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_with_tfdf(self): self._create_saved_model_with_tfdf() tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf_saved_model_conversion_v2.convert_tf_saved_model( tfjs_path, tfjs_path, skip_op_check=True ) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) # Check TFDF ops are present. model_ops = [node['op'] for node in model_json['modelTopology']['node']] self.assertIn('SimpleMLInferenceOpWithHandle', model_ops) initializer_ops = [node['op'] for node in model_json['modelInitializer']['node']] self.assertIn('SimpleMLCreateModelResource', initializer_ops) self.assertIn('SimpleMLLoadModelFromPathWithHandle', initializer_ops) # Check assets containing TFDF files were copied over. self.assertTrue( os.path.exists( os.path.join(tfjs_path, ASSETS_DIRECTORY_NAME + '.zip'))) def test_convert_saved_model_sharded(self): self._create_saved_model() model_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Do initial conversion without sharding. tf_saved_model_conversion_v2.convert_tf_saved_model(model_path, tfjs_path) weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin')) # Get size of weights in bytes after graph optimizations. optimized_total_weight = sum([os.path.getsize(f) for f in weight_files]) # Due to the shard size, there ought to be 2 shards after conversion. weight_shard_size_bytes = int(optimized_total_weight * 0.8) tfjs_path = os.path.join(self._tmp_dir, 'sharded_model') # Convert Saved Model again with shard argument set. 
tf_saved_model_conversion_v2.convert_tf_saved_model( model_path, tfjs_path, weight_shard_size_bytes=weight_shard_size_bytes) weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin'))) self.assertEqual(len(weight_files), 2) weight_file_sizes = [os.path.getsize(f) for f in weight_files] self.assertEqual(sum(weight_file_sizes), optimized_total_weight) self.assertLess(weight_file_sizes[1], weight_file_sizes[0]) def test_optimizer_add_unsupported_op(self): self._create_unsupported_saved_model() with self.assertRaisesRegexp( # pylint: disable=deprecated-method ValueError, r'^Unsupported Ops'): tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR) ) def test_convert_saved_model_skip_op_check(self): self._create_unsupported_saved_model() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR), skip_op_check=True ) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) # (TODO: piyu) disable this test, need to change # convert_variables_to_constants_v2 to set function_optimization=aggressive. @unittest.skip('not supported') def test_convert_saved_model_strip_debug_ops(self): self._create_saved_model_with_debug_ops() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR), strip_debug_ops=True) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_saved_model_structured_outputs_true(self): self._create_saved_model_with_structured_outputs() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR), use_structured_outputs_names=True) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) self.assertEqual(["a", "b", "c"], model_json['userDefinedMetadata']['structuredOutputKeys']) def test_convert_saved_model_structured_outputs_false(self): self._create_saved_model_with_structured_outputs() tf_saved_model_conversion_v2.convert_tf_saved_model( os.path.join(self._tmp_dir, SAVED_MODEL_DIR), os.path.join(self._tmp_dir, SAVED_MODEL_DIR)) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertIs(model_json.get('userDefinedMetadata'), None) def test_convert_hub_module_v1(self): self._create_hub_module() module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_hub_module_v1_sharded(self): self._create_hub_module() module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) # Do initial conversion without sharding. tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path) weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin')) # Get size of weights in bytes after graph optimizations. optimized_total_weight = sum([os.path.getsize(f) for f in weight_files]) # Due to the shard size, there ought to be 3 shards after conversion. weight_shard_size_bytes = int(optimized_total_weight * 0.4) tfjs_path = os.path.join(self._tmp_dir, 'sharded_model') # Convert Hub model again with shard argument set. tf_saved_model_conversion_v2.convert_tf_hub_module( module_path, tfjs_path, weight_shard_size_bytes=weight_shard_size_bytes) weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin'))) self.assertEqual(len(weight_files), 3) weight_file_sizes = [os.path.getsize(f) for f in weight_files] self.assertEqual(sum(weight_file_sizes), optimized_total_weight) self.assertEqual(weight_file_sizes[0], weight_file_sizes[1]) self.assertLess(weight_file_sizes[2], weight_file_sizes[0]) def test_convert_hub_module_v1_with_metadata(self): self._create_hub_module() module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) metadata_json = {'a': 1} tf_saved_model_conversion_v2.convert_tf_hub_module( module_path, tfjs_path, metadata={'key': metadata_json}) # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key']) def test_convert_hub_module_v2(self): self._create_saved_model() module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf_saved_model_conversion_v2.convert_tf_hub_module( module_path, tfjs_path, "serving_default", "serve") # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) def test_convert_hub_module_v2_with_metadata(self): self._create_saved_model() module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) metadata_json = {'a': 1} tf_saved_model_conversion_v2.convert_tf_hub_module( module_path, tfjs_path, "serving_default", "serve", metadata={'key': metadata_json}) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key']) def test_convert_frozen_model(self): self.create_frozen_model() print(glob.glob( os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, '*'))) tf_saved_model_conversion_v2.convert_tf_frozen_model( os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, 'model.frozen'), 'Softmax', os.path.join(self._tmp_dir, FROZEN_MODEL_DIR)) tfjs_path = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) # frozen model signature has no input nodes. self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, 'group*-*'))) def test_convert_frozen_model_with_metadata(self): self.create_frozen_model() print(glob.glob( os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, '*'))) metadata_json = {'a': 1} tf_saved_model_conversion_v2.convert_tf_frozen_model( os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, 'model.frozen'), 'Softmax', os.path.join(self._tmp_dir, FROZEN_MODEL_DIR), metadata={'key': metadata_json}) tfjs_path = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR) # Check model.json and weights manifest. 
with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertEqual(metadata_json, model_json['userDefinedMetadata']['key']) def test_convert_keras_model_to_saved_model(self): keras_model = tf.keras.Sequential( [tf.keras.layers.Dense(1, input_shape=[2])]) tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR) tf_saved_model_conversion_v2.convert_keras_model_to_graph_model( keras_model, tfjs_path) # Check model.json and weights manifest. with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f: model_json = json.load(f) self.assertTrue(model_json['modelTopology']) self.assertIsNot(model_json['modelTopology']['versions'], None) signature = model_json['signature'] self.assertIsNot(signature, None) self.assertIsNot(signature['inputs'], None) self.assertIsNot(signature['outputs'], None) weights_manifest = model_json['weightsManifest'] self.assertCountEqual(weights_manifest[0]['paths'], ['group1-shard1of1.bin']) self.assertIn('weights', weights_manifest[0]) self.assertTrue( glob.glob( os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*'))) if __name__ == '__main__': tf.test.main()
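# A minimal sketch (not part of the original test module) of the shard-size
# arithmetic that test_convert_hub_module_v1_sharded above relies on: with a
# shard limit of 40% of the total weight bytes, ceil(1 / 0.4) == 3 shards are
# produced, the first two at the full shard size and the last one holding the
# remainder. The helper name below is hypothetical and is not part of the
# tensorflowjs converter API.
import math


def expected_shard_sizes(total_bytes, shard_size_bytes):
    """Return the byte size of each weight shard for a given shard limit."""
    n_shards = int(math.ceil(float(total_bytes) / shard_size_bytes))
    sizes = [shard_size_bytes] * (n_shards - 1)
    sizes.append(total_bytes - shard_size_bytes * (n_shards - 1))
    return sizes


# Example mirroring the assertions in the sharded test: 1000 bytes of weights
# with a 400-byte shard limit yields [400, 400, 200]; the first two shards are
# equal and the last one is smaller.
assert expected_shard_sizes(1000, 400) == [400, 400, 200]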
{ "content_hash": "ad6e0070de230a025391e979ad256801", "timestamp": "", "source": "github", "line_count": 1283, "max_line_length": 103, "avg_line_length": 38.34294621979735, "alnum_prop": 0.6386144651786804, "repo_name": "tensorflow/tfjs", "id": "ab96e9ac893abb7453b7c7a191a3efaa98d9eb65", "size": "49849", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "2165" }, { "name": "C", "bytes": "1149" }, { "name": "C++", "bytes": "511030" }, { "name": "CSS", "bytes": "27067" }, { "name": "Dockerfile", "bytes": "1840" }, { "name": "HTML", "bytes": "132169" }, { "name": "Java", "bytes": "4081" }, { "name": "JavaScript", "bytes": "1200362" }, { "name": "Objective-C", "bytes": "5247" }, { "name": "Python", "bytes": "518704" }, { "name": "Ruby", "bytes": "1981" }, { "name": "Shell", "bytes": "76252" }, { "name": "Starlark", "bytes": "176198" }, { "name": "TypeScript", "bytes": "10878537" } ], "symlink_target": "" }
import pytest import numpy as np import sklearn.datasets as datasets import sklearn.manifold as manifold import pandas_ml as pdml import pandas_ml.util.testing as tm class TestManifold(tm.TestCase): def test_objectmapper(self): df = pdml.ModelFrame([]) self.assertIs(df.manifold.LocallyLinearEmbedding, manifold.LocallyLinearEmbedding) self.assertIs(df.manifold.Isomap, manifold.Isomap) self.assertIs(df.manifold.MDS, manifold.MDS) self.assertIs(df.manifold.SpectralEmbedding, manifold.SpectralEmbedding) self.assertIs(df.manifold.TSNE, manifold.TSNE) def test_locally_linear_embedding(self): iris = datasets.load_iris() df = pdml.ModelFrame(iris) result = df.manifold.locally_linear_embedding(3, 3) expected = manifold.locally_linear_embedding(iris.data, 3, 3) self.assertEqual(len(result), 2) self.assertIsInstance(result[0], pdml.ModelFrame) tm.assert_index_equal(result[0].index, df.index) tm.assert_numpy_array_equal(result[0].values, expected[0]) self.assertEqual(result[1], expected[1]) def test_spectral_embedding(self): N = 10 m = np.random.random_integers(50, 200, size=(N, N)) m = (m + m.T) / 2 df = pdml.ModelFrame(m) self.assert_numpy_array_almost_equal(df.data.values, m) result = df.manifold.spectral_embedding(random_state=self.random_state) expected = manifold.spectral_embedding(m, random_state=self.random_state) self.assertIsInstance(result, pdml.ModelFrame) tm.assert_index_equal(result.index, df.index) # signs can be inversed self.assert_numpy_array_almost_equal(np.abs(result.data.values), np.abs(expected)) @pytest.mark.parametrize("algo", ['Isomap']) def test_Isomap(self, algo): iris = datasets.load_iris() df = pdml.ModelFrame(iris) mod1 = getattr(df.manifold, algo)() mod2 = getattr(manifold, algo)() df.fit(mod1) mod2.fit(iris.data) result = df.transform(mod1) expected = mod2.transform(iris.data) self.assertIsInstance(result, pdml.ModelFrame) tm.assert_index_equal(result.index, df.index) self.assert_numpy_array_almost_equal(result.data.values, expected) @pytest.mark.parametrize("algo", ['MDS']) def test_MDS(self, algo): iris = datasets.load_iris() df = pdml.ModelFrame(iris) mod1 = getattr(df.manifold, algo)(random_state=self.random_state) mod2 = getattr(manifold, algo)(random_state=self.random_state) result = df.fit_transform(mod1) expected = mod2.fit_transform(iris.data) self.assertIsInstance(result, pdml.ModelFrame) tm.assert_index_equal(result.index, df.index) self.assert_numpy_array_almost_equal(result.data.values, expected) @pytest.mark.parametrize("algo", ['TSNE']) def test_TSNE(self, algo): digits = datasets.load_digits() df = pdml.ModelFrame(digits) mod1 = getattr(df.manifold, algo)(n_components=2, random_state=self.random_state) mod2 = getattr(manifold, algo)(n_components=2, random_state=self.random_state) # np.random.seed(1) result = df.fit_transform(mod1) # np.random.seed(1) expected = mod2.fit_transform(digits.data) self.assertIsInstance(result, pdml.ModelFrame) tm.assert_index_equal(result.index, df.index) self.assert_numpy_array_almost_equal(result.data.shape, expected.shape)
{ "content_hash": "a2393c9b77287abb6531dbcdf9cbf873", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 89, "avg_line_length": 36.94059405940594, "alnum_prop": 0.6330742428303404, "repo_name": "pandas-ml/pandas-ml", "id": "e846dac0a8b09051532a0ef1c07d4fabe836305e", "size": "3754", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pandas_ml/skaccessors/test/test_manifold.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "398175" }, { "name": "Shell", "bytes": "898" } ], "symlink_target": "" }
from django.db import models class Praise(models.Model): to = models.CharField(max_length=500, blank=False, null=False) by = models.CharField(max_length=500, blank=False, null=False) description = models.TextField(blank=False, null=False) number_of_hearts = models.IntegerField(default=0) created = models.DateTimeField(auto_now_add=True)
{ "content_hash": "70968a587f0940ef002b2a2e6211d15d", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 66, "avg_line_length": 40.111111111111114, "alnum_prop": 0.739612188365651, "repo_name": "prontotools/pronto-praise", "id": "1147e8f7259fe8e1e672168c9d4e9c65fb98c757", "size": "361", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "pronto_praise/praises/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1509846" }, { "name": "HTML", "bytes": "7159" }, { "name": "JavaScript", "bytes": "1533005" }, { "name": "Python", "bytes": "16539" }, { "name": "Shell", "bytes": "1538" } ], "symlink_target": "" }
""" Test functions for the sparse.linalg._eigen.lobpcg module """ import itertools import platform import sys import numpy as np from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, assert_array_less, suppress_warnings) import pytest from numpy import ones, r_, diag from scipy.linalg import eig, eigh, toeplitz, orth from scipy.sparse import spdiags, diags, eye from scipy.sparse.linalg import eigs, LinearOperator from scipy.sparse.linalg._eigen.lobpcg import lobpcg _IS_32BIT = (sys.maxsize < 2**32) def ElasticRod(n): """Build the matrices for the generalized eigenvalue problem of the fixed-free elastic rod vibration model. """ L = 1.0 le = L/n rho = 7.85e3 S = 1.e-4 E = 2.1e11 mass = rho*S*le/6. k = E*S/le A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1)) B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1)) return A, B def MikotaPair(n): """Build a pair of full diagonal matrices for the generalized eigenvalue problem. The Mikota pair acts as a nice test since the eigenvalues are the squares of the integers n, n=1,2,... """ x = np.arange(1, n+1) B = diag(1./x) y = np.arange(n-1, 0, -1) z = np.arange(2*n-1, 0, -2) A = diag(z)-diag(y, -1)-diag(y, 1) return A, B def compare_solutions(A, B, m): """Check eig vs. lobpcg consistency. """ n = A.shape[0] rnd = np.random.RandomState(0) V = rnd.random((n, m)) X = orth(V) eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False) eigvals.sort() w, _ = eig(A, b=B) w.sort() assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2) def test_Small(): A, B = ElasticRod(10) with pytest.warns(UserWarning, match="The problem size"): compare_solutions(A, B, 10) A, B = MikotaPair(10) with pytest.warns(UserWarning, match="The problem size"): compare_solutions(A, B, 10) def test_ElasticRod(): A, B = ElasticRod(20) with pytest.warns(UserWarning, match="Exited at iteration"): compare_solutions(A, B, 2) def test_MikotaPair(): A, B = MikotaPair(20) compare_solutions(A, B, 2) @pytest.mark.filterwarnings("ignore:Exited at iteration 0") def test_nonhermitian_warning(capsys): """Check the warning of a Ritz matrix being not Hermitian by feeding a non-Hermitian input matrix. Also check stdout since verbosityLevel=1 and lack of stderr. """ n = 10 X = np.arange(n * 2).reshape(n, 2).astype(np.float32) A = np.arange(n * n).reshape(n, n).astype(np.float32) with pytest.warns(UserWarning, match="Matrix gramA"): _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0) out, err = capsys.readouterr() # Capture output assert out.startswith("Solving standard eigenvalue") # Test stdout assert err == '' # Test empty stderr # Make the matrix symmetric and the UserWarning dissappears. A += A.T _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0) out, err = capsys.readouterr() # Capture output assert out.startswith("Solving standard eigenvalue") # Test stdout assert err == '' # Test empty stderr def test_regression(): """Check the eigenvalue of the identity matrix is one. """ # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html n = 10 X = np.ones((n, 1)) A = np.identity(n) w, _ = lobpcg(A, X) assert_allclose(w, [1]) def test_diagonal(): """Check for diagonal matrices. """ rnd = np.random.RandomState(0) n = 100 m = 4 # Define the generalized eigenvalue problem Av = cBv # where (c, v) is a generalized eigenpair, # and where we choose A to be the diagonal matrix whose entries are 1..n # and where B is chosen to be the identity matrix. 
vals = np.arange(1, n+1, dtype=float) A = diags([vals], [0], (n, n)) B = eye(n) # Let the preconditioner M be the inverse of A. M = diags([1./vals], [0], (n, n)) # Pick random initial vectors. X = rnd.random((n, m)) # Require that the returned eigenvectors be in the orthogonal complement # of the first few standard basis vectors. m_excluded = 3 Y = np.eye(n, m_excluded) eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False) assert_allclose(eigvals, np.arange(1+m_excluded, 1+m_excluded+m)) _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3) def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14): """Check if the eigenvalue residual is small. """ mult_wV = np.multiply(w, V) dot_MV = M.dot(V) assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol) def _check_fiedler(n, p): """Check the Fiedler vector computation. """ # This is not necessarily the recommended way to find the Fiedler vector. col = np.zeros(n) col[1] = 1 A = toeplitz(col) D = np.diag(A.sum(axis=1)) L = D - A # Compute the full eigendecomposition using tricks, e.g. # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf tmp = np.pi * np.arange(n) / n analytic_w = 2 * (1 - np.cos(tmp)) analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp)) _check_eigen(L, analytic_w, analytic_V) # Compute the full eigendecomposition using eigh. eigh_w, eigh_V = eigh(L) _check_eigen(L, eigh_w, eigh_V) # Check that the first eigenvalue is near zero and that the rest agree. assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14) assert_allclose(eigh_w[1:], analytic_w[1:]) # Check small lobpcg eigenvalues. X = analytic_V[:, :p] lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False) assert_equal(lobpcg_w.shape, (p,)) assert_equal(lobpcg_V.shape, (n, p)) _check_eigen(L, lobpcg_w, lobpcg_V) assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14) assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p]) # Check large lobpcg eigenvalues. X = analytic_V[:, -p:] lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True) assert_equal(lobpcg_w.shape, (p,)) assert_equal(lobpcg_V.shape, (n, p)) _check_eigen(L, lobpcg_w, lobpcg_V) assert_allclose(np.sort(lobpcg_w), analytic_w[-p:]) # Look for the Fiedler vector using good but not exactly correct guesses. fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2))) X = np.vstack((np.ones(n), fiedler_guess)).T lobpcg_w, _ = lobpcg(L, X, largest=False) # Mathematically, the smaller eigenvalue should be zero # and the larger should be the algebraic connectivity. lobpcg_w = np.sort(lobpcg_w) assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14) def test_fiedler_small_8(): """Check the dense workaround path for small matrices. """ # This triggers the dense path because 8 < 2*5. with pytest.warns(UserWarning, match="The problem size"): _check_fiedler(8, 2) def test_fiedler_large_12(): """Check the dense workaround path avoided for non-small matrices. """ # This does not trigger the dense path, because 2*5 <= 12. _check_fiedler(12, 2) def test_failure_to_run_iterations(): """Check that the code exists gracefully without breaking. Issue #10974. """ rnd = np.random.RandomState(4120349) X = rnd.standard_normal((100, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 4)) with pytest.warns(UserWarning, match="Exited at iteration"): eigenvalues, _ = lobpcg(A, Q, maxiter=20) assert(np.max(eigenvalues) > 0) @pytest.mark.filterwarnings("ignore:The problem size") def test_hermitian(): """Check complex-value Hermitian cases. 
""" rnd = np.random.RandomState(0) sizes = [3, 10, 50] ks = [1, 3, 10, 50] gens = [True, False] for s, k, gen in itertools.product(sizes, ks, gens): if k > s: continue H = rnd.random((s, s)) + 1.j * rnd.random((s, s)) H = 10 * np.eye(s) + H + H.T.conj() X = rnd.random((s, k)) if not gen: B = np.eye(s) w, v = lobpcg(H, X, maxiter=5000) w0, _ = eigh(H) else: B = rnd.random((s, s)) + 1.j * rnd.random((s, s)) B = 10 * np.eye(s) + B.dot(B.T.conj()) w, v = lobpcg(H, X, B, maxiter=5000, largest=False) w0, _ = eigh(H, B) for wx, vx in zip(w, v.T): # Check eigenvector assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx) / np.linalg.norm(H.dot(vx)), 0, atol=5e-4, rtol=0) # Compare eigenvalues j = np.argmin(abs(w0 - wx)) assert_allclose(wx, w0[j], rtol=1e-4) # The n=5 case tests the alternative small matrix code path that uses eigh(). @pytest.mark.filterwarnings("ignore:The problem size") @pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)]) def test_eigs_consistency(n, atol): """Check eigs vs. lobpcg consistency. """ vals = np.arange(1, n+1, dtype=np.float64) A = spdiags(vals, 0, n, n) rnd = np.random.RandomState(0) X = rnd.random((n, 2)) lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100) vals, _ = eigs(A, k=2) _check_eigen(A, lvals, lvecs, atol=atol, rtol=0) assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14) def test_verbosity(tmpdir): """Check that nonzero verbosity level code runs. """ rnd = np.random.RandomState(0) X = rnd.standard_normal((10, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 1)) with pytest.warns(UserWarning, match="Exited at iteration"): _, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9) @pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32', reason="tolerance violation on windows") @pytest.mark.xfail(platform.machine() == 'ppc64le', reason="fails on ppc64le") def test_tolerance_float32(): """Check lobpcg for attainable tolerance in float32. """ rnd = np.random.RandomState(0) n = 50 m = 3 vals = -np.arange(1, n + 1) A = diags([vals], [0], (n, n)) A = A.astype(np.float32) X = rnd.standard_normal((n, m)) X = X.astype(np.float32) eigvals, _ = lobpcg(A, X, tol=1e-5, maxiter=50, verbosityLevel=0) assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1.5e-5) def test_random_initial_float32(): """Check lobpcg in float32 for specific initial. """ rnd = np.random.RandomState(0) n = 50 m = 4 vals = -np.arange(1, n + 1) A = diags([vals], [0], (n, n)) A = A.astype(np.float32) X = rnd.random((n, m)) X = X.astype(np.float32) eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1) assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2) def test_maxit(): """Check lobpcg if maxit=10 runs 10 iterations if maxit=None runs 20 iterations (the default) by checking the size of the iteration history output, which should be the number of iterations plus 2 (initial and final values). """ rnd = np.random.RandomState(0) n = 50 m = 4 vals = -np.arange(1, n + 1) A = diags([vals], [0], (n, n)) A = A.astype(np.float32) X = rnd.standard_normal((n, m)) X = X.astype(np.float32) with pytest.warns(UserWarning, match="Exited at iteration"): _, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=10, retLambdaHistory=True) assert_allclose(np.shape(l_h)[0], 10+2) with pytest.warns(UserWarning, match="Exited at iteration"): _, _, l_h = lobpcg(A, X, tol=1e-8, retLambdaHistory=True) assert_allclose(np.shape(l_h)[0], 20+2) @pytest.mark.slow def test_diagonal_data_types(): """Check lobpcg for diagonal matrices for all matrix types. 
""" rnd = np.random.RandomState(0) n = 40 m = 4 # Define the generalized eigenvalue problem Av = cBv # where (c, v) is a generalized eigenpair, # and where we choose A and B to be diagonal. vals = np.arange(1, n + 1) list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'] sparse_formats = len(list_sparse_format) for s_f_i, s_f in enumerate(list_sparse_format): As64 = diags([vals * vals], [0], (n, n), format=s_f) As32 = As64.astype(np.float32) Af64 = As64.toarray() Af32 = Af64.astype(np.float32) listA = [Af64, As64, Af32, As32] Bs64 = diags([vals], [0], (n, n), format=s_f) Bf64 = Bs64.toarray() listB = [Bf64, Bs64] # Define the preconditioner function as LinearOperator. Ms64 = diags([1./vals], [0], (n, n), format=s_f) def Ms64precond(x): return Ms64 @ x Ms64precondLO = LinearOperator(matvec=Ms64precond, matmat=Ms64precond, shape=(n, n), dtype=float) Mf64 = Ms64.toarray() def Mf64precond(x): return Mf64 @ x Mf64precondLO = LinearOperator(matvec=Mf64precond, matmat=Mf64precond, shape=(n, n), dtype=float) Ms32 = Ms64.astype(np.float32) def Ms32precond(x): return Ms32 @ x Ms32precondLO = LinearOperator(matvec=Ms32precond, matmat=Ms32precond, shape=(n, n), dtype=np.float32) Mf32 = Ms32.toarray() def Mf32precond(x): return Mf32 @ x Mf32precondLO = LinearOperator(matvec=Mf32precond, matmat=Mf32precond, shape=(n, n), dtype=np.float32) listM = [None, Ms64precondLO, Mf64precondLO, Ms32precondLO, Mf32precondLO] # Setup matrix of the initial approximation to the eigenvectors # (cannot be sparse array). Xf64 = rnd.random((n, m)) Xf32 = Xf64.astype(np.float32) listX = [Xf64, Xf32] # Require that the returned eigenvectors be in the orthogonal complement # of the first few standard basis vectors (cannot be sparse array). m_excluded = 3 Yf64 = np.eye(n, m_excluded, dtype=float) Yf32 = np.eye(n, m_excluded, dtype=np.float32) listY = [Yf64, Yf32] tests = list(itertools.product(listA, listB, listM, listX, listY)) # This is one of the slower tests because there are >1,000 configs # to test here, instead of checking product of all input, output types # test each configuration for the first sparse format, and then # for one additional sparse format. this takes 2/7=30% as long as # testing all configurations for all sparse formats. if s_f_i > 0: tests = tests[s_f_i - 1::sparse_formats-1] for A, B, M, X, Y in tests: eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4, maxiter=100, largest=False) assert_allclose(eigvals, np.arange(1 + m_excluded, 1 + m_excluded + m))
{ "content_hash": "b78e395691d4521bc7f9c819f61ac35d", "timestamp": "", "source": "github", "line_count": 441, "max_line_length": 82, "avg_line_length": 34.3469387755102, "alnum_prop": 0.5941110450914372, "repo_name": "zerothi/scipy", "id": "bc513803aab1ff7e2bb2067246fa828723fb946c", "size": "15147", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "4306215" }, { "name": "C++", "bytes": "3692292" }, { "name": "Fortran", "bytes": "5573034" }, { "name": "HTML", "bytes": "124330" }, { "name": "MATLAB", "bytes": "4346" }, { "name": "Makefile", "bytes": "76425" }, { "name": "Python", "bytes": "10541152" }, { "name": "Shell", "bytes": "2218" }, { "name": "TeX", "bytes": "52106" } ], "symlink_target": "" }
""" stratus // db.py Author: Alex Kalicki (https://github.com/akalicki) """ import pymongo import dbox, navigate client = pymongo.MongoClient() db = client.stratus def directory_exists(parent, name): """Returns whether the given directory already exists""" return db.dirs.find({'parent': parent, 'name': name}).count() > 0 def directory_empty(parent, name): """Returns whether the given directory is empty""" if parent == '': abs_path = name else: abs_path = parent + '/' + name return (db.dirs.find({'parent': abs_path}).count() == 0 and db.files.find({'parent': abs_path}).count() == 0) def create_directory(parent, name): """Creates stratus directory with given name in the parent folder""" up_parent, up_name = navigate.split_path(parent) if up_parent is not None and not directory_exists(up_parent, up_name): print "Error: '" + up_name + "' is not a valid directory." elif directory_exists(parent, name): print "Error: '" + name + "' already exists." else: new_dir = {'parent': parent, 'name': name} db.dirs.insert(new_dir) def remove_directory(parent, name): """Deletes stratus directory with given name in the parent folder""" if not directory_exists(parent, name): print "Error: '" + name + "' is not a valid directory." elif not directory_empty(parent, name): print "Error: '" + name + "' is not empty." else: db.dirs.remove({'parent': parent, 'name': name}) def add_account(access_token): """Adds Dropbox account info to the database for future use""" available_space = dbox.account_space(access_token) new_account = {'access_token': access_token, 'available_space': available_space} db.accounts.insert(new_account) def find_account_with_space(space): """Returns account with free space or None if not found""" needed_space = space - dbox.ACCOUNT_BUFFER return db.accounts.find_one({'available_space': {'$gte': needed_space}}) def file_exists(parent, name): return db.files.find({'parent': parent, 'name': name}).count() > 0 def add_file(access_token, parent, name): """Adds file info to the database for future use""" new_file = {'access_token': access_token, 'parent': parent, 'name': name} db.files.insert(new_file) updated_space = dbox.account_space(access_token) db.accounts.update({'access_token': access_token}, {'$set': {'available_space': updated_space}}) def remove_file(access_token, parent, name): """Deletes stratus file with given name in the parent folder""" if not file_exists(parent, name): print "Error: '" + name + "' does not exist." else: db.files.remove({'parent': parent, 'name': name}) updated_space = dbox.account_space(access_token) db.accounts.update({'access_token': access_token}, {'$set': {'available_space': updated_space}}) def move_file(cur_parent, cur_name, new_parent, new_name): """Moves a file from one location to another""" if not file_exists(cur_parent, cur_name): print "Error: '" + name + "' does not exist." else: db.files.update({'parent': cur_parent, 'name': cur_name}, {'$set': {'parent': new_parent, 'name': new_name}}) def get_access_to_file(parent, name): """Returns access token to Dropbox account storing queried file""" if not file_exists(parent, name): print "Error: '" + name + "' does not exist." return None sfile = db.files.find_one({'parent': parent, 'name': name}) return sfile["access_token"] def list_files(path): """Lists all folders and files in given stratus directory""" dirs = db.dirs.find({'parent': path}) for d in dirs: print d['name'] + "/" files = db.files.find({'parent': path}) for f in files: print f['name']
{ "content_hash": "a2b5fbccf1cc1fcdc4e9009ef73d1651", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 77, "avg_line_length": 39.04950495049505, "alnum_prop": 0.6222109533468559, "repo_name": "akalicki/stratus", "id": "ae79e0d1d6d2b1116cfb739d4ba00e568ed81142", "size": "3944", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/db.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "17308" } ], "symlink_target": "" }
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, os, sys, xbmcvfs, glob import shutil import urllib2,urllib import re import uservar import time try: from sqlite3 import dbapi2 as database except: from pysqlite2 import dbapi2 as database from datetime import date, datetime, timedelta from resources.libs import wizard as wiz ADDON_ID = uservar.ADDON_ID ADDONTITLE = uservar.ADDONTITLE ADDON = wiz.addonId(ADDON_ID) DIALOG = xbmcgui.Dialog() HOME = xbmc.translatePath('special://home/') ADDONS = os.path.join(HOME, 'addons') USERDATA = os.path.join(HOME, 'userdata') PLUGIN = os.path.join(ADDONS, ADDON_ID) PACKAGES = os.path.join(ADDONS, 'packages') ADDONDATA = os.path.join(USERDATA, 'addon_data', ADDON_ID) ADDOND = os.path.join(USERDATA, 'addon_data') LOGINFOLD = os.path.join(ADDONDATA, 'login') ICON = os.path.join(PLUGIN, 'icon.png') TODAY = date.today() TOMORROW = TODAY + timedelta(days=1) THREEDAYS = TODAY + timedelta(days=3) KEEPLOGIN = wiz.getS('keeplogin') LOGINSAVE = wiz.getS('loginlastsave') COLOR1 = uservar.COLOR1 COLOR2 = uservar.COLOR2 ORDER = ['stargate', 'iplayerwww', 'musicbox'] LOGINID = { 'stargate': { 'name' : 'Stargate Streams', 'plugin' : 'plugin.video.stargate-streams', 'saved' : 'loginstargatestreams', 'path' : os.path.join(ADDONS, 'plugin.video.stargate-streams'), 'icon' : os.path.join(ADDONS, 'plugin.video.stargate-streams', 'icon.png'), 'fanart' : os.path.join(ADDONS, 'plugin.video.stargate-streams', 'fanart.jpg'), 'file' : os.path.join(LOGINFOLD, 'stargate_login'), 'settings' : os.path.join(ADDOND, 'plugin.video.stargate-streams', 'settings.xml'), 'default' : 'Username', 'data' : ['Username', 'Password'], 'activate' : ''}, 'iplayerwww': { 'name' : 'iPlayer WWW', 'plugin' : 'plugin.video.iplayerwww', 'saved' : 'loginiplayerwww', 'path' : os.path.join(ADDONS, 'plugin.video.iplayerwww'), 'icon' : os.path.join(ADDONS, 'plugin.video.iplayerwww', 'icon.png'), 'fanart' : os.path.join(ADDONS, 'plugin.video.iplayerwww', 'fanart.jpg'), 'file' : os.path.join(LOGINFOLD, 'iplayerwww_login'), 'settings' : os.path.join(ADDOND, 'plugin.video.iplayerwww', 'settings.xml'), 'default' : 'bbc_id_username', 'data' : ['bbc_id_username', 'bbc_id_password'], 'activate' : ''}, 'musicbox': { 'name' : 'Music Box', 'plugin' : 'plugin.audio.musicbox', 'saved' : 'loginmusicbox', 'path' : os.path.join(ADDONS, 'plugin.audio.musicbox'), 'icon' : os.path.join(ADDONS, 'plugin.audio.musicbox', 'icon.png'), 'fanart' : os.path.join(ADDONS, 'plugin.audio.musicbox', 'fanart.jpg'), 'file' : os.path.join(LOGINFOLD, 'musicbox_login'), 'settings' : os.path.join(ADDOND, 'plugin.audio.musicbox', 'settings.xml'), 'default' : '8tracks_email', 'data' : ['8tracks_email', '8tracks_password', 'lastfm_email', 'lastfm_password', 'vk_email', 'vk_password'], 'activate' : ''} } def loginUser(who): user=None if LOGINID[who]: if os.path.exists(LOGINID[who]['path']): try: add = wiz.addonId(LOGINID[who]['plugin']) user = add.getSetting(LOGINID[who]['default']) except: pass return user def loginIt(do, who): if not os.path.exists(ADDONDATA): os.makedirs(ADDONDATA) if not os.path.exists(LOGINFOLD): os.makedirs(LOGINFOLD) if who == 'all': for log in ORDER: if os.path.exists(LOGINID[log]['path']): try: addonid = wiz.addonId(LOGINID[log]['plugin']) default = LOGINID[log]['default'] user = addonid.getSetting(default) if user == '' and do == 'update': continue updateLogin(do, log) except: pass else: wiz.log('[Login Data] %s(%s) is not installed' % (LOGINID[log]['name'],LOGINID[log]['plugin']), xbmc.LOGERROR) wiz.setS('loginlastsave', 
str(THREEDAYS)) else: if LOGINID[who]: if os.path.exists(LOGINID[who]['path']): updateLogin(do, who) else: wiz.log('[Login Data] Invalid Entry: %s' % who, xbmc.LOGERROR) def clearSaved(who, over=False): if who == 'all': for login in LOGINID: clearSaved(login, True) elif LOGINID[who]: file = LOGINID[who]['file'] if os.path.exists(file): os.remove(file) wiz.LogNotify('[COLOR %s]%s[/COLOR]' % (COLOR1, LOGINID[who]['name']), '[COLOR %s]Login Data: Removed![/COLOR]' % COLOR2, 2000, LOGINID[who]['icon']) wiz.setS(LOGINID[who]['saved'], '') if over == False: wiz.refresh() def updateLogin(do, who): file = LOGINID[who]['file'] settings = LOGINID[who]['settings'] data = LOGINID[who]['data'] addonid = wiz.addonId(LOGINID[who]['plugin']) saved = LOGINID[who]['saved'] default = LOGINID[who]['default'] user = addonid.getSetting(default) suser = wiz.getS(saved) name = LOGINID[who]['name'] icon = LOGINID[who]['icon'] if do == 'update': if not user == '': try: with open(file, 'w') as f: for login in data: f.write('<login>\n\t<id>%s</id>\n\t<value>%s</value>\n</login>\n' % (login, addonid.getSetting(login))) f.close() user = addonid.getSetting(default) wiz.setS(saved, user) wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, name),'[COLOR %s]Login Data: Saved![/COLOR]' % COLOR2, 2000, icon) except Exception, e: wiz.log("[Login Data] Unable to Update %s (%s)" % (who, str(e)), xbmc.LOGERROR) else: wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, name),'[COLOR %s]Login Data: Not Registered![/COLOR]' % COLOR2, 2000, icon) elif do == 'restore': if os.path.exists(file): f = open(file,mode='r'); g = f.read().replace('\n','').replace('\r','').replace('\t',''); f.close(); match = re.compile('<login><id>(.+?)</id><value>(.+?)</value></login>').findall(g) try: if len(match) > 0: for login, value in match: addonid.setSetting(login, value) user = addonid.getSetting(default) wiz.setS(saved, user) wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, name), '[COLOR %s]Login: Restored![/COLOR]' % COLOR2, 2000, icon) except Exception, e: wiz.log("[Login Data] Unable to Restore %s (%s)" % (who, str(e)), xbmc.LOGERROR) #else: wiz.LogNotify(name,'login Data: [COLOR red]Not Found![/COLOR]', 2000, icon) elif do == 'clearaddon': wiz.log('%s SETTINGS: %s' % (name, settings), xbmc.LOGDEBUG) if os.path.exists(settings): try: f = open(settings, "r"); lines = f.readlines(); f.close() f = open(settings, "w") for line in lines: match = wiz.parseDOM(line, 'setting', ret='id') if len(match) == 0: f.write(line) else: if match[0] not in data: f.write(line) else: wiz.log('Removing Line: %s' % line, xbmc.LOGNOTICE) f.close() wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, name),'[COLOR %s]Addon Data: Cleared![/COLOR]' % COLOR2, 2000, icon) except Exception, e: wiz.log("[Trakt Data] Unable to Clear Addon %s (%s)" % (who, str(e)), xbmc.LOGERROR) wiz.refresh() def autoUpdate(who): if who == 'all': for log in LOGINID: if os.path.exists(LOGINID[log]['path']): autoUpdate(log) elif LOGINID[who]: if os.path.exists(LOGINID[who]['path']): u = loginUser(who) su = wiz.getS(LOGINID[who]['saved']) n = LOGINID[who]['name'] if u == None or u == '': return elif su == '': loginIt('update', who) elif not u == su: if DIALOG.yesno(ADDONTITLE, "[COLOR %s]Would you like to save the [COLOR %s]Login[/COLOR] data for [COLOR %s]%s[/COLOR]?" 
% (COLOR2, COLOR1, COLOR1, n), "Addon: [COLOR green][B]%s[/B][/COLOR]" % u, "Saved:[/COLOR] [COLOR red][B]%s[/B][/COLOR]" % su if not su == '' else 'Saved:[/COLOR] [COLOR red][B]None[/B][/COLOR]', yeslabel="[B][COLOR green]Save Data[/COLOR][/B]", nolabel="[B][COLOR red]No Cancel[/COLOR][/B]"): loginIt('update', who) else: loginIt('update', who) def importlist(who): if who == 'all': for log in LOGINID: if os.path.exists(LOGINID[log]['file']): importlist(log) elif LOGINID[who]: if os.path.exists(LOGINID[who]['file']): d = LOGINID[who]['default'] sa = LOGINID[who]['saved'] su = wiz.getS(sa) n = LOGINID[who]['name'] f = open(LOGINID[who]['file'],mode='r'); g = f.read().replace('\n','').replace('\r','').replace('\t',''); f.close(); m = re.compile('<login><id>%s</id><value>(.+?)</value></login>' % d).findall(g) if len(m) > 0: if not m[0] == su: if DIALOG.yesno(ADDONTITLE, "[COLOR %s]Would you like to import the [COLOR %s]Login[/COLOR] data for [COLOR %s]%s[/COLOR]?" % (COLOR2, COLOR1, COLOR1, n), "File: [COLOR green][B]%s[/B][/COLOR]" % m[0], "Saved:[/COLOR] [COLOR red][B]%s[/B][/COLOR]" % su if not su == '' else 'Saved:[/COLOR] [COLOR red][B]None[/B][/COLOR]', yeslabel="[B][COLOR green]Save Data[/COLOR][/B]", nolabel="[B][COLOR red]No Cancel[/COLOR][/B]"): wiz.setS(sa, m[0]) wiz.log('[Import Data] %s: %s' % (who, str(m)), xbmc.LOGNOTICE) else: wiz.log('[Import Data] Declined Import(%s): %s' % (who, str(m)), xbmc.LOGNOTICE) else: wiz.log('[Import Data] Duplicate Entry(%s): %s' % (who, str(m)), xbmc.LOGNOTICE) else: wiz.log('[Import Data] No Match(%s): %s' % (who, str(m)), xbmc.LOGNOTICE) def activateLogin(who): if LOGINID[who]: if os.path.exists(LOGINID[who]['path']): act = LOGINID[who]['activate'] addonid = wiz.addonId(LOGINID[who]['plugin']) if act == '': addonid.openSettings() else: url = xbmc.executebuiltin(LOGINID[who]['activate']) else: DIALOG.ok(ADDONTITLE, '%s is not currently installed.' % LOGINID[who]['name']) else: wiz.refresh() return check = 0 while loginUser(who) == None or loginUser(who) == "": if check == 30: break check += 1 time.sleep(10) wiz.refresh()
{ "content_hash": "b4c9468976f8d59709669a811f9366fe", "timestamp": "", "source": "github", "line_count": 229, "max_line_length": 425, "avg_line_length": 42.633187772925766, "alnum_prop": 0.6163064631773021, "repo_name": "TheWardoctor/Wardoctors-repo", "id": "f90fc3e4d9ef664003787d906ded52249d0fa966", "size": "11222", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plugin.program.beta.wizard/resources/libs/loginit.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "3208" }, { "name": "JavaScript", "bytes": "115722" }, { "name": "Python", "bytes": "34405207" }, { "name": "Shell", "bytes": "914" } ], "symlink_target": "" }
import logging from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard import api from openstack_dashboard.dashboards.project.networks.ports \ import forms as project_forms LOG = logging.getLogger(__name__) class CreatePort(forms.SelfHandlingForm): network_name = forms.CharField(label=_("Network Name"), widget=forms.TextInput( attrs={'readonly': 'readonly'})) network_id = forms.CharField(label=_("Network ID"), widget=forms.TextInput( attrs={'readonly': 'readonly'})) name = forms.CharField(max_length=255, label=_("Name"), required=False) admin_state = forms.BooleanField(label=_("Admin State"), initial=True, required=False) device_id = forms.CharField(max_length=100, label=_("Device ID"), help_text=_("Device ID attached to the port"), required=False) device_owner = forms.CharField(max_length=100, label=_("Device Owner"), help_text=_("Device owner attached to the " "port"), required=False) def __init__(self, request, *args, **kwargs): super(CreatePort, self).__init__(request, *args, **kwargs) if api.neutron.is_extension_supported(request, 'mac-learning'): self.fields['mac_state'] = forms.BooleanField( label=_("MAC Learning State"), initial=False, required=False) def handle(self, request, data): try: # We must specify tenant_id of the network which a subnet is # created for if admin user does not belong to the tenant. network = api.neutron.network_get(request, data['network_id']) data['tenant_id'] = network.tenant_id data['admin_state_up'] = data['admin_state'] del data['network_name'] del data['admin_state'] if 'mac_state' in data: data['mac_learning_enabled'] = data['mac_state'] del data['mac_state'] port = api.neutron.port_create(request, **data) msg = _('Port %s was successfully created.') % port['id'] LOG.debug(msg) messages.success(request, msg) return port except Exception: msg = _('Failed to create a port for network %s') \ % data['network_id'] LOG.info(msg) redirect = reverse('horizon:admin:networks:detail', args=(data['network_id'],)) exceptions.handle(request, msg, redirect=redirect) class UpdatePort(project_forms.UpdatePort): # tenant_id = forms.CharField(widget=forms.HiddenInput()) device_id = forms.CharField(max_length=100, label=_("Device ID"), help_text=_("Device ID attached to the port"), required=False) device_owner = forms.CharField(max_length=100, label=_("Device Owner"), help_text=_("Device owner attached to the " "port"), required=False) failure_url = 'horizon:admin:networks:detail' def handle(self, request, data): try: LOG.debug('params = %s' % data) extension_kwargs = {} if 'mac_state' in data: extension_kwargs['mac_learning_enabled'] = data['mac_state'] port = api.neutron.port_update(request, data['port_id'], name=data['name'], admin_state_up=data['admin_state'], device_id=data['device_id'], device_owner=data['device_owner'], **extension_kwargs) msg = _('Port %s was successfully updated.') % data['port_id'] LOG.debug(msg) messages.success(request, msg) return port except Exception: msg = _('Failed to update port %s') % data['port_id'] LOG.info(msg) redirect = reverse(self.failure_url, args=[data['network_id']]) exceptions.handle(request, msg, redirect=redirect)
{ "content_hash": "6c153ba216fba589a428b96af5e99407", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 78, "avg_line_length": 45.883495145631066, "alnum_prop": 0.5135421074904782, "repo_name": "jumpstarter-io/horizon", "id": "cc2755e7847ff8a0d63902c1fa4cc9e30e06e770", "size": "5334", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "openstack_dashboard/dashboards/admin/networks/ports/forms.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from __future__ import absolute_import import warnings """M2Crypto support for Python's httplib. Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved.""" import base64 import socket from M2Crypto import SSL, py27plus, six from M2Crypto.six.moves.urllib_parse import urlsplit, urlunsplit from M2Crypto.six.moves.http_client import * # noqa # This is not imported with just '*' from M2Crypto.six.moves.http_client import HTTPS_PORT if py27plus: from typing import Any, AnyStr, Callable, Dict, List, Optional # noqa class HTTPSConnection(HTTPConnection): """ This class allows communication via SSL using M2Crypto. """ default_port = HTTPS_PORT def __init__(self, host, port=None, strict=None, **ssl): # type: (str, Optional[int], Optional[bool], **Any) -> None """ Represents one transaction with an HTTP server over the SSL connection. :param host: host name :param port: port number :param strict: if switched on, it raises BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line. :param ssl: dict with all remaining named real parameters of the function. Specifically, ``ssl_context`` is expected to be included with SSL.Context; if it is not default ``'sslv23'`` is substituted). """ self.session = None # type: bytes self.host = host self.port = port keys = set(ssl.keys()) - set(('key_file', 'cert_file', 'ssl_context')) if keys: raise ValueError('unknown keyword argument: %s', keys) try: self.ssl_ctx = ssl['ssl_context'] assert isinstance(self.ssl_ctx, SSL.Context), self.ssl_ctx except KeyError: self.ssl_ctx = SSL.Context() HTTPConnection.__init__(self, host, port, strict) def connect(self): # type: () -> None error = None # We ignore the returned sockaddr because SSL.Connection.connect needs # a host name. for (family, _, _, _, _) in \ socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): sock = None try: sock = SSL.Connection(self.ssl_ctx, family=family) # set SNI server name since we know it at this point sock.set_tlsext_host_name(self.host) if self.session is not None: sock.set_session(self.session) sock.connect((self.host, self.port)) self.sock = sock sock = None return except socket.error as e: # Other exception are probably SSL-related, in that case we # abort and the exception is forwarded to the caller. error = e finally: if sock is not None: sock.close() if error is None: raise AssertionError("Empty list returned by getaddrinfo") raise error def close(self): # type: () -> None # This kludges around line 545 of httplib.py, # which closes the connection in this object; # the connection remains open in the response # object. # # M2Crypto doesn't close-here-keep-open-there, # so, in effect, we don't close until the whole # business is over and gc kicks in. # # XXX Long-running callers beware leakage. # # XXX 05-Jan-2002: This module works with Python 2.2, # XXX but I've not investigated if the above conditions # XXX remain. pass def get_session(self): # type: () -> SSL.Session.Session return self.sock.get_session() def set_session(self, session): # type: (SSL.Session.Session) -> None self.session = session class ProxyHTTPSConnection(HTTPSConnection): """ An HTTPS Connection that uses a proxy and the CONNECT request. When the connection is initiated, CONNECT is first sent to the proxy (along with authorization headers, if supplied). If successful, an SSL connection will be established over the socket through the proxy and to the target host. Finally, the actual request is sent over the SSL connection tunneling through the proxy. 
""" _ports = {'http': 80, 'https': 443} _AUTH_HEADER = "Proxy-Authorization" _UA_HEADER = "User-Agent" def __init__(self, host, port=None, strict=None, username=None, password=None, **ssl): # type: (str, Optional[int], Optional[bool], Optional[AnyStr], Optional[AnyStr], **Any) -> None """ Create the ProxyHTTPSConnection object. :param host: host name of the proxy server :param port: port number of the proxy server :param strict: if switched on, it raises BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line. :param username: username on the proxy server, when required Username can be ``str``, but preferred type is ``bytes``. M2Crypto does some conversion to ``bytes`` when necessary, but it's better when the user of the library does it on its own. :param password: password on the proxy server, when required The same as with ``username``, ``str`` is accepted, but ``bytes`` are preferred. :param ssl: dict with all remaining named real parameters of the function. Specifically, ``ssl_context`` is expected to be included with SSL.Context; if it is not default ``'sslv23'`` is substituted). """ HTTPSConnection.__init__(self, host, port, strict, **ssl) self._username = username.encode('utf8') \ if isinstance(username, six.string_types) else username self._password = password.encode('utf8') \ if isinstance(password, six.string_types) else password self._proxy_auth = None # type: str self._proxy_UA = None # type: str def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): # type: (AnyStr, AnyStr, int, int) -> None """ putrequest is called before connect, so can interpret url and get real host/port to be used to make CONNECT request to proxy """ proto, netloc, path, query, fragment = urlsplit(url) if not proto: raise ValueError("unknown URL type: %s" % url) # get host & port try: username_password, host_port = netloc.split('@') except ValueError: host_port = netloc try: host, port_s = host_port.split(':') port = int(port_s) except ValueError: host = host_port # try to get port from proto try: port = self._ports[proto] except KeyError: raise ValueError("unknown protocol for: %s" % url) self._real_host = host # type: str self._real_port = port # type: int rest = urlunsplit(('', '', path, query, fragment)) HTTPSConnection.putrequest(self, method, rest, skip_host, skip_accept_encoding) def putheader(self, header, value): # type: (AnyStr, AnyStr) -> None # Store the auth header if passed in. if header.lower() == self._UA_HEADER.lower(): self._proxy_UA = value if header.lower() == self._AUTH_HEADER.lower(): self._proxy_auth = value else: HTTPSConnection.putheader(self, header, value) def endheaders(self, *args, **kwargs): # type: (*Any, **Any) -> None # We've recieved all of hte headers. Use the supplied username # and password for authorization, possibly overriding the authstring # supplied in the headers. if not self._proxy_auth: self._proxy_auth = self._encode_auth() HTTPSConnection.endheaders(self, *args, **kwargs) def connect(self): # type: () -> None HTTPConnection.connect(self) # send proxy CONNECT request self.sock.sendall(self._get_connect_msg()) response = HTTPResponse(self.sock) response.begin() code = response.status if code != 200: # proxy returned and error, abort connection, and raise exception self.close() raise socket.error("Proxy connection failed: %d" % code) self._start_ssl() def _get_connect_msg(self): # type: () -> bytes """ Return an HTTP CONNECT request to send to the proxy. 
""" msg = "CONNECT %s:%d HTTP/1.1\r\n" % (self._real_host, self._real_port) msg = msg + "Host: %s:%d\r\n" % (self._real_host, self._real_port) if self._proxy_UA: msg = msg + "%s: %s\r\n" % (self._UA_HEADER, self._proxy_UA) if self._proxy_auth: msg = msg + "%s: %s\r\n" % (self._AUTH_HEADER, self._proxy_auth) msg = msg + "\r\n" return six.ensure_binary(msg) def _start_ssl(self): # type: () -> None """ Make this connection's socket SSL-aware. """ self.sock = SSL.Connection(self.ssl_ctx, self.sock) self.sock.setup_ssl() self.sock.set_connect_state() self.sock.connect_ssl() def _encode_auth(self): # type: () -> Optional[bytes] """ Encode the username and password for use in the auth header. """ if not (self._username and self._password): return None # Authenticated proxy userpass = "%s:%s" % (self._username, self._password) with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) enc_userpass = base64.encodestring(userpass).replace("\n", "") return six.ensure_binary("Basic %s" % enc_userpass)
{ "content_hash": "83a1a57f56e3c77b1d2c592311f6f2f9", "timestamp": "", "source": "github", "line_count": 269, "max_line_length": 103, "avg_line_length": 38.25278810408922, "alnum_prop": 0.5724003887269193, "repo_name": "Edzvu/Edzvu.github.io", "id": "f62486569d3a1c39763997474a709183b2c61626", "size": "10290", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "M2Crypto-0.35.2/M2Crypto/httpslib.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import sys # pylint: skip-file if sys.version_info[0] == 2: import ipaddr as ipaddress # pylint:disable=F0401 ipaddress.ip_address = ipaddress.IPAddress int_from_byte = ord FileNotFoundError = IOError def int_from_bytes(b): if b: return int(b.encode("hex"), 16) return 0 byte_from_int = chr else: import ipaddress # pylint:disable=F0401 int_from_byte = lambda x: x FileNotFoundError = FileNotFoundError int_from_bytes = lambda x: int.from_bytes(x, 'big') byte_from_int = lambda x: bytes([x])
{ "content_hash": "39dec10c71bb95af6b0c199eaa8ded86", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 55, "avg_line_length": 20.571428571428573, "alnum_prop": 0.6354166666666666, "repo_name": "kustomzone/Fuzium", "id": "14c988324e6f470dd360490106b2118403b958b3", "size": "576", "binary": false, "copies": "22", "ref": "refs/heads/master", "path": "core/plugins/Sidebar/maxminddb/compat.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1204" }, { "name": "C", "bytes": "34092" }, { "name": "CSS", "bytes": "373182" }, { "name": "CoffeeScript", "bytes": "88917" }, { "name": "HTML", "bytes": "123191" }, { "name": "JavaScript", "bytes": "2133526" }, { "name": "Python", "bytes": "2843920" }, { "name": "Shell", "bytes": "898" } ], "symlink_target": "" }
""" [2016-06-27] Challenge #273 [Easy] Getting a degree https://www.reddit.com/r/dailyprogrammer/comments/4q35ip/20160627_challenge_273_easy_getting_a_degree/ # Description Welcome to DailyProgrammer University. Today you will be earning a degree in converting degrees. This includes Fahrenheit, Celsius, Kelvin, Degrees (angle), and Radians. # Input Description You will be given two lines of text as input. On the first line, you will receive a number followed by two letters, the first representing the unit that the number is currently in, the second representing the unit it needs to be converted to. Examples of valid units are: * `d` for degrees of a circle * `r` for radians # Output Description You must output the given input value, in the unit specified. It must be followed by the unit letter. You may round to a whole number, or to a few decimal places. # Challenge Input 3.1416rd 90dr # Challenge Output 180d 1.57r # Bonus Also support these units: * `c` for Celsius * `f` for Fahrenheit * `k` for Kelvin If the two units given are incompatible, give an error message as output. # Bonus Input 212fc 70cf 100cr 315.15kc # Bonus Output 100c 158f No candidate for conversion 42c # Notes * [See here](https://en.wikipedia.org/wiki/Conversion_of_units_of_temperature) for a wikipedia page with temperature conversion formulas. * [See here](http://www.teacherschoice.com.au/maths_library/angles/angles.htm) for a random web link about converting between degrees and radians. # Finally Have a good challenge idea? Consider submitting it to /r/dailyprogrammer_ideas """ def main(): pass if __name__ == "__main__": main()
{ "content_hash": "f2c1c28c84b3b473edd7706baeeb3772", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 102, "avg_line_length": 28.216666666666665, "alnum_prop": 0.7389249852333136, "repo_name": "DayGitH/Python-Challenges", "id": "e527188b94a1a883d96180e8bbdd87074b648641", "size": "1693", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DailyProgrammer/DP20160627A.py", "mode": "33188", "license": "mit", "language": [ { "name": "OpenEdge ABL", "bytes": "5002" }, { "name": "Python", "bytes": "2471582" } ], "symlink_target": "" }
""" slick.py is a library for talking to slick (http://code.google.com/p/slickqa). Copyright 2013 AccessData Group, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __author__ = 'Jason Corbett' import requests import logging import sys import traceback import pprint try: from urllib.parse import urlencode, quote except ImportError: from urllib import urlencode, quote from .micromodels import Model from .data import * from . import queries import os import mimetypes import hashlib if not mimetypes.inited: mimetypes.init() mimetypes.add_type('text/plain', '.log') json_content = {'Content-Type': 'application/json'} STREAM_CONTENT = {'Content-Type': 'application/octet-stream'} class FindOneModeEnum(object): """ """ FIRST = 1 LAST = 2 def __getattr__(self, name): if name in FindOneModeEnum.__dict__: return FindOneModeEnum.__dict__[name] raise AttributeError FindOneMode = FindOneModeEnum() class AttributeDict(dict): __getattr__ = dict.__getitem__ __setattr__ = dict.__setitem__ def obj_hook_attr_dict(dct): return AttributeDict(dct) class SlickApiPart(object): """A class representing part of the slick api""" def __init__(self, model, parentPart, name=None): self.model = model if name is None: self.name = model.__name__.lower() + "s" else: self.name = name self.parent = parentPart self.logger = logging.getLogger(name=self.get_name()) self.data = None def get_name(self): return self.parent.get_name() + "." + self.name def find(self, query=None, **kwargs): """ You can pass in the appropriate model object from the queries module, or a dictionary with the keys and values for the query, or a set of key=value parameters. """ url = self.getUrl() if query is not None: if isinstance(query, queries.SlickQuery): url = url + "?" + urlencode(query.to_dict()) elif isinstance(query, dict): url = url + "?" + urlencode(query) elif len(kwargs) > 0: url = url + "?" + urlencode(kwargs) # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: self.logger.debug("Making request to slick at url %s", url) r = requests.get(url) self.logger.debug("Request returned status code %d", r.status_code) if r.status_code is 200: retval = [] objects = r.json() for dct in objects: retval.append(self.model.from_dict(dct)) return retval else: self.logger.error("Slick returned an error when trying to access %s: status code %s" % (url, str(r.status_code))) self.logger.error("Slick response: ", pprint.pformat(r)) except BaseException as error: self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info()) raise SlickCommunicationError( "Tried 3 times to request data from slick at url %s without a successful status code.", url) query = find def findOne(self, query=None, mode=FindOneMode.FIRST, **kwargs): """ Perform a find, with the same options present, but only return a maximum of one result. If find returns an empty array, then None is returned. If there are multiple results from find, the one returned depends on the mode parameter. 
If mode is FindOneMode.FIRST, then the first result is returned. If the mode is FindOneMode.LAST, then the last is returned. If the mode is FindOneMode.ERROR, then a SlickCommunicationError is raised. """ results = self.find(query, **kwargs) if len(results) is 0: return None elif len(results) is 1 or mode == FindOneMode.FIRST: return results[0] elif mode == FindOneMode.LAST: return results[-1] def get(self): """Get the specified object from slick. You specify which one you want by providing the id as a parameter to the parent object. Example: slick.projects("4fd8cd95e4b0ee7ba54b9885").get() """ url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: self.logger.debug("Making request to slick at url %s", url) r = requests.get(url) self.logger.debug("Request returned status code %d", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: self.logger.debug("Body of what slick returned: %s", r.text) except BaseException as error: self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info()) raise SlickCommunicationError( "Tried 3 times to request data from slick at url %s without a successful status code.", url) def update(self): """Update the specified object from slick. You specify the object as a parameter, using the parent object as a function. Example: proj = slick.projects.findByName("foo") ... update proj here slick.projects(proj).update() """ obj = self.data url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out last_stats_code = None last_body = None for retry in range(3): try: json_data = obj.to_json() self.logger.debug("Making request to slick at url %s, with data: %s", url, json_data) r = requests.put(url, data=json_data, headers=json_content) self.logger.debug("Request returned status code %d", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: last_stats_code = r.status_code last_body = r.text self.logger.warn("Slick status code: %d", r.status_code) self.logger.warn("Body of what slick returned: %s", r.text) except BaseException as error: self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info()) traceback.print_exc() raise SlickCommunicationError( "Tried 3 times to request data from slick at url %s without a successful status code. Last status code: %d, body: %s", url, last_stats_code, last_body) put = update def create(self): """Create the specified object (perform a POST to the api). You specify the object as a parameter, using the parent object as a function. Example: proj = Project() ... 
        ... add project data here
        proj = slick.projects(proj).create()
        """
        obj = self.data
        self.data = None
        url = self.getUrl()

        # hopefully when we discover what problems exist in slick to require this, we can take the loop out
        for retry in range(3):
            try:
                json_data = obj.to_json()
                self.logger.debug("Making request to slick at url %s, with data: %s", url, json_data)
                r = requests.post(url, data=json_data, headers=json_content)
                self.logger.debug("Request returned status code %d", r.status_code)
                if r.status_code == 200:
                    return self.model.from_dict(r.json())
                else:
                    self.logger.debug("Body of what slick returned: %s", r.text)
            except BaseException as error:
                self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
        raise SlickCommunicationError(
            "Tried 3 times to request data from slick at url %s without a successful status code.", url)
    post = create

    def remove(self):
        """Remove or delete the specified object from slick. You specify which one you want by providing the id as
        a parameter to the parent object, using it as a function. Example:
        slick.projects("4fd8cd95e4b0ee7ba54b9885").remove()
        """
        url = self.getUrl()

        # hopefully when we discover what problems exist in slick to require this, we can take the loop out
        for retry in range(3):
            try:
                self.logger.debug("Making DELETE request to slick at url %s", url)
                r = requests.delete(url)
                self.logger.debug("Request returned status code %d", r.status_code)
                if r.status_code == 200:
                    return None
                else:
                    self.logger.debug("Body of what slick returned: %s", r.text)
            except BaseException as error:
                self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
        raise SlickCommunicationError(
            "Tried 3 times to request data from slick at url %s without a successful status code.", url)
    delete = remove

    def getUrl(self):
        url = self.parent.getUrl() + "/" + self.name
        if self.data is not None:
            if isinstance(self.data, Model) and hasattr(self.data, 'id'):
                url = url + "/" + self.data.id
            else:
                url = url + "/" + str(self.data)
            self.data = None
        return url

    def __call__(self, *args, **kwargs):
        if len(args) > 0:
            self.data = args[0]
        return self


class SlickProjectApiPart(SlickApiPart):

    def __init__(self, parentPart):
        super(SlickProjectApiPart, self).__init__(Project, parentPart)

    def findByName(self, name):
        """Find a project by its name"""
        self.data = "byname/" + quote(name) + '?quick=true'
        return self.get()


class SystemConfigurationApiPart(SlickApiPart):
    """
    The system-configuration api is different from other apis. The model for the return type is variable
    due to it being able to store different instances of system configuration classes.
    """

    def __init__(self, parentPart):
        super(SystemConfigurationApiPart, self).__init__(SystemConfiguration, parentPart, 'system-configuration')

    def __call__(self, model, data=None):
        """Make a request for system-configuration. You need to provide not only the data, but also the model
        that is needed for the return type.
        """
        self.model = model
        if data is not None:
            return super(SystemConfigurationApiPart, self).__call__(data)
        else:
            return self

    def find(self, query=None, **kwargs):
        instance = self.model()
        if hasattr(instance, 'configurationType') and instance.configurationType is not None:
            kwargs['config-type'] = instance.configurationType
        return super(SystemConfigurationApiPart, self).find(query, **kwargs)


def upload_chunks(url, stored_file, file_like_obj):
    md5 = hashlib.md5()
    bindata = file_like_obj.read(stored_file.chunkSize)
    while bindata:
        if isinstance(bindata, bytes):
            md5.update(bindata)
        elif isinstance(bindata, str):
            md5.update(bindata.encode('ascii', 'ignore'))
        requests.post(url, data=bindata, headers={'Content-Type': 'application/octet-stream'})
        bindata = file_like_obj.read(stored_file.chunkSize)
    stored_file.md5 = md5.hexdigest()


class StoredFileApiPart(SlickApiPart):

    def __init__(self, parentPart):
        super(StoredFileApiPart, self).__init__(StoredFile, parentPart, "files")

    def upload_local_file(self, local_file_path, file_obj=None):
        """Create a Stored File and upload its data. This is a one part do it all type method. Here is what it does:
        1. "Discover" information about the file (mime-type, size)
        2. Create the stored file object in slick
        3. Upload (chunked) all the data in the local file
        4. re-fetch the stored file object from slick, and return it
        """
        if file_obj is None and not os.path.exists(local_file_path):
            return
        storedfile = StoredFile()
        storedfile.mimetype = mimetypes.guess_type(local_file_path)[0]
        storedfile.filename = os.path.basename(local_file_path)
        if file_obj is None:
            storedfile.length = os.stat(local_file_path).st_size
        else:
            file_obj.seek(0, os.SEEK_END)
            storedfile.length = file_obj.tell()
            file_obj.seek(0)
        storedfile = self(storedfile).create()
        md5 = hashlib.md5()
        url = self(storedfile).getUrl() + "/addchunk"
        if file_obj is None:
            with open(local_file_path, 'rb') as filecontents:
                upload_chunks(url, storedfile, filecontents)
        else:
            upload_chunks(url, storedfile, file_obj)
        return self(storedfile).update()


class TestrunGroupApiPart(SlickApiPart):

    def __init__(self, parentPart):
        super(TestrunGroupApiPart, self).__init__(TestrunGroup, parentPart)

    def add_testrun(self, testrun):
        id = testrun
        if isinstance(testrun, Testrun):
            id = testrun.id
        url = self.getUrl()

        # hopefully when we discover what problems exist in slick to require this, we can take the loop out
        for retry in range(3):
            try:
                self.logger.debug("Making request to slick at url %s", url)
                r = requests.post(url + "/addtestrun/" + id)
                self.logger.debug("Request returned status code %d", r.status_code)
                if r.status_code == 200:
                    return self.model.from_dict(r.json())
                else:
                    self.logger.debug("Body of what slick returned: %s", r.text)
            except BaseException as error:
                self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
        raise SlickCommunicationError(
            "Tried 3 times to request data from slick at url %s without a successful status code.", url)

    def remove_testrun(self, testrun):
        id = testrun
        if isinstance(testrun, Testrun):
            id = testrun.id
        # build the url once so that a retry does not append the path a second time
        url = self.getUrl() + "/removetestrun/" + id

        # hopefully when we discover what problems exist in slick to require this, we can take the loop out
        for retry in range(3):
            try:
                self.logger.debug("Making request to slick at url %s", url)
                r = requests.delete(url)
                self.logger.debug("Request returned status code %d", r.status_code)
                if r.status_code == 200:
                    return self.model.from_dict(r.json())
                else:
                    self.logger.debug("Body of what slick returned: %s", r.text)
            except BaseException as error:
                self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
        raise SlickCommunicationError(
            "Tried 3 times to request data from slick at url %s without a successful status code.", url)


class SlickCommunicationError(Exception):

    def __init__(self, *args, **kwargs):
        super(SlickCommunicationError, self).__init__(*args, **kwargs)


class SlickConnection(object):
    """Slick Connection contains the information on how to connect to slick."""
    logger = logging.getLogger("slick.SlickConnection")

    def __init__(self, baseUrl):
        """Create a new connection to slick, providing the base url under which to contact slick."""
        if baseUrl is None or not isinstance(baseUrl, str):
            SlickConnection.logger.error("Base URL provided to slick connection is not a string.")
            raise SlickCommunicationError("Base URL provided to slick connection is not a string.")
        if baseUrl.endswith("/"):
            self.baseUrl = baseUrl + "api"
        else:
            self.baseUrl = baseUrl + "/api"

        self.configurations = SlickApiPart(Configuration, self)
        self.projects = SlickProjectApiPart(self)
        self.projects.releases = SlickApiPart(Release, self.projects)
        self.projects.releases.builds = SlickApiPart(Build, self.projects.releases)
        self.projects.components = SlickApiPart(Component, self.projects)
        self.systemconfigurations = SystemConfigurationApiPart(self)
        self.testplans = SlickApiPart(Testplan, self)
        self.testruns = SlickApiPart(Testrun, self)
        self.version = SlickApiPart(ProductVersion, self, name='version')
        self.testcases = SlickApiPart(Testcase, self)
        self.results = SlickApiPart(Result, self)
        self.testrungroups = TestrunGroupApiPart(self)
        self.hoststatus = SlickApiPart(HostStatus, self, name='hoststatus')
        self.updates = SlickApiPart(SlickUpdate, self, name='updates')
        self.updates.records = SlickApiPart(UpdateRecord, self.updates, name='records')
        self.quotes = SlickApiPart(Quote, self)
        self.files = StoredFileApiPart(self)

    def getUrl(self):
        """This method is used by the slick api parts to get the base url."""
        return self.baseUrl

    def get_name(self):
        return "slick"
{ "content_hash": "059384ebed91a68cccba53ce2680093e", "timestamp": "", "source": "github", "line_count": 444, "max_line_length": 164, "avg_line_length": 41.164414414414416, "alnum_prop": 0.6107676314493626, "repo_name": "slickqa/python-client", "id": "1b81f515cfabd825e1dd374e4d5e2aa7c11f0421", "size": "18277", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "slickqa/connection.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "5560" }, { "name": "Python", "bytes": "126515" } ], "symlink_target": "" }
import wikipedia as wiki
import json

MAX_CATEGORY_DEPTH = 3


def is_algorithm_page(page):
    category_match = False
    for category in page.categories:
        if 'Algorithm' in category or 'algorithm' in category:
            category_match = True

    # summary_match = ('algorithm' in page.summary
    #                  or 'Algorithm' in page.summary)
    #
    # return category_match and summary_match

    content_match = (page.content.find('algorithm') != -1
                     or page.content.find('Algorithm') != -1)

    return category_match and content_match


def get_wiki_page(title, auto_suggest=True):
    try:
        # disable auto_suggest to get the correct page
        # (e.g. auto_suggest will turn 'B*' into 'Bacteria')
        return wiki.page(title, auto_suggest=False)
    except:
        if auto_suggest:
            try:
                # if there's no exact matching page,
                # try the auto_suggest before giving up
                return wiki.page(title)
            except:
                return None


def write_output_from_page(output, page):
    json.dump(
        {'title': page.title,
         'summary': page.summary,
         'categories': page.categories,
         'links': page.links},
        output)
    output.write('\n')


def parse_list_of_algorithms_page():
    output = open('wiki.csv', 'w+')
    # csv_writer = csv.writer(output)

    list_of_algorithms_page = wiki.page('list of algorithms')

    for link in list_of_algorithms_page.links:
        link_page = get_wiki_page(link)
        if link_page is None:
            continue
        if is_algorithm_page(link_page):
            write_output_from_page(output, link_page)

    output.close()


def is_category_title(title):
    return title.encode('utf8').startswith('Category:')


def parse_category_page(page, output, depth, visited):
    print 'looking at page:', page.title
    for member in page.categorymembers:
        print member, depth
        if member in visited:
            print 'visited'
            continue
        visited.add(member)
        if is_category_title(member):
            # subcategory page
            if depth < MAX_CATEGORY_DEPTH:
                page = wiki.categorypage(member)
                if page is None:
                    print '-> subcategory not found'
                    continue
                print '-> subcategory'
                parse_category_page(page, output, depth + 1, visited)
        else:
            # member page
            page = get_wiki_page(member)
            if page is None:
                print '-> member page not found'
                continue
            if is_algorithm_page(page):
                print '-> algorithm page'
                write_output_from_page(output, page)
            else:
                print '-> member page of other stuff'


def parse_category(category):
    visited = set()
    try:
        input = open('wiki_algo_category.json')
        for line in input:
            visited.add(json.loads(line)['title'])
        input.close()
    except IOError:
        pass

    output = open('wiki_algo_category.json', 'a')
    # csv_writer = csv.writer(output)

    print 'start parsing...'
    parse_category_page(wiki.categorypage(category), output, 0, visited)

    output.close()

# parse_category('Category:Algorithms')
{ "content_hash": "e0981d0703273e3ecad49e456b91742e", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 72, "avg_line_length": 31.074766355140188, "alnum_prop": 0.5750375939849625, "repo_name": "xkxx/algodb", "id": "3aae3f9831c28e7015120917441b62109730cf07", "size": "3352", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "AlgorithmNames/parseWikipedia.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "27531" }, { "name": "HTML", "bytes": "5467" }, { "name": "JavaScript", "bytes": "27024" }, { "name": "Python", "bytes": "84399" }, { "name": "Shell", "bytes": "1594" } ], "symlink_target": "" }