# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing operations."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources as resource_exceptions
def AddFlags(parser, is_ga):
"""Helper function for adding flags dependant on the release track."""
base_classes.BaseDescriber.Args(parser)
base_classes.AddFieldsFlag(parser, 'operations')
scope = parser.add_mutually_exclusive_group()
scope.add_argument(
'--global',
action='store_true',
help=('If provided, it is assumed that the requested operation is '
'global.'))
scope.add_argument(
'--region',
help='The region of the operation to fetch.',
action=actions.StoreProperty(properties.VALUES.compute.region))
scope.add_argument(
'--zone',
help='The zone of the operation to fetch.',
action=actions.StoreProperty(properties.VALUES.compute.zone))
if not is_ga:
scope.add_argument(
'--user-accounts',
action='store_true',
help=('If provided, it is assumed that the requested operation is '
'a Compute User Accounts operation.'))
@base.ReleaseTracks(base.ReleaseTrack.GA)
class DescribeGA(base_classes.BaseDescriber):
"""Describe a Google Compute Engine operation."""
def __init__(self, *args, **kwargs):
super(DescribeGA, self).__init__(*args, **kwargs)
self._ga = True
@staticmethod
def Args(parser):
AddFlags(parser, True)
@property
def service(self):
return self._service
def ReferenceFromUri(self, args):
"""Helper function for creating a ref from a Uri."""
try:
ref = self.resources.Parse(args.name, params={
'region': args.region, 'zone': args.zone})
return ref
except resource_exceptions.InvalidResourceException as e:
if not self._ga:
ref = self.clouduseraccounts_resources.Parse(
args.name)
return ref
else:
raise e
def ValidCollection(self, ref):
"""Helper function for checking a reference is for an operation."""
if self._ga:
return ref.Collection() in (
'compute.globalOperations',
'compute.regionOperations',
'compute.zoneOperations')
else:
return ref.Collection() in (
'compute.globalOperations',
'compute.regionOperations',
'compute.zoneOperations',
'clouduseraccounts.globalAccountsOperations')
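# CreateReference first tries to resolve args.name as a full operation URI;
# if the collection cannot be inferred, it falls back to the --global,
# --region, --zone (and, outside GA, --user-accounts) flags to construct the
# reference, then selects the matching operations service.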
def CreateReference(self, args):
try:
ref = self.ReferenceFromUri(args)
except resource_exceptions.UnknownCollectionException:
if getattr(args, 'global'):
ref = self.CreateGlobalReference(
args.name, resource_type='globalOperations')
elif args.region:
ref = self.CreateRegionalReference(
args.name, args.region, resource_type='regionOperations')
elif args.zone:
ref = self.CreateZonalReference(
args.name, args.zone, resource_type='zoneOperations')
elif not self._ga and args.user_accounts:
ref = self.CreateAccountsReference(
args.name, resource_type='globalAccountsOperations')
else:
# TODO(user): Instead of raising here, we should really just
# prompt for {global, <list of regions>, <list of zones>}, but
# for now, it's more important to go into GA than to solve
# this small problem.
raise exceptions.ToolException(
('Either pass in the full URI of an operation object or pass in '
'[--global], [--region], or [--zone] when specifying just the '
'operation name.') if self._ga else
('Either pass in the full URI of an operation object or pass in '
'[--global], [--region], [--zone], or [--user-accounts] when '
'specifying just the operation name.'))
if not self.ValidCollection(ref):
raise exceptions.ToolException(
('You must pass in a reference to a global, regional, or zonal '
'operation.') if self._ga else
('You must pass in a reference to a global, regional, zonal, or '
'user accounts operation.'))
else:
if ref.Collection() == 'compute.globalOperations':
self._service = self.compute.globalOperations
elif ref.Collection() == 'compute.regionOperations':
self._service = self.compute.regionOperations
elif ref.Collection() == 'clouduseraccounts.globalAccountsOperations':
self._service = self.clouduseraccounts.globalAccountsOperations
else:
self._service = self.compute.zoneOperations
return ref
def ScopeRequest(self, ref, request):
if ref.Collection() == 'compute.regionOperations':
request.region = ref.region
elif ref.Collection() == 'compute.zoneOperations':
request.zone = ref.zone
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class DescribeBeta(DescribeGA):
"""Describe a Google Compute Engine operation."""
def __init__(self, *args, **kwargs):
super(DescribeBeta, self).__init__(*args, **kwargs)
self._ga = False
@staticmethod
def Args(parser):
AddFlags(parser, False)
def DetailedHelp(version):
"""Construct help text based on the command release track."""
detailed_help = {
'brief': 'Describe a Google Compute Engine operation',
'DESCRIPTION': """\
*{command}* displays all data associated with a Google Compute
Engine operation in a project.
""",
'EXAMPLES': """\
To get details about a global operation, run:
$ {command} OPERATION --global
To get details about a regional operation, run:
$ {command} OPERATION --region us-central1
To get details about a zonal operation, run:
$ {command} OPERATION --zone us-central1-a
""",
}
if version == 'BETA':
detailed_help['EXAMPLES'] = """\
To get details about a global operation, run:
$ {command} OPERATION --global
To get details about a regional operation, run:
$ {command} OPERATION --region us-central1
To get details about a zonal operation, run:
$ {command} OPERATION --zone us-central1-a
To get details about a Compute User Accounts operation, run:
$ {command} OPERATION --user-accounts
"""
return detailed_help
DescribeGA.detailed_help = DetailedHelp('GA')
DescribeBeta.detailed_help = DetailedHelp('BETA')
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qsl
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.types import ProviderError
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver, \
CloudStackAffinityGroupType
from libcloud.compute.types import LibcloudError, Provider, InvalidCredsError
from libcloud.compute.types import KeyPairDoesNotExistError
from libcloud.compute.types import NodeState
from libcloud.compute.providers import get_driver
from libcloud.test import unittest
from libcloud.test import MockHttpTestCase
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class CloudStackCommonTestCase(TestCaseMixin):
driver_klass = CloudStackNodeDriver
def setUp(self):
self.driver_klass.connectionCls.conn_classes = \
(None, CloudStackMockHttp)
self.driver = self.driver_klass('apikey', 'secret',
path='/test/path',
host='api.dummy.com')
self.driver.path = '/test/path'
self.driver.type = -1
CloudStackMockHttp.type = None
CloudStackMockHttp.fixture_tag = 'default'
self.driver.connection.poll_interval = 0.0
def test_invalid_credentials(self):
CloudStackMockHttp.type = 'invalid_credentials'
driver = self.driver_klass('invalid', 'invalid', path='/test/path',
host='api.dummy.com')
self.assertRaises(InvalidCredsError, driver.list_nodes)
def test_import_keypair_from_string_api_error(self):
CloudStackMockHttp.type = 'api_error'
name = 'test-pair'
key_material = ''
expected_msg = 'Public key is invalid'
self.assertRaisesRegexp(ProviderError, expected_msg,
self.driver.import_key_pair_from_string,
name=name, key_material=key_material)
def test_create_node_immediate_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
CloudStackMockHttp.fixture_tag = 'deployfail'
self.assertRaises(
Exception,
self.driver.create_node,
name='node-name', image=image, size=size)
def test_create_node_delayed_failure(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
CloudStackMockHttp.fixture_tag = 'deployfail2'
self.assertRaises(
Exception,
self.driver.create_node,
name='node-name', image=image, size=size)
def test_create_node_default_location_success(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
default_location = self.driver.list_locations()[0]
node = self.driver.create_node(name='fred',
image=image,
size=size)
self.assertEqual(node.name, 'fred')
self.assertEqual(node.public_ips, [])
self.assertEqual(node.private_ips, ['192.168.1.2'])
self.assertEqual(node.extra['zone_id'], default_location.id)
def test_create_node_ex_networks(self):
CloudStackMockHttp.fixture_tag = 'deploynetworks'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='deploynetworks',
location=location,
image=image,
size=size,
networks=networks)
self.assertEqual(node.name, 'deploynetworks')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(len(node.private_ips), 2)
def test_create_node_ex_ipaddress(self):
CloudStackMockHttp.fixture_tag = 'deployip'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
ipaddress = '10.1.0.128'
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='deployip',
location=location,
image=image,
size=size,
networks=networks,
ex_ip_address=ipaddress)
self.assertEqual(node.name, 'deployip')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(node.private_ips[0], ipaddress)
def test_create_node_ex_rootdisksize(self):
CloudStackMockHttp.fixture_tag = 'rootdisksize'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
volumes = self.driver.list_volumes()
rootdisksize = '50'
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='rootdisksize',
location=location,
image=image,
size=size,
networks=networks,
ex_rootdisksize=rootdisksize)
self.assertEqual(node.name, 'rootdisksize')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(1, len(volumes))
self.assertEqual('ROOT-69941', volumes[0].name)
self.assertEqual(53687091200, volumes[0].size)
def test_create_node_ex_start_vm_false(self):
CloudStackMockHttp.fixture_tag = 'stoppedvm'
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
networks = [nw for nw in self.driver.ex_list_networks()
if str(nw.zoneid) == str(location.id)]
node = self.driver.create_node(name='stopped_vm',
location=location,
image=image,
size=size,
networks=networks,
ex_start_vm=False)
self.assertEqual(node.name, 'stopped_vm')
self.assertEqual(node.extra['size_id'], size.id)
self.assertEqual(node.extra['zone_id'], location.id)
self.assertEqual(node.extra['image_id'], image.id)
self.assertEqual(node.state, NodeState.STOPPED)
def test_create_node_ex_security_groups(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
sg = [sg['name'] for sg in self.driver.ex_list_security_groups()]
CloudStackMockHttp.fixture_tag = 'deploysecuritygroup'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
ex_security_groups=sg)
self.assertEqual(node.name, 'test')
self.assertEqual(node.extra['security_group'], sg)
self.assertEqual(node.id, 'fc4fd31a-16d3-49db-814a-56b39b9ef986')
def test_create_node_ex_keyname(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
CloudStackMockHttp.fixture_tag = 'deploykeyname'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
ex_keyname='foobar')
self.assertEqual(node.name, 'test')
self.assertEqual(node.extra['key_name'], 'foobar')
def test_create_node_project(self):
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
project = self.driver.ex_list_projects()[0]
CloudStackMockHttp.fixture_tag = 'deployproject'
node = self.driver.create_node(name='test',
location=location,
image=image,
size=size,
project=project)
self.assertEqual(node.name, 'TestNode')
self.assertEqual(node.extra['project'], 'Test Project')
def test_list_images_no_images_available(self):
CloudStackMockHttp.fixture_tag = 'notemplates'
images = self.driver.list_images()
self.assertEqual(0, len(images))
def test_list_images(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listTemplates_default.json')
templates = fixture['listtemplatesresponse']['template']
images = self.driver.list_images()
for i, image in enumerate(images):
# NodeImage expects id to be a string,
# the CloudStack fixture has an int
tid = str(templates[i]['id'])
tname = templates[i]['name']
self.assertIsInstance(image.driver, CloudStackNodeDriver)
self.assertEqual(image.id, tid)
self.assertEqual(image.name, tname)
def test_ex_list_disk_offerings(self):
diskOfferings = self.driver.ex_list_disk_offerings()
self.assertEqual(1, len(diskOfferings))
diskOffering, = diskOfferings
self.assertEqual('Disk offer 1', diskOffering.name)
self.assertEqual(10, diskOffering.size)
def test_ex_list_networks(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listNetworks_default.json')
fixture_networks = fixture['listnetworksresponse']['network']
networks = self.driver.ex_list_networks()
for i, network in enumerate(networks):
self.assertEqual(network.id, fixture_networks[i]['id'])
self.assertEqual(
network.displaytext, fixture_networks[i]['displaytext'])
self.assertEqual(network.name, fixture_networks[i]['name'])
self.assertEqual(
network.networkofferingid,
fixture_networks[i]['networkofferingid'])
self.assertEqual(network.zoneid, fixture_networks[i]['zoneid'])
def test_ex_list_network_offerings(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listNetworkOfferings_default.json')
fixture_networkoffers = \
fixture['listnetworkofferingsresponse']['networkoffering']
networkoffers = self.driver.ex_list_network_offerings()
for i, networkoffer in enumerate(networkoffers):
self.assertEqual(networkoffer.id, fixture_networkoffers[i]['id'])
self.assertEqual(networkoffer.name,
fixture_networkoffers[i]['name'])
self.assertEqual(networkoffer.display_text,
fixture_networkoffers[i]['displaytext'])
self.assertEqual(networkoffer.for_vpc,
fixture_networkoffers[i]['forvpc'])
self.assertEqual(networkoffer.guest_ip_type,
fixture_networkoffers[i]['guestiptype'])
self.assertEqual(networkoffer.service_offering_id,
fixture_networkoffers[i]['serviceofferingid'])
def test_ex_create_network(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'createNetwork_default.json')
fixture_network = fixture['createnetworkresponse']['network']
netoffer = self.driver.ex_list_network_offerings()[0]
location = self.driver.list_locations()[0]
network = self.driver.ex_create_network(display_text='test',
name='test',
network_offering=netoffer,
location=location,
gateway='10.1.1.1',
netmask='255.255.255.0',
network_domain='cloud.local',
vpc_id="2",
project_id="2")
self.assertEqual(network.name, fixture_network['name'])
self.assertEqual(network.displaytext, fixture_network['displaytext'])
self.assertEqual(network.id, fixture_network['id'])
self.assertEqual(network.extra['gateway'], fixture_network['gateway'])
self.assertEqual(network.extra['netmask'], fixture_network['netmask'])
self.assertEqual(network.networkofferingid,
fixture_network['networkofferingid'])
self.assertEqual(network.extra['vpc_id'], fixture_network['vpcid'])
self.assertEqual(network.extra['project_id'],
fixture_network['projectid'])
def test_ex_delete_network(self):
network = self.driver.ex_list_networks()[0]
result = self.driver.ex_delete_network(network=network)
self.assertTrue(result)
def test_ex_list_nics(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listNics_default.json')
fixture_nic = fixture['listnicsresponse']['nic']
vm = self.driver.list_nodes()[0]
nics = self.driver.ex_list_nics(vm)
for i, nic in enumerate(nics):
self.assertEqual(nic.id, fixture_nic[i]['id'])
self.assertEqual(nic.network_id,
fixture_nic[i]['networkid'])
self.assertEqual(nic.net_mask,
fixture_nic[i]['netmask'])
self.assertEqual(nic.gateway,
fixture_nic[i]['gateway'])
self.assertEqual(nic.ip_address,
fixture_nic[i]['ipaddress'])
self.assertEqual(nic.is_default,
fixture_nic[i]['isdefault'])
self.assertEqual(nic.mac_address,
fixture_nic[i]['macaddress'])
def test_ex_add_nic_to_node(self):
vm = self.driver.list_nodes()[0]
network = self.driver.ex_list_networks()[0]
ip = "10.1.4.123"
result = self.driver.ex_attach_nic_to_node(node=vm, network=network, ip_address=ip)
self.assertTrue(result)
def test_ex_remove_nic_from_node(self):
vm = self.driver.list_nodes()[0]
nic = self.driver.ex_list_nics(node=vm)[0]
result = self.driver.ex_detach_nic_from_node(node=vm, nic=nic)
self.assertTrue(result)
def test_ex_list_vpc_offerings(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listVPCOfferings_default.json')
fixture_vpcoffers = \
fixture['listvpcofferingsresponse']['vpcoffering']
vpcoffers = self.driver.ex_list_vpc_offerings()
for i, vpcoffer in enumerate(vpcoffers):
self.assertEqual(vpcoffer.id, fixture_vpcoffers[i]['id'])
self.assertEqual(vpcoffer.name,
fixture_vpcoffers[i]['name'])
self.assertEqual(vpcoffer.display_text,
fixture_vpcoffers[i]['displaytext'])
def test_ex_list_vpcs(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listVPCs_default.json')
fixture_vpcs = fixture['listvpcsresponse']['vpc']
vpcs = self.driver.ex_list_vpcs()
for i, vpc in enumerate(vpcs):
self.assertEqual(vpc.id, fixture_vpcs[i]['id'])
self.assertEqual(vpc.display_text, fixture_vpcs[i]['displaytext'])
self.assertEqual(vpc.name, fixture_vpcs[i]['name'])
self.assertEqual(vpc.vpc_offering_id,
fixture_vpcs[i]['vpcofferingid'])
self.assertEqual(vpc.zone_id, fixture_vpcs[i]['zoneid'])
def test_ex_list_routers(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listRouters_default.json')
fixture_routers = fixture['listroutersresponse']['router']
routers = self.driver.ex_list_routers()
for i, router in enumerate(routers):
self.assertEqual(router.id, fixture_routers[i]['id'])
self.assertEqual(router.name, fixture_routers[i]['name'])
self.assertEqual(router.state, fixture_routers[i]['state'])
self.assertEqual(router.public_ip, fixture_routers[i]['publicip'])
self.assertEqual(router.vpc_id, fixture_routers[i]['vpcid'])
def test_ex_create_vpc(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'createVPC_default.json')
fixture_vpc = fixture['createvpcresponse']
vpcoffer = self.driver.ex_list_vpc_offerings()[0]
vpc = self.driver.ex_create_vpc(cidr='10.1.1.0/16',
display_text='cloud.local',
name='cloud.local',
vpc_offering=vpcoffer,
zone_id="2")
self.assertEqual(vpc.id, fixture_vpc['id'])
def test_ex_delete_vpc(self):
vpc = self.driver.ex_list_vpcs()[0]
result = self.driver.ex_delete_vpc(vpc=vpc)
self.assertTrue(result)
def test_ex_create_network_acllist(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'createNetworkACLList_default.json')
fixture_network_acllist = fixture['createnetworkacllistresponse']
vpc = self.driver.ex_list_vpcs()[0]
network_acllist = self.driver.ex_create_network_acllist(
name='test_acllist',
vpc_id=vpc.id,
description='test description')
self.assertEqual(network_acllist.id, fixture_network_acllist['id'])
def test_ex_list_network_acllist(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listNetworkACLLists_default.json')
fixture_acllist = \
fixture['listnetworkacllistsresponse']['networkacllist']
acllists = self.driver.ex_list_network_acllists()
for i, acllist in enumerate(acllists):
self.assertEqual(acllist.id,
fixture_acllist[i]['id'])
self.assertEqual(acllist.name,
fixture_acllist[i]['name'])
self.assertEqual(acllist.description,
fixture_acllist[i]['description'])
def test_ex_create_network_acl(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'createNetworkACL_default.json')
fixture_network_acllist = fixture['createnetworkaclresponse']
acllist = self.driver.ex_list_network_acllists()[0]
network_acl = self.driver.ex_create_network_acl(
protocol='test_acllist',
acl_id=acllist.id,
cidr_list='',
start_port='80',
end_port='80')
self.assertEqual(network_acl.id, fixture_network_acllist['id'])
def test_ex_list_projects(self):
_, fixture = CloudStackMockHttp()._load_fixture(
'listProjects_default.json')
fixture_projects = fixture['listprojectsresponse']['project']
projects = self.driver.ex_list_projects()
for i, project in enumerate(projects):
self.assertEqual(project.id, fixture_projects[i]['id'])
self.assertEqual(
project.display_text, fixture_projects[i]['displaytext'])
self.assertEqual(project.name, fixture_projects[i]['name'])
self.assertEqual(
project.extra['domainid'],
fixture_projects[i]['domainid'])
self.assertEqual(
project.extra['cpulimit'],
fixture_projects[i]['cpulimit'])
# Note -1 represents unlimited
self.assertEqual(project.extra['networklimit'], -1)
def test_create_volume(self):
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
self.assertEqual(volumeName, volume.name)
self.assertEqual(10, volume.size)
def test_create_volume_no_noncustomized_offering_with_size(self):
"""If the sizes of disk offerings are not configurable and there
are no disk offerings with the requested size, an exception should
be thrown."""
location = self.driver.list_locations()[0]
self.assertRaises(
LibcloudError,
self.driver.create_volume,
11, 'vol-0', location)
def test_create_volume_with_custom_disk_size_offering(self):
CloudStackMockHttp.fixture_tag = 'withcustomdisksize'
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
self.assertEqual(volumeName, volume.name)
def test_attach_volume(self):
node = self.driver.list_nodes()[0]
volumeName = 'vol-0'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
attachReturnVal = self.driver.attach_volume(volume, node)
self.assertTrue(attachReturnVal)
def test_detach_volume(self):
volumeName = 'gre-test-volume'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
res = self.driver.detach_volume(volume)
self.assertTrue(res)
def test_destroy_volume(self):
volumeName = 'gre-test-volume'
location = self.driver.list_locations()[0]
volume = self.driver.create_volume(10, volumeName, location)
res = self.driver.destroy_volume(volume)
self.assertTrue(res)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(1, len(volumes))
self.assertEqual('ROOT-69942', volumes[0].name)
def test_ex_get_volume(self):
volume = self.driver.ex_get_volume(2600)
self.assertEqual('ROOT-69942', volume.name)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(2, len(nodes))
self.assertEqual('test', nodes[0].name)
self.assertEqual('2600', nodes[0].id)
self.assertEqual([], nodes[0].extra['security_group'])
self.assertEqual(None, nodes[0].extra['key_name'])
def test_ex_get_node(self):
node = self.driver.ex_get_node(2600)
self.assertEqual('test', node.name)
self.assertEqual('2600', node.id)
self.assertEqual([], node.extra['security_group'])
self.assertEqual(None, node.extra['key_name'])
def test_ex_get_node_doesnt_exist(self):
self.assertRaises(Exception, self.driver.ex_get_node, node_id=26)
def test_list_locations(self):
location = self.driver.list_locations()[0]
self.assertEqual('1', location.id)
self.assertEqual('Sydney', location.name)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual('Compute Micro PRD', sizes[0].name)
self.assertEqual('105', sizes[0].id)
self.assertEqual(384, sizes[0].ram)
self.assertEqual('Compute Large PRD', sizes[2].name)
self.assertEqual('69', sizes[2].id)
self.assertEqual(6964, sizes[2].ram)
def test_ex_start_node(self):
node = self.driver.list_nodes()[0]
res = node.ex_start()
self.assertEqual('Starting', res)
def test_ex_stop_node(self):
node = self.driver.list_nodes()[0]
res = node.ex_stop()
self.assertEqual('Stopped', res)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
res = node.destroy()
self.assertTrue(res)
def test_expunge_node(self):
node = self.driver.list_nodes()[0]
res = self.driver.destroy_node(node, ex_expunge=True)
self.assertTrue(res)
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
res = node.reboot()
self.assertTrue(res)
def test_list_key_pairs(self):
keypairs = self.driver.list_key_pairs()
fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \
'00:00:00:00:00'
self.assertEqual(keypairs[0].name, 'cs-keypair')
self.assertEqual(keypairs[0].fingerprint, fingerprint)
# Test old and deprecated way
keypairs = self.driver.ex_list_keypairs()
self.assertEqual(keypairs[0]['name'], 'cs-keypair')
self.assertEqual(keypairs[0]['fingerprint'], fingerprint)
def test_list_key_pairs_no_keypair_key(self):
CloudStackMockHttp.fixture_tag = 'no_keys'
keypairs = self.driver.list_key_pairs()
self.assertEqual(keypairs, [])
def test_get_key_pair(self):
CloudStackMockHttp.fixture_tag = 'get_one'
key_pair = self.driver.get_key_pair(name='cs-keypair')
self.assertEqual(key_pair.name, 'cs-keypair')
def test_get_key_pair_doesnt_exist(self):
CloudStackMockHttp.fixture_tag = 'get_one_doesnt_exist'
self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair,
name='does-not-exist')
def test_create_keypair(self):
key_pair = self.driver.create_key_pair(name='test-keypair')
self.assertEqual(key_pair.name, 'test-keypair')
self.assertTrue(key_pair.fingerprint is not None)
self.assertTrue(key_pair.private_key is not None)
# Test old and deprecated way
res = self.driver.ex_create_keypair(name='test-keypair')
self.assertEqual(res['name'], 'test-keypair')
self.assertTrue(res['fingerprint'] is not None)
self.assertTrue(res['privateKey'] is not None)
def test_import_keypair_from_file(self):
fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15'
path = os.path.join(os.path.dirname(__file__), 'fixtures',
'cloudstack',
'dummy_rsa.pub')
key_pair = self.driver.import_key_pair_from_file('foobar', path)
self.assertEqual(key_pair.name, 'foobar')
self.assertEqual(key_pair.fingerprint, fingerprint)
# Test old and deprecated way
res = self.driver.ex_import_keypair('foobar', path)
self.assertEqual(res['keyName'], 'foobar')
self.assertEqual(res['keyFingerprint'], fingerprint)
def test_ex_import_keypair_from_string(self):
fingerprint = 'c4:a1:e5:d4:50:84:a9:4c:6b:22:ee:d6:57:02:b8:15'
path = os.path.join(os.path.dirname(__file__), 'fixtures',
'cloudstack',
'dummy_rsa.pub')
fh = open(path)
key_material = fh.read()
fh.close()
key_pair = self.driver.import_key_pair_from_string('foobar', key_material=key_material)
self.assertEqual(key_pair.name, 'foobar')
self.assertEqual(key_pair.fingerprint, fingerprint)
# Test old and deprecated way
res = self.driver.ex_import_keypair_from_string('foobar', key_material=key_material)
self.assertEqual(res['keyName'], 'foobar')
self.assertEqual(res['keyFingerprint'], fingerprint)
def test_delete_key_pair(self):
key_pair = self.driver.list_key_pairs()[0]
res = self.driver.delete_key_pair(key_pair=key_pair)
self.assertTrue(res)
# Test old and deprecated way
res = self.driver.ex_delete_keypair(keypair='cs-keypair')
self.assertTrue(res)
def test_ex_list_security_groups(self):
groups = self.driver.ex_list_security_groups()
self.assertEqual(2, len(groups))
self.assertEqual(groups[0]['name'], 'default')
self.assertEqual(groups[1]['name'], 'mongodb')
def test_ex_list_security_groups_no_securitygroup_key(self):
CloudStackMockHttp.fixture_tag = 'no_groups'
groups = self.driver.ex_list_security_groups()
self.assertEqual(groups, [])
def test_ex_create_security_group(self):
group = self.driver.ex_create_security_group(name='MySG')
self.assertEqual(group['name'], 'MySG')
def test_ex_delete_security_group(self):
res = self.driver.ex_delete_security_group(name='MySG')
self.assertTrue(res)
def test_ex_authorize_security_group_ingress(self):
res = self.driver.ex_authorize_security_group_ingress('MySG',
'TCP',
'22',
'22',
'0.0.0.0/0')
self.assertTrue(res)
def test_ex_create_affinity_group(self):
res = self.driver.ex_create_affinity_group('MyAG2',
CloudStackAffinityGroupType('MyAGType'))
self.assertEqual(res.name, 'MyAG2')
self.assertIsInstance(res.type, CloudStackAffinityGroupType)
self.assertEqual(res.type.type, 'MyAGType')
def test_ex_create_affinity_group_already_exists(self):
self.assertRaises(LibcloudError,
self.driver.ex_create_affinity_group,
'MyAG', CloudStackAffinityGroupType('MyAGType'))
def test_delete_ex_affinity_group(self):
afg = self.driver.ex_create_affinity_group('MyAG3',
CloudStackAffinityGroupType('MyAGType'))
res = self.driver.ex_delete_affinity_group(afg)
self.assertTrue(res)
def test_ex_update_node_affinity_group(self):
affinity_group_list = self.driver.ex_list_affinity_groups()
nodes = self.driver.list_nodes()
node = self.driver.ex_update_node_affinity_group(nodes[0],
affinity_group_list)
self.assertEqual(node.extra['affinity_group'][0],
affinity_group_list[0].id)
def test_ex_list_affinity_groups(self):
res = self.driver.ex_list_affinity_groups()
self.assertEqual(len(res), 1)
self.assertEqual(res[0].id, '11112')
self.assertEqual(res[0].name, 'MyAG')
self.assertIsInstance(res[0].type, CloudStackAffinityGroupType)
self.assertEqual(res[0].type.type, 'MyAGType')
def test_ex_list_affinity_group_types(self):
res = self.driver.ex_list_affinity_group_types()
self.assertEqual(len(res), 1)
self.assertIsInstance(res[0], CloudStackAffinityGroupType)
self.assertEqual(res[0].type, 'MyAGType')
def test_ex_list_public_ips(self):
ips = self.driver.ex_list_public_ips()
self.assertEqual(ips[0].address, '1.1.1.116')
self.assertEqual(ips[0].virtualmachine_id, '2600')
def test_ex_allocate_public_ip(self):
addr = self.driver.ex_allocate_public_ip()
self.assertEqual(addr.address, '7.5.6.1')
self.assertEqual(addr.id, '10987171-8cc9-4d0a-b98f-1698c09ddd2d')
def test_ex_release_public_ip(self):
addresses = self.driver.ex_list_public_ips()
res = self.driver.ex_release_public_ip(addresses[0])
self.assertTrue(res)
def test_ex_create_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
address = self.driver.ex_list_public_ips()[0]
private_port = 33
private_end_port = 34
public_port = 33
public_end_port = 34
openfirewall = True
protocol = 'TCP'
rule = self.driver.ex_create_port_forwarding_rule(node,
address,
private_port,
public_port,
protocol,
public_end_port,
private_end_port,
openfirewall)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.public_port, public_port)
self.assertEqual(rule.public_end_port, public_end_port)
self.assertEqual(rule.private_port, private_port)
self.assertEqual(rule.private_end_port, private_end_port)
def test_ex_list_firewall_rules(self):
rules = self.driver.ex_list_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.address.address, '1.1.1.116')
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, '33')
self.assertEqual(rule.end_port, '34')
def test_ex_list_firewall_rules_icmp(self):
CloudStackMockHttp.fixture_tag = 'firewallicmp'
rules = self.driver.ex_list_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.address.address, '1.1.1.116')
self.assertEqual(rule.protocol, 'icmp')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertEqual(rule.icmp_code, 0)
self.assertEqual(rule.icmp_type, 8)
self.assertIsNone(rule.start_port)
self.assertIsNone(rule.end_port)
def test_ex_delete_firewall_rule(self):
rules = self.driver.ex_list_firewall_rules()
res = self.driver.ex_delete_firewall_rule(rules[0])
self.assertTrue(res)
def test_ex_create_firewall_rule(self):
address = self.driver.ex_list_public_ips()[0]
cidr_list = '192.168.0.0/16'
protocol = 'TCP'
start_port = 33
end_port = 34
rule = self.driver.ex_create_firewall_rule(address,
cidr_list,
protocol,
start_port=start_port,
end_port=end_port)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, start_port)
self.assertEqual(rule.end_port, end_port)
def test_ex_create_firewall_rule_icmp(self):
address = self.driver.ex_list_public_ips()[0]
cidr_list = '192.168.0.0/16'
protocol = 'icmp'
icmp_code = 0
icmp_type = 8
rule = self.driver.ex_create_firewall_rule(address,
cidr_list,
protocol,
icmp_code=icmp_code,
icmp_type=icmp_type)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.icmp_code, 0)
self.assertEqual(rule.icmp_type, 8)
self.assertIsNone(rule.start_port)
self.assertIsNone(rule.end_port)
def test_ex_list_egress_firewall_rules(self):
rules = self.driver.ex_list_egress_firewall_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertEqual(rule.network_id, '874be2ca-20a7-4360-80e9-7356c0018c0b')
self.assertEqual(rule.cidr_list, '192.168.0.0/16')
self.assertEqual(rule.protocol, 'tcp')
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, '80')
self.assertEqual(rule.end_port, '80')
def test_ex_delete_egress_firewall_rule(self):
rules = self.driver.ex_list_egress_firewall_rules()
res = self.driver.ex_delete_egress_firewall_rule(rules[0])
self.assertTrue(res)
def test_ex_create_egress_firewall_rule(self):
network_id = '874be2ca-20a7-4360-80e9-7356c0018c0b'
cidr_list = '192.168.0.0/16'
protocol = 'TCP'
start_port = 33
end_port = 34
rule = self.driver.ex_create_egress_firewall_rule(
network_id,
cidr_list,
protocol,
start_port=start_port,
end_port=end_port)
self.assertEqual(rule.network_id, network_id)
self.assertEqual(rule.cidr_list, cidr_list)
self.assertEqual(rule.protocol, protocol)
self.assertIsNone(rule.icmp_code)
self.assertIsNone(rule.icmp_type)
self.assertEqual(rule.start_port, start_port)
self.assertEqual(rule.end_port, end_port)
def test_ex_list_port_forwarding_rules(self):
rules = self.driver.ex_list_port_forwarding_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertTrue(rule.node)
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.public_port, '33')
self.assertEqual(rule.public_end_port, '34')
self.assertEqual(rule.private_port, '33')
self.assertEqual(rule.private_end_port, '34')
self.assertEqual(rule.address.address, '1.1.1.116')
def test_ex_delete_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
rule = self.driver.ex_list_port_forwarding_rules()[0]
res = self.driver.ex_delete_port_forwarding_rule(node, rule)
self.assertTrue(res)
def test_node_ex_delete_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
self.assertEqual(len(node.extra['port_forwarding_rules']), 1)
node.extra['port_forwarding_rules'][0].delete()
self.assertEqual(len(node.extra['port_forwarding_rules']), 0)
def test_node_ex_create_port_forwarding_rule(self):
node = self.driver.list_nodes()[0]
self.assertEqual(len(node.extra['port_forwarding_rules']), 1)
address = self.driver.ex_list_public_ips()[0]
private_port = 33
private_end_port = 34
public_port = 33
public_end_port = 34
openfirewall = True
protocol = 'TCP'
rule = node.ex_create_port_forwarding_rule(address,
private_port,
public_port,
protocol,
public_end_port,
private_end_port,
openfirewall)
self.assertEqual(rule.address, address)
self.assertEqual(rule.protocol, protocol)
self.assertEqual(rule.public_port, public_port)
self.assertEqual(rule.public_end_port, public_end_port)
self.assertEqual(rule.private_port, private_port)
self.assertEqual(rule.private_end_port, private_end_port)
self.assertEqual(len(node.extra['port_forwarding_rules']), 2)
def test_ex_list_ip_forwarding_rules(self):
rules = self.driver.ex_list_ip_forwarding_rules()
self.assertEqual(len(rules), 1)
rule = rules[0]
self.assertTrue(rule.node)
self.assertEqual(rule.protocol, 'tcp')
self.assertEqual(rule.start_port, 33)
self.assertEqual(rule.end_port, 34)
self.assertEqual(rule.address.address, '1.1.1.116')
def test_ex_limits(self):
limits = self.driver.ex_limits()
self.assertEqual(limits['max_images'], 20)
self.assertEqual(limits['max_networks'], 20)
self.assertEqual(limits['max_public_ips'], -1)
self.assertEqual(limits['max_vpc'], 20)
self.assertEqual(limits['max_instances'], 20)
self.assertEqual(limits['max_projects'], -1)
self.assertEqual(limits['max_volumes'], 20)
self.assertEqual(limits['max_snapshots'], 20)
def test_ex_create_tags(self):
node = self.driver.list_nodes()[0]
tags = {'Region': 'Canada'}
resp = self.driver.ex_create_tags([node.id], 'UserVm', tags)
self.assertTrue(resp)
def test_ex_delete_tags(self):
node = self.driver.list_nodes()[0]
tag_keys = ['Region']
resp = self.driver.ex_delete_tags([node.id], 'UserVm', tag_keys)
self.assertTrue(resp)
def test_list_snapshots(self):
snapshots = self.driver.list_snapshots()
self.assertEqual(len(snapshots), 3)
snap = snapshots[0]
self.assertEqual(snap.id, 188402)
self.assertEqual(snap.extra['name'], "i-123-87654-VM_ROOT-12344_20140917105548")
self.assertEqual(snap.extra['volume_id'], 89341)
def test_create_volume_snapshot(self):
volume = self.driver.list_volumes()[0]
snapshot = self.driver.create_volume_snapshot(volume)
self.assertEqual(snapshot.id, 190547)
self.assertEqual(snapshot.extra['name'], "i-123-87654-VM_ROOT-23456_20140917105548")
self.assertEqual(snapshot.extra['volume_id'], "fe1ada16-57a0-40ae-b577-01a153690fb4")
def test_destroy_volume_snapshot(self):
snapshot = self.driver.list_snapshots()[0]
resp = self.driver.destroy_volume_snapshot(snapshot)
self.assertTrue(resp)
def test_ex_create_snapshot_template(self):
snapshot = self.driver.list_snapshots()[0]
template = self.driver.ex_create_snapshot_template(snapshot, "test-libcloud-template", 99)
self.assertEqual(template.id, '10260')
self.assertEqual(template.name, "test-libcloud-template")
self.assertEqual(template.extra['displaytext'], "test-libcloud-template")
self.assertEqual(template.extra['hypervisor'], "VMware")
self.assertEqual(template.extra['os'], "Other Linux (64-bit)")
def test_ex_list_os_types(self):
os_types = self.driver.ex_list_os_types()
self.assertEqual(len(os_types), 146)
self.assertEqual(os_types[0]['id'], 69)
self.assertEqual(os_types[0]['oscategoryid'], 7)
self.assertEqual(os_types[0]['description'], "Asianux 3(32-bit)")
def test_ex_list_vpn_gateways(self):
vpn_gateways = self.driver.ex_list_vpn_gateways()
self.assertEqual(len(vpn_gateways), 1)
self.assertEqual(vpn_gateways[0].id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_gateways[0].account, 'some_account')
self.assertEqual(vpn_gateways[0].domain, 'some_domain')
self.assertEqual(vpn_gateways[0].domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8')
self.assertEqual(vpn_gateways[0].public_ip, '1.2.3.4')
self.assertEqual(vpn_gateways[0].vpc_id, '4d25e181-8850-4d52-8ecb-a6f35bbbabde')
def test_ex_create_vpn_gateway(self):
vpc = self.driver.ex_list_vpcs()[0]
vpn_gateway = self.driver.ex_create_vpn_gateway(vpc)
self.assertEqual(vpn_gateway.id, '5ef6794e-cec8-4018-9fef-c4dacbadee14')
self.assertEqual(vpn_gateway.account, 'some_account')
self.assertEqual(vpn_gateway.domain, 'some_domain')
self.assertEqual(vpn_gateway.domain_id, '9b397dea-25ef-4c5d-b47d-627eaebe8ed8')
self.assertEqual(vpn_gateway.public_ip, '2.3.4.5')
self.assertEqual(vpn_gateway.vpc_id, vpc.id)
def test_ex_delete_vpn_gateway(self):
vpn_gateway = self.driver.ex_list_vpn_gateways()[0]
self.assertTrue(vpn_gateway.delete())
def test_ex_list_vpn_customer_gateways(self):
vpn_customer_gateways = self.driver.ex_list_vpn_customer_gateways()
self.assertEqual(len(vpn_customer_gateways), 1)
self.assertEqual(vpn_customer_gateways[0].id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_customer_gateways[0].cidr_list, '10.2.2.0/24')
self.assertEqual(vpn_customer_gateways[0].esp_policy, '3des-md5')
self.assertEqual(vpn_customer_gateways[0].gateway, '10.2.2.1')
self.assertEqual(vpn_customer_gateways[0].ike_policy, '3des-md5')
self.assertEqual(vpn_customer_gateways[0].ipsec_psk, 'some_psk')
def test_ex_create_vpn_customer_gateway(self):
vpn_customer_gateway = self.driver.ex_create_vpn_customer_gateway(
cidr_list='10.0.0.0/24',
esp_policy='3des-md5',
gateway='10.0.0.1',
ike_policy='3des-md5',
ipsec_psk='ipsecpsk')
self.assertEqual(vpn_customer_gateway.id, 'cef3c766-116a-4e83-9844-7d08ab7d3fd4')
self.assertEqual(vpn_customer_gateway.esp_policy, '3des-md5')
self.assertEqual(vpn_customer_gateway.gateway, '10.0.0.1')
self.assertEqual(vpn_customer_gateway.ike_policy, '3des-md5')
self.assertEqual(vpn_customer_gateway.ipsec_psk, 'ipsecpsk')
def test_ex_ex_delete_vpn_customer_gateway(self):
vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0]
self.assertTrue(vpn_customer_gateway.delete())
def test_ex_list_vpn_connections(self):
vpn_connections = self.driver.ex_list_vpn_connections()
self.assertEqual(len(vpn_connections), 1)
self.assertEqual(vpn_connections[0].id, '8f482d9a-6cee-453b-9e78-b0e1338ffce9')
self.assertEqual(vpn_connections[0].passive, False)
self.assertEqual(vpn_connections[0].vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_connections[0].vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_connections[0].state, 'Connected')
def test_ex_create_vpn_connection(self):
vpn_customer_gateway = self.driver.ex_list_vpn_customer_gateways()[0]
vpn_gateway = self.driver.ex_list_vpn_gateways()[0]
vpn_connection = self.driver.ex_create_vpn_connection(
vpn_customer_gateway,
vpn_gateway)
self.assertEqual(vpn_connection.id, 'f45c3af8-f909-4f16-9d40-ed4409c575f8')
self.assertEqual(vpn_connection.passive, False)
self.assertEqual(vpn_connection.vpn_customer_gateway_id, 'ea67eaae-1c2a-4e65-b910-441e77f69bea')
self.assertEqual(vpn_connection.vpn_gateway_id, 'cffa0cab-d1da-42a7-92f6-41379267a29f')
self.assertEqual(vpn_connection.state, 'Connected')
def test_ex_delete_vpn_connection(self):
vpn_connection = self.driver.ex_list_vpn_connections()[0]
self.assertTrue(vpn_connection.delete())
class CloudStackTestCase(CloudStackCommonTestCase, unittest.TestCase):
def test_driver_instantiation(self):
urls = [
'http://api.exoscale.ch/compute1', # http, default port
'https://api.exoscale.ch/compute2', # https, default port
'http://api.exoscale.ch:8888/compute3', # http, custom port
'https://api.exoscale.ch:8787/compute4', # https, custom port
'https://api.test.com/compute/endpoint' # https, default port
]
expected_values = [
{'host': 'api.exoscale.ch', 'port': 80, 'path': '/compute1'},
{'host': 'api.exoscale.ch', 'port': 443, 'path': '/compute2'},
{'host': 'api.exoscale.ch', 'port': 8888, 'path': '/compute3'},
{'host': 'api.exoscale.ch', 'port': 8787, 'path': '/compute4'},
{'host': 'api.test.com', 'port': 443, 'path': '/compute/endpoint'}
]
cls = get_driver(Provider.CLOUDSTACK)
for url, expected in zip(urls, expected_values):
driver = cls('key', 'secret', url=url)
self.assertEqual(driver.host, expected['host'])
self.assertEqual(driver.path, expected['path'])
self.assertEqual(driver.connection.port, expected['port'])
def test_user_must_provide_host_and_path_or_url(self):
expected_msg = ('When instantiating CloudStack driver directly '
'you also need to provide url or host and path '
'argument')
cls = get_driver(Provider.CLOUDSTACK)
self.assertRaisesRegexp(Exception, expected_msg, cls,
'key', 'secret')
try:
cls('key', 'secret', True, 'localhost', '/path')
except Exception:
self.fail('host and path provided but driver raised an exception')
try:
cls('key', 'secret', url='https://api.exoscale.ch/compute')
except Exception:
self.fail('url provided but driver raised an exception')
class CloudStackMockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('cloudstack')
fixture_tag = 'default'
def _load_fixture(self, fixture):
body = self.fixtures.load(fixture)
return body, json.loads(body)
def _test_path_invalid_credentials(self, method, url, body, headers):
body = ''
return (httplib.UNAUTHORIZED, body, {},
httplib.responses[httplib.UNAUTHORIZED])
def _test_path_api_error(self, method, url, body, headers):
body = self.fixtures.load('registerSSHKeyPair_error.json')
return (431, body, {},
httplib.responses[httplib.OK])
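# The generic handler below dispatches on the CloudStack 'command' query
# parameter: if a _cmd_<command> method exists it is invoked with the
# remaining query arguments, otherwise the response body is loaded from a
# fixture named '<command>_<fixture_tag>.json', so individual tests switch
# behaviour by setting CloudStackMockHttp.fixture_tag.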
def _test_path(self, method, url, body, headers):
url = urlparse.urlparse(url)
query = dict(parse_qsl(url.query))
self.assertTrue('apiKey' in query)
self.assertTrue('command' in query)
self.assertTrue('response' in query)
self.assertTrue('signature' in query)
self.assertTrue(query['response'] == 'json')
del query['apiKey']
del query['response']
del query['signature']
command = query.pop('command')
if hasattr(self, '_cmd_' + command):
return getattr(self, '_cmd_' + command)(**query)
else:
fixture = command + '_' + self.fixture_tag + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, obj, httplib.responses[httplib.OK])
def _cmd_queryAsyncJobResult(self, jobid):
fixture = 'queryAsyncJobResult' + '_' + str(jobid) + '.json'
body, obj = self._load_fixture(fixture)
return (httplib.OK, body, obj, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
#!/usr/bin/env python3
"""
This script reads the tile grid from various sources and renders it to an
SVG or PDF file. It can also draw connections between tiles when provided
with the 'tileconn.json' file used in the prjxray database.
Use ONE of the following argument sets to specify the data source:
1. --tilegrid <tilegrid.json> [--tileconn <tileconn.json>]
2. --arch-xml <arch.xml>
3. --graph-xml <rr_graph.xml>
4. --conn-db <channels.db> [--db-table <tile table name>]
"""
import sys
import argparse
import os
import re
from collections import namedtuple
import progressbar
import json
import sqlite3
import lxml.etree as ET
import lxml.objectify as objectify
import svgwrite
# =============================================================================
class GridVisualizer(object):
BLOCK_RECT = 100
BLOCK_GAP = 10
BLOCK_SIZE = BLOCK_RECT + BLOCK_GAP
Loc = namedtuple("Loc", "x y")
Conn = namedtuple("Conn", "loc0 loc1")
GridExtent = namedtuple("GridExtent", "xmin ymin xmax ymax")
def __init__(self):
self.tilegrid = None
self.tileconn = None
self.grid_roi = None
self.conn_roi = None
self.tile_colormap = None
self.connections = {}
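# The loaders below all populate self.tilegrid as a dict keyed by tile name,
# where each entry carries at least "grid_x", "grid_y" and "type". For the
# JSON path this means tilegrid.json is expected to look roughly like this
# (illustrative sample, real entries may carry additional fields):
#
#   {
#     "CLBLL_L_X2Y0": {"grid_x": 2, "grid_y": 0, "type": "CLBLL_L"},
#     "INT_L_X2Y1":   {"grid_x": 2, "grid_y": 1, "type": "INT_L"}
#   }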
def load_tilegrid_from_json(self, json_file):
# Load JSON files
with open(json_file, "r") as fp:
self.tilegrid = json.load(fp)
self._determine_grid_extent()
self._build_loc_map()
def load_tileconn_from_json(self, json_file):
# Load JSON files
with open(json_file, "r") as fp:
self.tileconn = json.load(fp)
self._form_connections()
def load_tilegrid_from_arch_xml(self, xml_file):
# Load and parse the XML
parser = ET.XMLParser(remove_comments=True)
xml_tree = objectify.parse(xml_file, parser=parser)
xml_root = xml_tree.getroot()
# Get the layout section
layout = xml_root.find("layout")
assert (layout is not None)
# Get the fixed_layout section
fixed_layout = layout.find("fixed_layout")
assert (fixed_layout is not None)
# Extract the grid extent
dx = int(fixed_layout.get("width"))
dy = int(fixed_layout.get("height"))
self.grid_extent = self.GridExtent(0, 0, dx, dy)
# Convert
self.tilegrid = {}
for tile in list(fixed_layout):
assert (tile.tag == "single")
# Basic tile parameters
grid_x = int(tile.get("x"))
grid_y = int(tile.get("y"))
tile_type = tile.get("type")
# Tile name (if present)
tile_name = None
metadata = tile.find("metadata")
if metadata is not None:
for meta in metadata.findall("meta"):
if meta.get("name") == "fasm_prefix":
tile_name = meta.text
# Fake tile name
if tile_name is None:
tile_name = "UNKNOWN_X%dY%d" % (grid_x, grid_y)
# Already exists
if tile_name in self.tilegrid:
tile_name += "(2)"
self.tilegrid[tile_name] = {
"grid_x": grid_x,
"grid_y": grid_y,
"type": tile_type
}
self._build_loc_map()
def load_tilegrid_from_graph_xml(self, xml_file):
# Load and parse the XML
parser = ET.XMLParser(remove_comments=True)
xml_tree = objectify.parse(xml_file, parser=parser)
xml_root = xml_tree.getroot()
# Load block types
xml_block_types = xml_root.find("block_types")
assert (xml_block_types is not None)
block_types = {}
for xml_block_type in xml_block_types:
block_type_id = int(xml_block_type.get("id"))
block_name = xml_block_type.get("name")
block_types[block_type_id] = block_name
# Load grid
self.tilegrid = {}
all_x = set()
all_y = set()
xml_grid = xml_root.find("grid")
assert (xml_grid is not None)
for xml_grid_loc in xml_grid:
grid_x = int(xml_grid_loc.get("x"))
grid_y = int(xml_grid_loc.get("y"))
block_type_id = int(xml_grid_loc.get("block_type_id"))
all_x.add(grid_x)
all_y.add(grid_y)
# Fake tile name
tile_name = "BLOCK_X%dY%d" % (grid_x, grid_y)
self.tilegrid[tile_name] = {
"grid_x": grid_x,
"grid_y": grid_y,
"type": block_types[block_type_id]
}
# Determine grid extent
self.grid_extent = self.GridExtent(
min(all_x), min(all_y), max(all_x), max(all_y)
)
self._build_loc_map()
def load_tilegrid_from_conn_db(self, db_file, db_table):
# Connect to the database and load data
with sqlite3.Connection("file:%s?mode=ro" % db_file, uri=True) as conn:
c = conn.cursor()
# Load the grid
db_tiles = c.execute(
"SELECT pkey, name, tile_type_pkey, grid_x, grid_y FROM %s" %
(db_table)
).fetchall()  # NOTE: the table name is interpolated into the query; fine for a trusted CLI argument, unsafe for untrusted input.
# Load tile types
db_tile_types = c.execute("SELECT pkey, name FROM tile_type"
).fetchall()
# Maps pkey to type string
tile_type_map = {}
for item in db_tile_types:
tile_type_map[item[0]] = item[1]
# Translate information
self.tilegrid = {}
all_x = set()
all_y = set()
for tile in db_tiles:
tile_type_pkey = tile[2]
if tile_type_pkey not in tile_type_map.keys():
print("Unknown tile type pkey %d !" % tile_type_pkey)
continue
tile_name = tile[1]
tile_type = tile_type_map[tile_type_pkey]
tile_grid_x = tile[3]
tile_grid_y = tile[4]
if tile_name in self.tilegrid:
print("Duplicate tile name '%s' !" % tile_name)
continue
all_x.add(tile_grid_x)
all_y.add(tile_grid_y)
self.tilegrid[tile_name] = {
"grid_x": tile_grid_x,
"grid_y": tile_grid_y,
"type": tile_type
}
# Determine grid extent
self.grid_extent = self.GridExtent(
min(all_x), min(all_y), max(all_x), max(all_y)
)
self._build_loc_map()
def load_tile_colormap(self, colormap):
# If it fails just skip it
try:
with open(colormap, "r") as fp:
self.tile_colormap = json.load(fp)
except FileNotFoundError:
pass
def set_grid_roi(self, roi):
self.grid_roi = roi
def set_conn_roi(self, roi):
self.conn_roi = roi
def _determine_grid_extent(self):
# Determine the grid extent
xs = set()
ys = set()
for tile in self.tilegrid.values():
xs.add(tile["grid_x"])
ys.add(tile["grid_y"])
self.grid_extent = self.GridExtent(min(xs), min(ys), max(xs), max(ys))
if self.grid_roi is not None:
self.grid_extent = self.GridExtent(
max(self.grid_extent.xmin, self.grid_roi[0]),
max(self.grid_extent.ymin, self.grid_roi[1]),
min(self.grid_extent.xmax, self.grid_roi[2]),
min(self.grid_extent.ymax, self.grid_roi[3])
)
def _build_loc_map(self):
self.loc_map = {}
for tile_name, tile in self.tilegrid.items():
loc = self.Loc(tile["grid_x"], tile["grid_y"])
if loc in self.loc_map.keys():
print("Duplicate tile at [%d, %d] !" % (loc.x, loc.y))
self.loc_map[loc] = tile_name
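# _form_connections expects each tileconn.json rule to carry "grid_deltas",
# "tile_types" and "wire_pairs"; an illustrative rule (wire names made up):
#
#   {
#     "grid_deltas": [0, 1],
#     "tile_types": ["INT_L", "INT_L"],
#     "wire_pairs": [["NN1BEG0", "NN1END0"], ["NN1BEG1", "NN1END1"]]
#   }
#
# Only the number of wire pairs is used here, to weight the drawn connection.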
def _form_connections(self):
# Loop over tiles of interest
print("Forming connections...")
for tile_name, tile in progressbar.progressbar(self.tilegrid.items()):
this_loc = self.Loc(tile["grid_x"], tile["grid_y"])
this_type = tile["type"]
# Find matching connection rules
for rule in self.tileconn:
grid_deltas = rule["grid_deltas"]
tile_types = rule["tile_types"]
wire_count = len(rule["wire_pairs"])
for k in [+1]:
# Get the counterpart tile according to the rule's grid delta
other_loc = self.Loc(
this_loc.x + k * grid_deltas[0],
this_loc.y + k * grid_deltas[1]
)
try:
other_name = self.loc_map[other_loc]
other_type = self.tilegrid[other_name]["type"]
except KeyError:
continue
# Check match
if this_type == tile_types[0] and \
other_type == tile_types[1]:
# Add the connection
conn = self.Conn(this_loc, other_loc)
if conn not in self.connections.keys():
self.connections[conn] = wire_count
else:
self.connections[conn] += wire_count
def _draw_connection(self, x0, y0, x1, y1, curve=False):
if curve:
dx = x1 - x0
dy = y1 - y0
cx = (x1 + x0) * 0.5 + dy * 0.33
cy = (y1 + y0) * 0.5 - dx * 0.33
path = "M %.3f %.3f " % (x0, y0)
path += "C %.3f %.3f %.3f %.3f %.3f %.3f" % \
(cx, cy, cx, cy, x1, y1)
self.svg.add(self.svg.path(d=path, fill="none", stroke="#000000"))
else:
self.svg.add(self.svg.line((x0, y0), (x1, y1), stroke="#000000"))
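# _grid_to_drawing maps grid coordinates to drawing coordinates with a one
# block margin around the grid; the Y axis is flipped at draw time via
# (self.svg_dy - 1 - yc) so that increasing grid Y points upwards in the SVG.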
def _grid_to_drawing(self, x, y):
xc = (x - self.grid_extent.xmin + 1) * self.BLOCK_SIZE
yc = (y - self.grid_extent.ymin + 1) * self.BLOCK_SIZE
return xc, yc
def _create_drawing(self):
# Drawing size
self.svg_dx = (self.grid_extent.xmax - self.grid_extent.xmin + 2) \
* self.BLOCK_SIZE
self.svg_dy = (self.grid_extent.ymax - self.grid_extent.ymin + 2) \
* self.BLOCK_SIZE
# Create the drawing
self.svg = svgwrite.Drawing(
size=(self.svg_dx, self.svg_dy), profile="full", debug=False
)
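# _get_tile_color matches colormap rules in order and returns the first hit;
# each rule holds a regular expression on the tile "name" and/or "type" plus
# a "color". An illustrative colormap.json:
#
#   [
#     {"type": "CLB.*", "color": "#A0C0FF"},
#     {"name": "INT_L_X2Y.*", "color": "#FFC0A0"}
#   ]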
def _get_tile_color(self, tile_name, tile):
tile_type = tile["type"]
# Match
if self.tile_colormap is not None:
for rule in self.tile_colormap:
# Match by tile name
if "name" in rule and re.match(rule["name"], tile_name):
return rule["color"]
# Match by tile type
if "type" in rule and re.match(rule["type"], tile_type):
return rule["color"]
# A default color
return "#C0C0C0"
def _draw_grid(self):
svg_tiles = []
svg_text = []
# Draw tiles
print("Drawing grid...")
for tile_name, tile in progressbar.progressbar(self.tilegrid.items()):
grid_x = tile["grid_x"]
grid_y = tile["grid_y"]
tile_type = tile["type"]
if self.grid_roi:
if grid_x < self.grid_roi[0] or grid_x > self.grid_roi[2]:
continue
if grid_y < self.grid_roi[1] or grid_y > self.grid_roi[3]:
continue
xc, yc = self._grid_to_drawing(grid_x, grid_y)
color = self._get_tile_color(tile_name, tile)
if color is None:
continue
font_size = self.BLOCK_RECT / 10
# Rectangle
svg_tiles.append(
self.svg.rect(
(
xc - self.BLOCK_RECT / 2,
(self.svg_dy - 1 - yc) - self.BLOCK_RECT / 2
), (self.BLOCK_RECT, self.BLOCK_RECT),
stroke="#C0C0C0",
fill=color
)
)
if grid_x & 1:
text_ofs = -font_size
else:
text_ofs = font_size
# Tile name
svg_text.append(
self.svg.text(
tile_name, (
xc - self.BLOCK_RECT / 2 + 2,
(self.svg_dy - 1 - yc) - font_size / 2 + text_ofs
),
font_size=font_size
)
)
# Tile type
svg_text.append(
self.svg.text(
tile_type, (
xc - self.BLOCK_RECT / 2 + 2,
(self.svg_dy - 1 - yc) + font_size / 2 + text_ofs
),
font_size=font_size
)
)
# Index
svg_text.append(
self.svg.text(
"X%dY%d" % (grid_x, grid_y), (
xc - self.BLOCK_RECT / 2 + 2,
(self.svg_dy - 1 - yc) + self.BLOCK_RECT / 2 - 2
),
font_size=font_size
)
)
# Add tiles to SVG
for item in svg_tiles:
self.svg.add(item)
# Add text to SVG
for item in svg_text:
self.svg.add(item)
def _draw_connections(self):
# Draw connections
print("Drawing connections...")
for conn, count in progressbar.progressbar(self.connections.items()):
if self.conn_roi:
                # Filter against the connection ROI (run() falls back to the grid
                # ROI when no separate connection ROI is given)
                if conn.loc0.x < self.conn_roi[0] or \
                   conn.loc0.x > self.conn_roi[2]:
                    if conn.loc1.x < self.conn_roi[0] or \
                       conn.loc1.x > self.conn_roi[2]:
                        continue
                if conn.loc0.y < self.conn_roi[1] or \
                   conn.loc0.y > self.conn_roi[3]:
                    if conn.loc1.y < self.conn_roi[1] or \
                       conn.loc1.y > self.conn_roi[3]:
                        continue
dx = conn.loc1.x - conn.loc0.x
dy = conn.loc1.y - conn.loc0.y
xc0, yc0 = self._grid_to_drawing(conn.loc0.x, conn.loc0.y)
xc1, yc1 = self._grid_to_drawing(conn.loc1.x, conn.loc1.y)
max_count = int(self.BLOCK_RECT * 0.75 * 0.5)
line_count = min(count, max_count)
# Mostly horizontal
if abs(dx) > abs(dy):
for i in range(line_count):
k = 0.5 if line_count == 1 else i / (line_count - 1)
k = (k - 0.5) * 0.75
if dx > 0:
x0 = xc0 + self.BLOCK_RECT / 2
x1 = xc1 - self.BLOCK_RECT / 2
else:
x0 = xc0 - self.BLOCK_RECT / 2
x1 = xc1 + self.BLOCK_RECT / 2
y0 = yc0 + k * self.BLOCK_RECT
y1 = yc1 + k * self.BLOCK_RECT
self._draw_connection(
x0, (self.svg_dy - 1 - y0), x1, (self.svg_dy - 1 - y1),
True
)
# Mostly vertical
elif abs(dy) > abs(dx):
for i in range(line_count):
k = 0.5 if line_count == 1 else i / (line_count - 1)
k = (k - 0.5) * 0.75
if dy > 0:
y0 = yc0 + self.BLOCK_RECT / 2
y1 = yc1 - self.BLOCK_RECT / 2
else:
y0 = yc0 - self.BLOCK_RECT / 2
y1 = yc1 + self.BLOCK_RECT / 2
x0 = xc0 + k * self.BLOCK_RECT
x1 = xc1 + k * self.BLOCK_RECT
self._draw_connection(
x0, (self.svg_dy - 1 - y0), x1, (self.svg_dy - 1 - y1),
True
)
# Diagonal
else:
# FIXME: Do it in a more elegant way...
max_count = int(max_count * 0.40)
line_count = min(count, max_count)
for i in range(line_count):
k = 0.5 if line_count == 1 else i / (line_count - 1)
k = (k - 0.5) * 0.25
if (dx > 0) ^ (dy > 0):
x0 = xc0 + k * self.BLOCK_RECT
x1 = xc1 + k * self.BLOCK_RECT
y0 = yc0 + k * self.BLOCK_RECT
y1 = yc1 + k * self.BLOCK_RECT
else:
x0 = xc0 - k * self.BLOCK_RECT
x1 = xc1 - k * self.BLOCK_RECT
y0 = yc0 + k * self.BLOCK_RECT
y1 = yc1 + k * self.BLOCK_RECT
x0 += dx * self.BLOCK_RECT / 4
y0 += dy * self.BLOCK_RECT / 4
x1 -= dx * self.BLOCK_RECT / 4
y1 -= dy * self.BLOCK_RECT / 4
self._draw_connection(
x0, (self.svg_dy - 1 - y0), x1, (self.svg_dy - 1 - y1),
False
)
def run(self):
if self.grid_roi is not None:
if self.conn_roi is None:
self.conn_roi = self.grid_roi
else:
self.conn_roi[0] = max(self.conn_roi[0], self.grid_roi[0])
self.conn_roi[1] = max(self.conn_roi[1], self.grid_roi[1])
self.conn_roi[2] = min(self.conn_roi[2], self.grid_roi[2])
self.conn_roi[3] = min(self.conn_roi[3], self.grid_roi[3])
self._create_drawing()
self._draw_grid()
self._draw_connections()
def save(self, file_name):
self.svg.saveas(file_name)
# =============================================================================
def main():
# Parse arguments
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--tilegrid",
type=str,
default=None,
help="Project X-Ray 'tilegrid.json' file"
)
parser.add_argument(
"--tileconn",
type=str,
default=None,
help="Project X-Ray 'tileconn.json' file"
)
parser.add_argument(
"--arch-xml",
type=str,
default=None,
help="Architecture definition XML file"
)
parser.add_argument(
"--graph-xml", type=str, default=None, help="Routing graph XML file"
)
parser.add_argument(
'--conn-db',
type=str,
default=None,
help='Connection SQL database (eg. "channels.db")'
)
parser.add_argument(
'--db-table',
type=str,
default="tile",
help='Table name in the SQL database to read (def. "tile")'
)
parser.add_argument(
"--colormap",
type=str,
default=None,
help="JSON file with tile coloring rules"
)
parser.add_argument(
"--grid-roi",
type=int,
nargs=4,
default=None,
help="Grid ROI to draw (x0 y0 x1 y1)"
)
parser.add_argument(
"--conn-roi",
type=int,
nargs=4,
default=None,
help="Connection ROI to draw (x0 y0 x1 y1)"
)
parser.add_argument(
"-o", type=str, default="layout.svg", help="Output SVG file name"
)
if len(sys.argv) <= 1:
parser.print_help()
exit(1)
args = parser.parse_args()
script_path = os.path.dirname(os.path.realpath(__file__))
if args.colormap is None:
args.colormap = os.path.join(script_path, "tile_colormap.json")
# Create the visualizer
visualizer = GridVisualizer()
# Set ROI
visualizer.set_grid_roi(args.grid_roi)
visualizer.set_conn_roi(args.conn_roi)
# Load arch XML file
if args.arch_xml is not None:
visualizer.load_tilegrid_from_arch_xml(args.arch_xml)
# Load routing graph XML
elif args.graph_xml is not None:
visualizer.load_tilegrid_from_graph_xml(args.graph_xml)
# Load JSON files
elif args.tilegrid is not None:
visualizer.load_tilegrid_from_json(args.tilegrid)
if args.tileconn is not None:
visualizer.load_tileconn_from_json(args.tileconn)
# Load SQL database
elif args.conn_db is not None:
visualizer.load_tilegrid_from_conn_db(args.conn_db, args.db_table)
# No data input
else:
raise RuntimeError("No input data specified")
# Load tile colormap
if args.colormap:
visualizer.load_tile_colormap(args.colormap)
# Do the visualization
visualizer.run()
# Save drawing
if args.o.endswith(".svg"):
print("Saving SVG...")
visualizer.save(args.o)
elif args.o.endswith(".pdf"):
print("Saving PDF...")
from cairosvg import svg2pdf
svg2pdf(visualizer.svg.tostring(), write_to=args.o)
else:
print("Unknown output file type '{}'".format(args.o))
exit(-1)
print("Done.")
# =============================================================================
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from contextlib import closing
from StringIO import StringIO
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.goal.goal import Goal
from pants.ivy.bootstrapper import Bootstrapper
from pants.util.contextutil import temporary_dir
from pants_test.base_test import BaseTest
# TODO: Find a better home for this?
def is_exe(name):
result = subprocess.call(['which', name], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
return result == 0
def ensure_cached(task_cls, expected_num_artifacts=None):
"""Decorator for a task-executing unit test. Asserts that after running
the decorated test function, the cache for task_cls contains expected_num_artifacts.
Clears the task's cache before running the test.
:param task_cls: Class of the task to check the artifact cache for. (e.g. JarCreate)
:param expected_num_artifacts: Expected number of artifacts to be in the task's
cache after running the test. If unspecified, will
assert that the number of artifacts in the cache is
non-zero.
"""
def decorator(test_fn):
def wrapper(self, *args, **kwargs):
with temporary_dir() as artifact_cache:
self.set_options_for_scope('cache.{}'.format(self.options_scope),
write_to=artifact_cache)
task_cache = os.path.join(artifact_cache, task_cls.stable_name())
os.mkdir(task_cache)
test_fn(self, *args, **kwargs)
num_artifacts = 0
for (_, _, files) in os.walk(task_cache):
num_artifacts += len(files)
if expected_num_artifacts is None:
self.assertNotEqual(num_artifacts, 0)
else:
self.assertEqual(num_artifacts, expected_num_artifacts)
return wrapper
return decorator
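# Illustrative use of ensure_cached (a sketch only; JarCreate and the test body
# are hypothetical examples, not defined in this file):
#
#   class JarCreateTest(TaskTestBase):
#     @classmethod
#     def task_type(cls):
#       return JarCreate
#
#     @ensure_cached(JarCreate, expected_num_artifacts=1)
#     def test_jar_create_populates_cache(self):
#       task = self.create_task(self.context())
#       task.execute()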
class TaskTestBase(BaseTest):
"""A baseclass useful for testing a single Task type."""
@classmethod
def task_type(cls):
"""Subclasses must return the type of the Task subclass under test."""
raise NotImplementedError()
def setUp(self):
super(TaskTestBase, self).setUp()
self.options_scope = 'test_scope'
self._testing_task_type = self.synthesize_task_subtype(self.task_type(), self.options_scope)
# We locate the workdir below the pants_workdir, which BaseTest locates within the BuildRoot.
# BaseTest cleans this up, so we don't need to. We give it a stable name, so that we can
# use artifact caching to speed up tests.
self._test_workdir = os.path.join(self.pants_workdir, self.task_type().stable_name())
os.mkdir(self._test_workdir)
# TODO: Push this down to JVM-related tests only? Seems wrong to have an ivy-specific
# action in this non-JVM-specific, high-level base class.
Bootstrapper.reset_instance()
@property
def test_workdir(self):
return self._test_workdir
def synthesize_task_subtype(self, task_type, options_scope):
"""Creates a synthetic subclass of the task type.
Note that passing in a stable options scope will speed up some tests, as the scope may appear
in the paths of tools used by the task, and if these are stable, tests can get artifact
cache hits when bootstrapping these tools. This doesn't hurt test isolation, as we reset
class-level state between each test.
# TODO: Use the task type directly once we re-do the Task lifecycle.
:param task_type: The task type to subtype.
:param options_scope: The scope to give options on the generated task type.
    :return: The synthesized task subtype.
"""
subclass_name = b'test_{0}_{1}'.format(task_type.__name__, options_scope)
return type(subclass_name, (task_type,), {'_stable_name': task_type._compute_stable_name(),
'options_scope': options_scope})
def set_options(self, **kwargs):
self.set_options_for_scope(self.options_scope, **kwargs)
def context(self, for_task_types=None, options=None, passthru_args=None, target_roots=None,
console_outstream=None, workspace=None):
# Add in our task type.
for_task_types = [self._testing_task_type] + (for_task_types or [])
return super(TaskTestBase, self).context(for_task_types=for_task_types,
options=options,
passthru_args=passthru_args,
target_roots=target_roots,
console_outstream=console_outstream,
workspace=workspace)
def create_task(self, context, workdir=None):
return self._testing_task_type(context, workdir or self._test_workdir)
class ConsoleTaskTestBase(TaskTestBase):
"""A base class useful for testing ConsoleTasks."""
def setUp(self):
Goal.clear()
super(ConsoleTaskTestBase, self).setUp()
task_type = self.task_type()
assert issubclass(task_type, ConsoleTask), \
'task_type() must return a ConsoleTask subclass, got %s' % task_type
def execute_task(self, targets=None, options=None):
"""Creates a new task and executes it with the given config, command line args and targets.
:param targets: Optional list of Target objects passed on the command line.
Returns the text output of the task.
"""
options = options or {}
with closing(StringIO()) as output:
self.set_options(**options)
context = self.context(target_roots=targets, console_outstream=output)
task = self.create_task(context)
task.execute()
return output.getvalue()
def execute_console_task(self, targets=None, extra_targets=None, options=None, passthru_args=None, workspace=None):
"""Creates a new task and executes it with the given config, command line args and targets.
:param options: option values.
:param targets: optional list of Target objects passed on the command line.
:param extra_targets: optional list of extra targets in the context in addition to those
passed on the command line.
:param passthru_args: optional list of passthru_args
:param workspace: optional Workspace to pass into the context.
Returns the list of items returned from invoking the console task's console_output method.
"""
options = options or {}
self.set_options(**options)
context = self.context(target_roots=targets, passthru_args=passthru_args, workspace=workspace)
return self.execute_console_task_given_context(context, extra_targets=extra_targets)
def execute_console_task_given_context(self, context, extra_targets=None):
"""Creates a new task and executes it with the context and extra targets.
:param context: The pants run context to use.
:param extra_targets: An optional list of extra targets in the context in addition to those
passed on the command line.
:returns: The list of items returned from invoking the console task's console_output method.
:rtype: list of strings
"""
task = self.create_task(context)
return list(task.console_output(list(task.context.targets()) + list(extra_targets or ())))
def assert_entries(self, sep, *output, **kwargs):
"""Verifies the expected output text is flushed by the console task under test.
NB: order of entries is not tested, just presence.
sep: the expected output separator.
*output: the output entries expected between the separators
**options: additional options passed to execute_task.
"""
# We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
# '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
# empty string if the separator is properly always a suffix and not applied just between
# entries.
self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_task(**kwargs)).split(sep)))
def assert_console_output(self, *output, **kwargs):
"""Verifies the expected output entries are emitted by the console task under test.
NB: order of entries is not tested, just presence.
*output: the expected output entries
**kwargs: additional kwargs passed to execute_console_task.
"""
self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))
def assert_console_output_contains(self, output, **kwargs):
"""Verifies the expected output string is emitted by the console task under test.
output: the expected output entry(ies)
**kwargs: additional kwargs passed to execute_console_task.
"""
self.assertIn(output, self.execute_console_task(**kwargs))
def assert_console_output_ordered(self, *output, **kwargs):
"""Verifies the expected output entries are emitted by the console task under test.
NB: order of entries is tested.
*output: the expected output entries in expected order
**kwargs: additional kwargs passed to execute_console_task.
"""
self.assertEqual(list(output), self.execute_console_task(**kwargs))
def assert_console_raises(self, exception, **kwargs):
"""Verifies the expected exception is raised by the console task under test.
**kwargs: additional kwargs are passed to execute_console_task.
"""
with self.assertRaises(exception):
self.execute_console_task(**kwargs)
|
|
import os
import sys
import time
import inspect
import filecmp
import util
import dbg
import services
import blobs
import random, string
from metasyncAPI import MetaSync
from mapping import DetMap, DetMap2
from params import *
# inspect doesn't respect the defined order
def __to_line(m):
try:
return m[1].func_code.co_firstlineno
except AttributeError:
return -1
def get_all_tests():
if not hasattr(get_all_tests, "tests"):
tests = []
for (n, f) in inspect.getmembers(sys.modules[__name__]):
if "test_" in n:
tests.append((n[5:], f))
get_all_tests.tests = sorted(tests, key=__to_line)
return get_all_tests.tests
def _init_disk_metasync(metasync, opts, nbackend=3, nreplicas=2, encrypt_key=None):
# use encrypt_key from cmd args
if encrypt_key is None:
encrypt_key = opts.encrypt_key
# see. services api (DiskAPI)
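    # e.g. with opts.tmpdir="/tmp/metasync" (illustrative path) and nbackend=3,
    # this builds ["disk@/tmp/metasync/s0", "disk@/tmp/metasync/s1",
    # "disk@/tmp/metasync/s2"]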
services = ["disk@%s/s%d" % (opts.tmpdir, i) for i in range(nbackend)]
metasync.cmd_init("testing", services, nreplicas, encrypt_key)
#
# NOTE. test cases, starting with "_test" prefix
#
def test_help(metasync, opts):
"print out help string for sub tests"
opts.notree = True
print "> List of subcommands"
for (n, f) in get_all_tests():
print " %-20s: %s" % (n, f.__doc__)
def test_all(metasync, opts):
"run all test cases"
opts.notree = True
for (n, f) in get_all_tests():
if n == "all" or n.startswith("bench_"):
continue
dbg.info("#R<testing %s#> (%s)" % (n, f.__doc__))
f(metasync, opts)
def test_path(metasync, _):
"test path constraints"
assert metasync.check_sanity() is False
assert metasync.path_meta.endswith("/.metasync")
assert metasync.path_conf.endswith("/.metasync/config")
assert metasync.path_objs.endswith("/.metasync/objects")
def test_init(metasync, opts):
    "test initiation"
_init_disk_metasync(metasync, opts, 3, 2, opts.encrypt_key)
# create/commit some files
file_sizes = [1024, 2048]
if opts.slow:
# bigger one that splits by blob
MB = 1024*1024
file_sizes.append(33 * MB)
for size in file_sizes:
pn = os.path.join(opts.root, "file-%s" % size)
util.create_random_file(pn, size)
metasync.cmd_checkin(pn)
metasync.cmd_push()
root = metasync.get_root_blob()
assert len(root.entries) == len(file_sizes)
def test_encryption(metasync, opts):
"test encryption layer"
# set a encryption key
_init_disk_metasync(metasync, opts, 3, 2, "testkey")
import translators
tr = translators.TrEncrypt(metasync)
assert tr.get(tr.put(test_encryption.__doc__)) == test_encryption.__doc__
def test_clone(metasync, opts, need_init=True):
"test cloning, after init"
if need_init:
test_init(metasync, opts)
dst = os.path.join(opts.tmpdir, "repo_clone")
util.mkdirs(dst)
# pick first backend
srv = metasync.config.get("backend", "services").split(",")[0]
clone = MetaSync(dst)
clone.cmd_clone("testing", srv, opts.encrypt_key)
# compare file side-by-side
for root, dirs, files in os.walk(clone.path_root):
for name in files:
dst = os.path.join(root, name)
src = metasync.path_root + dst[len(clone.path_root):]
try:
if not filecmp.cmp(dst, src):
assert dst.endswith("config")
except OSError as e:
assert name.startswith("head") or name.startswith("prev")
return clone
def test_checkin_dir(metasync, opts):
"test checkin with directory"
test_init(metasync, opts)
dst = os.path.join(metasync.path_root, "a/b")
util.mkdirs(dst)
pn = os.path.join(dst, "test-1024")
util.create_random_file(pn, 1024)
dst = os.path.join(metasync.path_root, "a")
metasync.cmd_checkin(dst)
metasync.cmd_push()
test_clone(metasync, opts, False)
def test_checkin_samefile(metasync, opts):
"test checkin one file twice"
test_init(metasync, opts)
metasync.cmd_checkin(os.path.join(metasync.path_root, "file-1024"))
def test_uptodate_master(metasync, opts):
"check uptodate master"
#XXX not yet done
clone = test_clone(metasync, opts)
assert metasync.get_next_version() == 2
assert clone.get_next_version() == 2
assert metasync.get_uptodate_master() != None
file_sizes = [1024, 2048]
for size in file_sizes:
pn = os.path.join(clone.path_root, "file-%s-2" % size)
util.create_random_file(pn, size)
clone.cmd_checkin(pn)
clone.cmd_push()
master = metasync.get_uptodate_master()
metasync.cmd_fetch()
metasync.cmd_update()
assert master == metasync.get_prev_value()
def test_fetch(metasync, opts):
"test fetching"
clone = test_clone(metasync, opts)
file_sizes = [1024, 2048]
for size in file_sizes:
pn = os.path.join(clone.path_root, "file-%s-2" % size)
util.create_random_file(pn, size)
clone.cmd_checkin(pn)
pn = os.path.join(clone.path_root, "dir1")
util.mkdirs(pn)
clone.cmd_checkin(pn)
pn = os.path.join(clone.path_root, "dir2")
util.mkdirs(pn)
pn = os.path.join(clone.path_root, "dir2", "file-1024")
util.create_random_file(pn, 1024)
pn = os.path.join(clone.path_root, "dir2")
clone.cmd_checkin(pn)
clone.cmd_push()
root2 = clone.get_root_blob()
metasync.cmd_fetch()
metasync.cmd_update()
root = metasync.get_root_blob()
cnt = 0
for i in root.walk():
cnt += 1
assert cnt == 7
# XXX. what to assert?
def test_rm(metasync, opts):
"test rm file"
_init_disk_metasync(metasync, opts, 3, 2, opts.encrypt_key)
# create/commit some files
size = 512
for i in range(5):
pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
util.create_random_file(pn, size)
metasync.cmd_checkin(pn)
pn = os.path.join(opts.root, "a/b")
util.mkdirs(pn)
metasync.cmd_checkin(pn)
metasync.cmd_push()
pn = os.path.join(opts.root, "a/b/e")
util.mkdirs(pn)
# try to remove non-exist directory
pn = os.path.join(opts.root, "a/b/c/d")
assert not metasync.cmd_rm(pn)
pn = os.path.join(opts.root, "a/b/e/f")
assert not metasync.cmd_rm(pn)
# try to remove non-exist file
for i in range(3):
pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
metasync.cmd_rm(pn)
assert not os.path.exists(pn)
metasync.cmd_rm(os.path.join(opts.root,"a/b"))
metasync.cmd_push()
# TODO. fire gc
# check blobs
def test_mapping(metasync, opts):
"test mapping strategies"
opts.notree = True
m = DetMap([(1, 2*GB), (2, 5*GB), (3, 3*GB)])
def __do_check(m):
assert len([n for n in m.distrib if n == 1]) == len(m.distrib)/10*2
assert len([n for n in m.distrib if n == 2]) == len(m.distrib)/10*5
assert len([n for n in m.distrib if n == 3]) == len(m.distrib)/10*3
copyset = set()
for i in range(len(m.distrib)):
c = m.get_mapping(i)
assert len(c) == m.replica
copyset.add(tuple(c))
assert len(copyset) < len(m.distrib)
dbg.info("mapping: %s ..." % str(m.distrib)[:30])
dbg.info("copyset: %s" % str(list(copyset)))
__do_check(m)
__do_check(DetMap.restore(m.store()))
# small_test
m = DetMap([(2, 2*GB), (1, 5*GB)])
assert m.mapinfo[0].config[0][0] == 1
def test_mapping_reconfig(metasync, opts):
"test reconfiguration/versioning of mapping strategies"
opts.notree = True
m = DetMap([(1, 2*GB), (2, 5*GB), (3, 2*GB)])
m.reconfig([(1, 2*GB), (2, 5*GB)])
# two version of mapping info
assert len(m.mapinfo) == 2
for (ver, conf) in m.mapinfo.iteritems():
dbg.info("%s -> %s" % (ver, conf.store()))
# where to remap of hash value: 0
# 0: [1,2] -> [1,2]
# 1: [2,3] -> [2,1]
assert m.get_remmaping(0) == []
assert m.get_remmaping(1) == [1]
def test_mapping2(metasync, opts):
    "test a new mapping scheme to tolerate rebalancing on node failure"
from itertools import permutations
# goal
# 1. non-uniformly locating blobs, approximately reflecting storage size of each node
# 2. minimize realigning on a node failure
# design
# 0. node -> (node, storage)
# (e.g., (1,0), (1,1) if node 1 has 2G storage)
# 1. fixed hspace, where h(blob) % hspace = index
# (hspace any large number, NOT depending on len(nodes))
# 2. detmap[index] -> a group of nodes
# (a group includes all nodes, but different order see 3)
# 3. order nodes in a group, by hash(index, node)
# (so deterministic)
# 4. in each group, pick first #replication nodes
# failure
# node change
# replication
# => in all of above situations, only blobs in old/new node
# will be re-balanced
#
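    # A rough sketch of the ordering rule described above (a hypothetical helper,
    # not the actual DetMap2 implementation):
    #
    #   def pick_nodes(index, nodes, replication):
    #       # order the group deterministically by a per-(index, node) hash ...
    #       group = sorted(nodes, key=lambda n: util.sha1("%s.%s" % (index, n)))
    #       # ... and keep the first `replication` entries
    #       return group[:replication]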
def uniq(lst, n):
rtn = []
for (i, _) in lst:
if i in rtn:
continue
rtn.append(i)
if len(rtn) == n:
break
return rtn
replication = 2
config = [(0,2), (1,4), (2,4), (3,2)]
hspace = 20
detmap = DetMap2(config, hspace, replication)
stats = [0] * len(config)
for (k, v) in enumerate(detmap.mapinfo[0].detmap):
"""
# doesn't it need to calculate over uniq?
for i in range(replication):
stats[v[i][0]] += 1
"""
for i in uniq(v, replication):
stats[i] += 1
if opts.slow:
dbg.info("%-2s: [" % k)
for (n, priority) in v:
dbg.info(" %s: %s" % (n, priority))
dbg.info("]")
else:
dbg.info("%-2s: %s -> %s" \
% (k, [e[0] for e in v], detmap.get_mapping(k)))
# approximately reflect storage?
for (i, k) in enumerate(stats):
dbg.info("%s (%s GB) -> #%s" % (i, config[i][1], k))
def test_map_pack(metasync, opts):
config = [(0,2), (1,10), (2,4), (3,2)]
hspace = 100
replication = 2
N = 50
detmap = DetMap2(config, hspace, replication)
detmap2 = DetMap2(config, hspace, replication)
detmap2.pack()
for _ in range(100):
randstr = ''.join(random.choice(string.letters + string.digits) for _ in range(N))
hashid = util.sha1(randstr)
assert detmap.get_mapping(hashid) == detmap2.get_mapping(hashid)
def test_remapping(metasync, opts):
replication = 2
config = [(0,2), (1,4), (2,4), (3,2)]
hspace = 20
detmap = DetMap2(config, hspace, replication)
N = 50
lst = []
for _ in range(100):
randstr = ''.join(random.choice(string.letters + string.digits) for _ in range(N))
hashid = util.sha1(randstr)
lst.append(hashid)
#lst = detmap.get_mapping(hashid)
#for i in lst:
# count[i] += 1
detmap.reconfig(config, 3)
assert len(detmap.mapinfo) == 2
added, removed = detmap.get_remapping(lst)
for i in removed:
assert len(removed[i]) == 0
import copy
detmap = DetMap2(config, hspace, replication)
config = copy.copy(config)
config.pop()
lst3 = []
for hv in lst:
if 3 in detmap.get_mapping(hv):
lst3.append(hv)
detmap.reconfig(config)
added, removed = detmap.get_remapping(lst)
assert len(removed[3]) == len(lst3)
def test_paxos_latency(metasync, opts):
lock = "locktest/ltest_latency"
import services
srvs = ["onedrive"]
srvs_instance = map(services.factory, srvs)
for srv in srvs_instance:
if not srv.exists(lock):
srv.put(lock, '')
from paxos import Proposer
proposer = Proposer("1", srvs_instance, lock)
val = proposer.propose("1")
assert val == "1"
proposer.join()
def test_paxos(metasync, opts):
"test paxos with disk_api"
lock = 'locktest/ltest'
test_init(metasync, opts)
srvs = metasync.services
for srv in srvs:
if not srv.exists(lock):
srv.put(lock, '')
from paxos import Proposer
proposer = Proposer("1", srvs, lock)
val = proposer.propose("1")
assert val == "1"
proposer.join()
def test_paxos_services(metasync, opts):
"test paxos with services"
# init tmp repo to play with
#test_init(metasync, opts)
# init lock primitives
lock = 'locktest/ltest2'
targets = ["google", "box", "dropbox"]
srvs = map(services.factory, targets)
for srv in srvs:
if not srv.exists(lock):
srv.put(lock, '')
from paxos import Proposer
proposer = Proposer("1", srvs, lock)
val = proposer.propose("1")
assert val == "1"
proposer.join()
# XXX. invoke python threads or async
# - srv.sid()
# - metasync.services
# - metasync.lockpath
# proposer = Proposer(srv, metasync)
# proposer.propose()
#
# XXX. check proposed one?
def test_gc(metasync, opts):
"test garbage collector"
test_init(metasync, opts)
# 1. mark and sweep
root = metasync.get_root_blob()
print root
# 2. due to reconfigure
# - if version is different
# - check all blos in each service
def test_blob_diff(metasync, opts):
"test blob diff operation"
test_init(metasync, opts)
bs = blobs.BlobStore2(metasync)
blob_dir = blobs.BlobDir2(bs)
blob_dir.add("file", blobs.BlobFile2(bs))
blob_dir_sub = blobs.BlobDir2(bs)
blob_dir.add("dir1", blob_dir_sub)
blob_dir_sub.add("file2", blobs.BlobFile2(bs))
blob_dir2 = blobs.BlobDir2(bs)
blob_dir2.add("file", blobs.BlobFile2(bs))
blob_dir.diff(blob_dir2)
def test_large_blob(metasync, opts):
test_init(metasync, opts)
bs = blobs.BlobStore2(metasync)
blob_dir = blobs.MBlobDir2(bs)
pn = os.path.join(opts.root, "a")
with open(pn, "w") as f:
f.write("hello world")
blob_dir.add_file("a", pn)
pn = os.path.join(opts.root, "b")
with open(pn, "w") as f:
f.write("hello world2")
pn = os.path.join(opts.root, "b")
blob_dir.add_file("b", pn)
blob_dir.store()
def test_blob(metasync, opts):
"test blob-related operations"
test_init(metasync, opts)
bs = blobs.BlobStore2(metasync)
blob_dir = blobs.BlobDir2(bs)
# empty dir
assert blob_dir.hv is not None \
and len(blob_dir.entries) == 0
# add three
hv0 = blob_dir.hv
blob_dir.add("dir1", blobs.BlobDir2(bs))
blob_dir.add("dir2", blobs.BlobDir2(bs))
blob_dir.add("dir3", blobs.BlobDir2(bs))
blob_dir.add("file", blobs.BlobFile2(bs))
hv3 = blob_dir.hv
assert hv0 != hv3 \
and len(blob_dir.entries) == 4
for (name, blob) in blob_dir.entries.iteritems():
# empty dir
if isinstance(blob, blobs.BlobDir2):
assert blob.hv == hv0
# empty file
if isinstance(blob, blobs.BlobFile2):
assert blob.hv != hv0
# delete one
blob_dir.rm("dir2")
hv2 = blob_dir.hv
assert hv3 != hv2 \
and len(blob_dir.entries) == 3
dbg.dbg("hv: %s\n%s" % (hv2, blob_dir.dump()))
# test store/load
blob_dir.store()
# loaded from disk
loaded_blob = blobs.BlobDir2(bs, hv2)
assert loaded_blob.dump() == blob_dir.dump()
def test_blob_file(metasync, opts):
"test blobfile-related operations"
test_init(metasync, opts)
bs = blobs.BlobStore2(metasync)
blob_file = blobs.BlobFile2(bs)
# empty file
assert blob_file.hv is not None \
and len(blob_file.entries) == 0
# random file with 3 chunks (last one is smaller than unit)
unit = 1*MB
size = 3*MB - 2*KB
pn = os.path.join(opts.tmpdir, "file-%s" % size)
util.create_random_file(pn, size)
# store each chunk to blob_file
blob_file = bs.load_file(pn, unit)
# check entries and total size
assert len(blob_file.entries) == 3 and blob_file.size == size
# test store/load
blob_file.store()
# loaded from disk
loaded_blob = blobs.BlobFile2(bs, blob_file.hv)
assert loaded_blob.dump() == blob_file.dump()
def test_blob_walk(metasync, opts):
"test creating/walking a blob dir"
opts.notree = True
bs = blobs.BlobStore2(metasync)
root = blobs.BlobDir2(bs)
# generate sample tree
for i in range(1, 3):
parent_dir = blobs.BlobDir2(bs)
root.add("dir-%s" % i, parent_dir)
for j in range(1, 4):
child_dir = blobs.BlobDir2(bs)
parent_dir.add("sub-%s" % j, child_dir)
for k in range(1, 5):
blob_file = blobs.BlobFile2(bs)
child_dir.add("file-%s" % k, blob_file)
# count all entries
cnt = 0
for (name, blob) in root.walk():
dbg.dbg("%-18s: %s" % (name, blob.hv))
cnt += 1
assert cnt == 2*3*4 + 2*3 + 2
def test_blob_load(metasync, opts):
"test loading file/dir from a path"
_init_disk_metasync(metasync, opts)
bs = blobs.BlobStore2(metasync)
# /a/b/c
dirp = metasync.get_local_path("a", "b", "c")
util.mkdirs(dirp)
# /a/b/c/file
pn = os.path.join(dirp, "file")
util.create_random_file(pn, 5*KB)
blob = bs.load_dir(dirp)
blob.add("file", bs.load_file(pn))
# count how many blobs
root = bs.get_root_blob()
dbg.dbg("%-15s: %s" % ("/", root.hv))
cnt = 0
for (name, blob) in bs.walk():
dbg.dbg("%-15s: %s" % (name, blob.hv))
cnt += 1
assert cnt == len(["a", "b", "c", "file"])
# flush all new blobs
assert len(os.listdir(metasync.path_objs)) == 0
root.store()
assert len(os.listdir(metasync.path_objs)) == 6
# "." => root
test_blob = bs.load_dir(metasync.get_local_path("."))
assert test_blob == root
test_blob = bs.load_dir(metasync.get_local_path(""))
assert test_blob == root
def test_bstore_iterate(metasync, opts):
"walk over all files in a service, and check if correctly distributed"
test_init(metasync, opts)
hashes = set()
for srv in metasync.services:
for hv in metasync.bstore_iter_remote(srv):
dbg.info("%-10s: %s" % (srv, hv))
hashes.add(hv)
# covered by local's bstore?
for hv in metasync.bstore_iter():
hashes.remove(hv)
assert len(hashes) == 0
def test_bstore_reconfig(metasync, opts):
"rebalancing all blobs when conf changes"
test_init(metasync, opts)
dbg.info("old config: %s" % metasync.mapping)
# all benchmarks
def test_bench_upload(metasync, opts):
    "benchmark upload speed of storage services"
# bump files
tmpdir = os.path.join(opts.tmpdir, "metasync-files")
sizes = [1024, 2048, 1*MB]
files = []
# for real bench
if opts.slow:
sizes = [10*MB, 100*MB]
util.mkdirs(tmpdir)
for size in sizes:
fn = "file-%s" % size
pn = os.path.join(tmpdir, fn)
if not os.path.exists(pn):
util.create_random_file(pn, size)
files.append(fn)
# try uploading each file
result = [["Services"] + files]
for cls in services.all_services:
if cls in [services.DiskAPI]:
continue
        if opts.slow and cls in [services.BaiduAPI]:
continue
row = [services.slug(cls)]
srv = cls()
print 'uploading:', row[0]
if srv.exists('/upload_test'):
srv.rmdir('/upload_test')
srv.putdir('/upload_test')
for f in files:
#if row[0] == 'baidu' and f == 'file-104857600':
# continue
content = open(os.path.join(tmpdir, f), 'r').read()
beg = time.time()
srv.put('/upload_test/' + f, content)
end = time.time()
row.append(end - beg)
result.append(row)
# tabularize
for row in result:
for e in row:
print "%s\t" % e,
print
def test_bench_download(metasync, opts):
    "benchmark download speed of storage services"
# bump files
sizes = [1024, 2048, 1*MB]
files = []
# for real bench
if opts.slow:
sizes = [10*MB, 100*MB]
for size in sizes:
fn = "file-%s" % size
files.append(fn)
# try downloading each file
result = [["Services"] + files]
for cls in services.all_services:
if cls in [services.DiskAPI]:
continue
        if opts.slow and cls in [services.BaiduAPI]:
continue
row = [services.slug(cls)]
srv = cls()
print 'downloading:', row[0]
if not srv.exists('/upload_test'):
print 'Testing files no longer exist in %s' % row[0]
return
for f in files:
#if row[0] == 'baidu' and f == 'file-104857600':
# continue
beg = time.time()
srv.get('/upload_test/' + f)
end = time.time()
row.append(end - beg)
result.append(row)
# tabularize
for row in result:
for e in row:
print "%s\t" % e,
print
def test_concurrent_upload(metasync, opts):
def _put(srv, path, remote_path):
with open(path, "rb") as f:
srv.put(remote_path, f.read())
# bump files
tmpdir = os.path.join(opts.tmpdir, "metasync-files")
sizes = [1024, 2048, 4192, 8192, 1*MB]
files = []
total_size = 1*MB
print tmpdir
util.mkdirs(tmpdir)
for size in sizes:
count = total_size / size
fl = []
for i in range(count):
fn = "file-%s-%s" % (size, i)
pn = os.path.join(tmpdir, fn)
if not os.path.exists(pn):
util.create_random_file(pn, size)
fl.append(fn)
files.append(fl)
from metasyncAPI import Worker, ThreadPool
from multiprocessing import cpu_count
pool = ThreadPool(cpu_count())
# try uploading each file
result = [["Services"] + files]
for cls in services.all_services:
if cls in [services.DiskAPI]:
continue
row = [services.slug(cls)]
srv = cls()
if srv.exists('/concurrent_upload'):
srv.rmdir('/concurrent_upload')
srv.putdir('/concurrent_upload')
print 'uploading:', row[0]
for fl in files:
beg = time.time()
for f in fl:
path = os.path.join(tmpdir, f)
remote_path = '/concurrent_upload/%s' % f
pool.submit(srv.copy, _put, path, remote_path)
pool.join()
end = time.time()
row.append(end - beg)
result.append(row)
# tabularize
for row in result:
for e in row:
print "%s\t" % e,
print
def test_service_auth(metasync, opts):
dropbox = services.factory('dropbox')
google = services.factory('google')
box = services.factory('box')
def test_lock(metasync, opts):
clone = test_clone(metasync, opts)
assert metasync.lock_master()
assert not clone.lock_master()
metasync.unlock_master()
assert clone.lock_master()
clone.unlock_master()
def test_util(metasync, opts):
"test functions in util.py"
opts.notree = True
rtn = ["a", "a/b", "a/b/c"]
for (crumb, name) in util.iter_path_crumb("./a/b/c/"):
assert crumb == rtn.pop(0)
def test_merge(metasync, opts):
clone = test_clone(metasync, opts)
new_files = [3072, 4096]
metasyncs = [metasync, clone]
for i in range(2):
dbg.info("checkin %d" % i)
pn = os.path.join(metasyncs[i].path_root, "file-%s" % new_files[i])
util.create_random_file(pn, new_files[i])
metasyncs[i].cmd_checkin(pn)
metasync.cmd_push()
clone.cmd_fetch()
assert not clone.cmd_push()
clone.cmd_update()
assert clone.cmd_push()
def test_mv(metasync, opts):
test_init(metasync, opts)
src = os.path.join(metasync.path_root, "file-1024")
dst = os.path.join(metasync.path_root, "file-1024-2")
metasync.cmd_mv(src, dst)
def test_bench_paxos(metasync, opts):
    "benchmark latency of paxos with backends"
def new_index(srv, folder, prefix):
if services.slug(srv) == 'onedrive':
folder = '/Public' + folder
if not srv.exists(folder):
return 0
files = srv.listdir(folder)
cnt = 0
for fn in files:
if fn.startswith(prefix):
cnt += 1
return cnt
from paxos import PPaxosWorker
repeat = 5
client_num = [1, 2, 3, 4, 5]
backend_list = [["google"], ["dropbox"], ["onedrive"], ["box"], ["google", "dropbox", "onedrive"]]
results = [['Clients'] + [','.join(x) for x in backend_list]]
# start to test
for num in client_num:
for _ in range(repeat):
row = ['%d clients' % (num)]
for backend in backend_list:
dbg.info('Test paxos for %d clients and %s' % (num, ','.join(backend)))
srvs = map(services.factory, backend)
# init log file
prefix = 'test-%d-%d' % (num , len(backend))
index = new_index(srvs[0], '/ppaxos', prefix)
path = '/ppaxos/%s.%d' % (prefix, index)
dbg.info(path)
for srv in srvs:
srv.init_log(path)
clients = []
for i in range(num):
storages = map(services.factory, backend)
worker = PPaxosWorker(storages, path)
clients.append(worker)
for worker in clients:
worker.start()
latency = []
master_latency = None
for worker in clients:
worker.join()
latency.append(worker.latency)
if (worker.master):
assert master_latency is None
master_latency = worker.latency
for worker in clients:
worker.join()
summary = ",".join(map(str,[min(latency), max(latency), util.median(latency), master_latency]))
dbg.info("Result: %s" % summary)
row.append(summary)
results.append(row)
# tabularize
print "Item Format: min,max,median,master"
for row in results:
for e in row:
print "%s \t" % e,
print
def test_bench_paxos2(metasync, opts):
    "benchmark latency of paxos with backends"
def new_index(srv, folder, prefix):
if not srv.exists(folder):
return 0
files = srv.listdir(folder)
cnt = 0
for fn in files:
if fn.startswith(prefix):
cnt += 1
return cnt
from paxos import PPaxosWorker2
repeat = 5
client_num = [1, 2, 3, 4, 5]
backend_list = [["dropbox"], ["onedrive"]]
results = [['Clients'] + [','.join(x) for x in backend_list]]
# start to test
for num in client_num:
for _ in range(repeat):
row = ['%d clients' % (num)]
for backend in backend_list:
dbg.info('Test paxos for %d clients and %s' % (num, ','.join(backend)))
srvs = map(services.factory, backend)
# init log file
prefix = 'test2-%d-%d' % (num , len(backend))
index = new_index(srvs[0], '/ppaxos', prefix)
path = '/ppaxos/%s.%d' % (prefix, index)
dbg.info(path)
for srv in srvs:
srv.init_log2(path)
clients = []
for i in range(num):
storages = map(services.factory, backend)
worker = PPaxosWorker2(storages, path)
clients.append(worker)
for worker in clients:
worker.start()
for worker in clients:
worker.join()
latency = []
master_latency = None
for worker in clients:
latency.append(worker.latency)
if (worker.master):
assert master_latency is None
master_latency = worker.latency
assert master_latency is not None
summary = ",".join(map(str,[min(latency), max(latency), util.median(latency), master_latency]))
dbg.info("Result: %s" % summary)
row.append(summary)
results.append(row)
# tabularize
print "Item Format: min,max,median,master"
for row in results:
for e in row:
print "%s \t" % e,
print
def test_bench_disk_paxos(metasync, opts):
    "test disk paxos: benchmark latency of paxos with backends"
from disk_paxos import DiskPaxosWorker
repeat = 5
client_num = [1, 2, 3, 4, 5]
backend_list = [["google"], ["dropbox"], ["onedrive"], ["box"], ["google", "dropbox", "onedrive"]]
results = [['Clients'] + [','.join(x) for x in backend_list]]
# start to test
for num in client_num:
for num_prop in range(1, num + 1):
for _ in range(repeat):
row = ['%d/%d clients' % (num_prop, num)]
for backend in backend_list:
srvs = map(services.factory, backend)
dbg.info('Test paxos for %d/%d clients and %s' % (num_prop, num, ','.join(backend)))
# initialize all disk blocks
blockList = []
for i in range(num):
path = '/diskpaxos/client%d' % i
for srv in srvs:
if not srv.exists(path):
srv.put(path, '')
else:
srv.update(path, '')
blockList.append(path)
clients = []
for i in range(num_prop):
storages = map(services.factory, backend)
worker = DiskPaxosWorker(storages, blockList[i], blockList)
clients.append(worker)
#dbg.dbg('client %d %s' % (i, worker.clientid))
for worker in clients:
worker.start()
latency = []
master_latency = None
for worker in clients:
worker.join()
latency.append(worker.latency)
if (worker.master):
assert master_latency is None
master_latency = worker.latency
for worker in clients:
worker.join()
summary = ",".join(map(str,[min(latency), max(latency), util.median(latency), master_latency]))
dbg.info("Result: %s" % summary)
row.append(summary)
results.append(row)
# tabularize
print "Item Format: min,max,median,master"
for row in results:
for e in row:
print "%s \t" % e,
print
def test_sid(metasync, opts):
import services
allset = set()
for srv, doc in services.backends():
if(srv != "disk"):
instance = services.factory(srv)
assert instance is not None
sid = instance.sid()
print(sid, instance)
assert sid is not None
assert sid not in allset
allset.add(sid)
def test_bench_latency(metasync, opts):
import services
allset = set()
path = '%d' % time.time()
with open("/dev/urandom") as ifd:
content = ifd.read(128)
for srv, doc in services.backends():
if(srv != "disk"):
instance = services.factory(srv)
beg = time.time()
instance.put(path,content)
end = time.time()
print(srv + " up " + str(end-beg))
beg = time.time()
ct = instance.get(path)
end = time.time()
print(srv + " dn " + str(end-beg))
def test_mapping_fairness(metasync, opts):
"test the fairness of mapping scheme"
import string
import random
def evaluate(count, config):
N = sum(count)
C = sum(map(lambda x: x[1], config))
score = 0.0
for srv in config:
score += (1.0*count[srv[0]]/srv[1] - 1.0*N/C) ** 2
return score
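    # evaluate() above returns the sum of squared deviations between each node's
    # per-capacity load (count/size) and the ideal uniform load (N/C); a lower
    # score means a fairer distribution.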
config = [(0,2), (1,7), (2,10), (3,15)]
nspace = sum(map(lambda x: x[1], config))
result = [['replication', 'factor', 'result', 'fairness', 'score']]
N = 50
random.seed(0)
for replication in range(1, 4):
for factor in range(100, 1001, 100):
hspace = factor * nspace
detmap = DetMap2(config, hspace, replication)
count = [0, 0, 0, 0]
for _ in range(5000):
randstr = ''.join(random.choice(string.letters + string.digits) for _ in range(N))
hashid = util.sha1(randstr)
lst = detmap.get_mapping(hashid)
for i in lst:
count[i] += 1
fairness = [1.0 * count[i] / config[i][1] for i in range(4)]
score = evaluate(count, config)
row = [replication, factor, count, fairness, score]
result.append(row)
for row in result:
for e in row:
print "%s\t" % e,
print
def test_mapping_dist(metasync, opts):
mapping = [("dropbox", 2), ("google", 15), ("box", 10), ("onedrive", 7), ("baidu", 2048)]
mapping = map(lambda x:(util.md5(x[0])%10000,x[1]), mapping)
print(mapping)
hspace = (2+15+10+7+2048)*5
objs = []
with open("result/linux_objs.txt") as f:
for line in f:
sp = line.strip().split("\t")
hv = sp[0]
size = int(sp[1])
objs.append( (hv, size) )
for replication in range(1, 4):
detmap = DetMap2(mapping, hspace, replication)
sizes = {}
counts = {}
for srv, sz in mapping:
sizes[srv] = 0
counts[srv] = 0
for obj in objs:
hv = obj[0]
size = obj[1]
lst = detmap.get_mapping(hv)
for srv in lst:
counts[srv] += 1
sizes[srv] += size
print replication,
for srv, sz in mapping:
print "%d/%d" % (counts[srv],sizes[srv]),
print
|
|
#!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
from os.path import join
from utils_misc import config_writer, mat_type_check
from functools import reduce
from utils_fs import relevant_folders
# Contains configuration setting for training
DATA_FORMAT = 'binary'
def binomial_m_svm_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
maxiter = 20
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
model = join(train_write + '.' + str(i), 'model.data')
Log = join(train_write + '.' + str(i), 'Log.data')
config = dict(X=X, Y=Y, icpt=icpt, classes=2, reg=reg, tol=tol, maxiter=maxiter,
model=model, Log=Log, fmt=DATA_FORMAT)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def binomial_l2_svm_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
maxiter = '100'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
model = join(train_write + '.' + str(i), 'model.data')
Log = join(train_write + '.' + str(i), 'Log.data')
config = dict(X=X, Y=Y, icpt=icpt, reg=reg, tol=tol, maxiter=maxiter, model=model,
Log=Log, fmt=DATA_FORMAT)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def binomial_multilogreg_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
moi = '100'
mii = '5'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
config = dict(X=X, Y=Y, icpt=icpt, reg=reg, tol=tol, moi=moi, mii=mii,
B=B)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def clustering_kmeans_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
X = join(datagen_dir, 'X.data')
C = join(train_write, 'C.data')
k = '50'
maxi = '50'
tol = '0.0001'
config = dict(X=X, k=k, maxi=maxi, tol=tol, C=C)
config_writer(save_path + '.json', config)
return [save_path]
def stats1_univar_stats_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
X = join(datagen_dir, 'X.data')
TYPES = join(datagen_dir, 'types')
STATS = join(train_write, 'STATS.data')
config = dict(X=X, TYPES=TYPES, STATS=STATS)
config_writer(save_path + '.json', config)
return [save_path]
def stats1_bivar_stats_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
X = join(datagen_dir, 'X.data')
index1 = join(datagen_dir, 'set1.indices')
index2 = join(datagen_dir, 'set2.indices')
types1 = join(datagen_dir, 'set1.types')
types2 = join(datagen_dir, 'set2.types')
config = dict(X=X, index1=index1, index2=index2, types1=types1, types2=types2, OUTDIR=train_write)
config_writer(save_path + '.json', config)
return [save_path]
def stats2_stratstats_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
X = join(datagen_dir, 'X.data')
Xcid = join(datagen_dir, 'Xcid.data')
Ycid = join(datagen_dir, 'Ycid.data')
O = join(train_write, 'O.data')
config = dict(X=X, Xcid=Xcid, Ycid=Ycid, O=O, fmt=DATA_FORMAT)
config_writer(save_path + '.json', config)
return [save_path]
def multinomial_m_svm_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
maxiter = '20'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
model = join(train_write + '.' + str(i), 'model.data')
Log = join(train_write + '.' + str(i), 'Log.data')
config = dict(X=X, Y=Y, icpt=icpt, classes=150, reg=reg, tol=tol, maxiter=maxiter,
model=model, Log=Log, fmt=DATA_FORMAT)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def multinomial_naive_bayes_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
classes = '150'
prior = join(train_write, 'prior')
conditionals = join(train_write, 'conditionals')
accuracy = join(train_write, 'accuracy')
probabilities = join(train_write, 'probabilities')
config = dict(X=X, Y=Y, classes=classes, prior=prior, conditionals=conditionals,
accuracy=accuracy, fmt=DATA_FORMAT, probabilities=probabilities)
config_writer(save_path + '.json', config)
return [save_path]
def multinomial_multilogreg_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
moi = '100'
mii = '0'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
config = dict(X=X, Y=Y, B=B, icpt=icpt, reg=reg, tol=tol, moi=moi, mii=mii, fmt=DATA_FORMAT)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def regression1_linearregds_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
icpt = str(i)
reg = '0.01'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
config = dict(X=X, Y=Y, B=B, icpt=icpt, fmt=DATA_FORMAT, reg=reg)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def regression1_linearregcg_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
icpt = str(i)
reg = '0.01'
tol = '0.0001'
maxi = '20'
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
config = dict(X=X, Y=Y, B=B, icpt=icpt, fmt=DATA_FORMAT, maxi=maxi, tol=tol, reg=reg)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def regression2_glm_gamma_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
icpt = str(i)
fmt = DATA_FORMAT
moi = '200'
mii = '5'
dfam = '1'
vpow = '2.0'
link = '1'
lpow = '0.0'
tol = '0.0001'
reg = '0.01'
config = dict(X=X, Y=Y, B=B, icpt=icpt, fmt=fmt, moi=moi, mii=mii, dfam=dfam,
                      vpow=vpow, link=link, lpow=lpow, tol=tol, reg=reg)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def regression2_glm_binomial_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
icpt = str(i)
fmt = DATA_FORMAT
moi = '200'
mii = '5'
dfam = '2'
link = '3'
yneg = '2'
tol = '0.0001'
reg = '0.01'
config = dict(X=X, Y=Y, B=B, icpt=icpt, fmt=fmt, moi=moi, mii=mii,
dfam=dfam, link=link, yneg=yneg, tol=tol, reg=reg)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def regression2_glm_poisson_train(save_folder_name, datagen_dir, train_dir, config_dir):
save_path = join(config_dir, save_folder_name)
train_write = join(train_dir, save_folder_name)
data_folders = []
for i in [0, 1, 2]:
X = join(datagen_dir, 'X.data')
Y = join(datagen_dir, 'Y.data')
B = join(train_write + '.' + str(i), 'B.data')
icpt = str(i)
fmt = DATA_FORMAT
moi = '200'
mii = '5'
dfam = '1'
        vpow = '1'
link = '1'
lpow = '0'
tol = '0.0001'
reg = '0.01'
config = dict(X=X, Y=Y, B=B, icpt=icpt, fmt=fmt, moi=moi, mii=mii,
                      dfam=dfam, vpow=vpow, link=link, lpow=lpow, tol=tol, reg=reg)
config_writer(save_path + '.' + str(i) + '.json', config)
data_folders.append(save_path + '.' + str(i))
return data_folders
def config_packets_train(algo_payload, matrix_type, matrix_shape, datagen_dir, train_dir, dense_algos, config_dir):
"""
This function has two responsibilities. Generate the configuration files for
input training algorithms and return a dictionary that will be used for execution.
algo_payload : List of tuples
The first tuple index contains algorithm name and the second index contains
family type.
matrix_type: String
Type of matrix to generate e.g dense, sparse, all
matrix_shape: String
Shape of matrix to generate e.g 100k_10
datagen_dir: String
Path of the data generation directory
train_dir: String
Path of the training directory
dense_algos: List
Algorithms that support only dense matrix type
config_dir: String
Location to store to configuration json file
return: {string: list}
    This dictionary contains the algorithms to be executed as keys and, as values,
    lists of paths of the configuration json files to be executed.
"""
config_bundle = {}
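    # Illustrative shape of the dictionary returned at the end (names below are
    # examples only):
    #   {'m-svm.binomial': ['<config_dir>/m-svm.<datagen_folder>.0',
    #                       '<config_dir>/m-svm.<datagen_folder>.1'],
    #    'Kmeans.clustering': ['<config_dir>/Kmeans.<datagen_folder>']}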
for current_algo, current_family in algo_payload:
key_name = current_algo + '.' + current_family
config_bundle[key_name] = []
for current_algo, current_family in algo_payload:
current_matrix_type = mat_type_check(current_family, matrix_type, dense_algos)
data_gen_folders = relevant_folders(datagen_dir, current_algo, current_family,
current_matrix_type, matrix_shape, 'data-gen')
if len(data_gen_folders) == 0:
print('datagen folders not present for {}'.format(current_family))
sys.exit()
for current_datagen_dir in data_gen_folders:
file_path_last = current_datagen_dir.split('/')[-1]
save_name = '.'.join([current_algo] + [file_path_last])
algo_func = '_'.join([current_family] + [current_algo.lower().replace('-', '_')]
+ ['train'])
conf_path = globals()[algo_func](save_name, current_datagen_dir, train_dir, config_dir)
key_name = current_algo + '.' + current_family
config_bundle[key_name].append(conf_path)
config_packets = {}
# Flatten
for current_algo, current_family in config_bundle.items():
config_packets[current_algo] = reduce(lambda x, y: x + y, current_family)
return config_packets
|
|
#
import MySQLdb, os, re, json
from functools import *
from tableinfo import *
from sys import argv
from graph import *
from extra import *
from defines import *
import readline
from optparse import OptionParser
usage = """
Usage:
python3 relations.py --source=<database> [--options]
<source> format:
username:password@host[:port]/database
    python3 relations.py --source=root:root@localhost/mydb
"""
def fetch_database_info(extra_info, user, password, server, db):
"""
Fetch database info and mixin extra info from json config
"""
host = server
port = 3306
if ':' in server:
host, port = server.split(':')
port = int(port)
db = MySQLdb.connect(host=host, user=user, passwd=password, db=db, port=port, charset="utf8")
print("#Reading database scheme")
ct = db.cursor()
ct.execute("SHOW TABLES")
table_info_list = []
id_table_map = {} # Stores id-field names => tableInfo mapping
for (table,) in ct.fetchall():
ct.execute("SHOW FULL COLUMNS FROM " + table)
fields = ct.fetchall()
table_info = TableInfo(table, fields, extra_info)
id_fields = table_info.get_id_fields()
for id_field_name in id_fields:
if id_field_name not in id_table_map:
id_table_map[id_field_name] = [table_info]
else:
id_table_map[id_field_name].append(table_info)
table_info_list.append(table_info)
ct.close()
return table_info_list, id_table_map, db
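# Note: id_table_map maps an id-like column name to every table that contains it,
# e.g. {"user_id": [<TableInfo orders>, <TableInfo payments>]} (illustrative
# table/column names).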
def calc_tables_relations(tables, id_table_map):
"""
Calc the tables' relations
"""
for table in tables:
primary_key = table.primary_key[0]
if primary_key not in id_table_map:
continue
follower_tables = id_table_map[primary_key]
for follower_table in follower_tables:
table.add_follower_table(follower_table)
def update_logic_foreign_key(table_info_list, table_info, uncertain_id, keys, extra):
keys = keys.split(',')
for key in keys:
key = key.strip()
table_name, field_name = key.split(".")
if table_name not in map(lambda x: x.table_name, table_info_list):
raise Exception("Table `%s` not found" % red_text(table_name))
this_table_info = list(filter(lambda x: x.table_name==table_name, table_info_list))[0]
if field_name not in this_table_info.id_fields and field_name != this_table_info.primary_key[0]:
raise Exception("Field `%s`.`%s` not found" % (red_text(table_name), red_text(field_name)))
extra.set_virtual_foreign_key(table_info, uncertain_id, table_name, field_name)
extra.update_table_extra_info()
return True
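# For example, answering "users.id, orders.user_id" (illustrative names) links the
# uncertain id field to those existing primary/id fields; each "table.field" entry
# is validated against the known tables before the virtual foreign key is stored.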
def query_uncertain_id_fields(table_info_list, extra):
    """
    Interactively ask the user which primary key each unresolved id field refers to
    """
for table_info in table_info_list:
id_fields = table_info.get_id_fields()
depends = table_info.depends
if len(id_fields) == len(depends):
continue
depends_ids = list(map(lambda x: x[0], depends.keys()))
uncertain_ids = list(set(id_fields) - set(depends_ids))
if len(uncertain_ids) == 0:
continue
index = 0
while index < len(uncertain_ids):
uncertain_id = uncertain_ids[index]
try:
print("Could you point out `%s`.`%s` corresponds to which primary key?"
% (green_text(table_info.table_name), green_text(uncertain_id)))
keys = input('')
if len(keys) > 0 and '.' in keys:
if update_logic_foreign_key(table_info_list, table_info, uncertain_id, keys, extra):
index += 1
elif keys == 'i':
# Ignore it this time
index += 1
elif keys == 'n':
# It's not an Id.
index += 1
elif keys == 'e':
# The fields means an id from extra system
extra.set_virtual_foreign_key(table_info, uncertain_id, '', '')
extra.update_table_extra_info()
index += 1
except Exception as e:
print(e)
# show all tables' followers and depends
def print_relations(results):
for table in results:
print(table)
for f in table.followers:
print("\t", f)
# print("\t", '-' * 30)
# for d in table.depends:
# print("\t", d)
print("=" * 40, end='\n\n')
def init_graph_from_relations(results):
graph = Graph()
for table in results:
graph.add_vertex(table.table_name, table)
for table in results:
for follow in table.followers:
graph.add_edge(table.table_name, follow.table_name)
return graph
def plot(graph, filename="social_network.png"):
from igraph import plot
layout = graph.layout("circle")
visual_style = dict()
visual_style["vertex_size"] = 20
visual_style["vertex_label_size"] = 30
visual_style["vertex_label_dist"] = 2
visual_style["vertex_color"] = "white"
visual_style["vertex_label_color"] = "blue"
visual_style["vertex_label"] = graph.vs["name"]
visual_style["edge_width"] = 2
visual_style["layout"] = layout
visual_style["bbox"] = (1200, 1000)
visual_style["margin"] = 100
plot(graph, filename, **visual_style)
def calc_database_table_relations(db_args):
extra = ExtraTableInfo(db_args[3])
extra_info = extra.load_table_extra_info()
table_info_list, id_table_map, db = fetch_database_info(extra_info, *db_args)
calc_tables_relations(table_info_list, id_table_map)
return table_info_list, extra
def main(options, other_args):
    # Validate and parse the --source argument.
    u = re.compile("(.*):(.*)@(.*)/(.*)")
    a = u.match(options.source or '')
    if a is None:
        print(usage)
        return
    db_args = a.groups()
table_info_list, extra = calc_database_table_relations(db_args)
print("Press [i] to ignore this time, [n] means not an id(key), [e] means an id from an external system.")
print("")
try:
query_uncertain_id_fields(table_info_list, extra)
except KeyboardInterrupt as e:
print('Ignore all uncertain foreign keys')
table_info_list, extra = calc_database_table_relations(db_args)
    if options.graph or options.way:
        graph = init_graph_from_relations(table_info_list)
    if options.graph:
        plot(graph, options.graph)
    if options.way:
        begin_point, end_point = options.way.split(',')
        paths = graph.all_paths(begin_point, end_point)
count = 1
for path in paths:
print('-' * 5, "Way %d" % count, '-' * 5)
graph.prints(path)
count += 1
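# Hedged usage sketch (illustrative only, never invoked by the tool): it shows
# how the --source string is decomposed by the same regex used in main(). The
# credentials below are placeholders, not real values.
def _example_parse_source(source="root:root@localhost/mydb"):
    user, password, server, database = re.compile(
        "(.*):(.*)@(.*)/(.*)").match(source).groups()
    return user, password, server, database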
#
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-s", "--source", action="store", dest="source", help="Provide source database")
parser.add_option("-g", "--graph", action="store", dest="graph", help="Render the relations in a graph")
parser.add_option("-w", "--way", action="store", dest="way", help="Provide a way from a begin point to the end point")
options, args = parser.parse_args()
main(options, argv[2:])
|
|
import copy
from core.expression import Symbol, Matrix, \
Equal, Plus, Minus, Times, Transpose, Inverse, \
BlockedExpression, Sequence, Predicate, \
PatternDot
from core.builtin_operands import Zero
from core.functional import replace, replace_all, RewriteRule, Replacement, Constraint
from core.rules_collection_base import canonical_rules, \
simplify_rules_base
import core.properties as properties
from core.TOS import _TOE as TOE
def to_canonical( expr ):
return replace_all( expr, canonical_rules+simplify_rules_base )
#
# An expression is an input expression if:
# - it is an input symbol,
# - it is a sum or product of input expressions,
# - it is the transpose of an input expression,
# - it is the inverse of an input expression, or
# - it is an operation such as trsm, ...
#
# [TODO] node.isInput -> only when changing a property
# will trigger re-inference up the tree
def isInput( node ):
# isinstance?
#if node.isInput():
if isinstance( node, Symbol ) and node.isInput():
return True
if isinstance( node, Plus ):
return all( [ isInput(term) for term in node.get_children() ] )
if isinstance( node, Times ):
return all( [ isInput(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isInput(node.get_children()[0])
if isinstance( node, Transpose ):
return isInput(node.get_children()[0])
if isinstance( node, Inverse ):
return isInput(node.get_children()[0])
# [TODO] Double check!!!
if isinstance( node, Predicate ) and node.isInput():
return True
return False
#
# An expression is an output expression if:
# - it is an output symbol,
# - it is a sum or product in which at least one output expression appears,
# - it is the transpose of an output expression, or
# - it is the inverse of an output expression.
#
# [TODO] node.isOutput -> only when changing a property
# will trigger re-inference up the tree
def isOutput( node ):
# isinstance?
#if node.isOutput():
if isinstance( node, Symbol ) and node.isOutput():
return True
if isinstance( node, Plus ):
return any( [ isOutput(term) for term in node.get_children() ] )
if isinstance( node, Times ):
return any( [ isOutput(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isOutput(node.get_children()[0])
if isinstance( node, Transpose ):
return isOutput(node.get_children()[0])
if isinstance( node, Inverse ):
return isOutput(node.get_children()[0])
return False
def isZero( node ):
return node.isZero()
def isIdentity( node ):
if node.isIdentity():
return True
if isinstance( node, Plus ):
return False
if isinstance( node, Times ):
return all( [ isIdentity(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return False
if isinstance( node, Transpose ):
return isIdentity(node.get_children()[0])
if isinstance( node, Inverse ):
return isIdentity(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar": ONE!
#return True
return False
def isDiagonal( node ):
# isinstance?
if node.isDiagonal():
return True
if isinstance( node, Plus ):
return all( [ isDiagonal(term) for term in node.get_children() ] )
if isinstance( node, Times ):
return all( [ isDiagonal(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isDiagonal(node.get_children()[0])
if isinstance( node, Transpose ):
return isDiagonal(node.get_children()[0])
if isinstance( node, Inverse ):
return isDiagonal(node.get_children()[0])
#if isinstance( node, Operand ): # Should be inferred once at the beginning and enter the first "if"
if node.type == "Scalar":
return True
return False
def isTriangular( node ):
return isLowerTriangular( node ) or isUpperTriangular( node )
def isLowerTriangular( node ):
# isinstance?
if node.isLowerTriangular():
return True
if isinstance( node, Plus ):
return all( [ isLowerTriangular(term) for term in node.get_children() ] )
if isinstance( node, Times ):
return all( [ isLowerTriangular(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isLowerTriangular(node.get_children()[0])
if isinstance( node, Transpose ):
return isUpperTriangular(node.get_children()[0])
if isinstance( node, Inverse ):
return isLowerTriangular(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar":
#return True
return False
def isUpperTriangular( node ):
# isinstance?
if node.isUpperTriangular():
return True
if isinstance( node, Plus ):
return all( [ isUpperTriangular(term) for term in node.get_children() ] )
if isinstance( node, Times ):
return all( [ isUpperTriangular(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isUpperTriangular(node.get_children()[0])
if isinstance( node, Transpose ):
return isLowerTriangular(node.get_children()[0])
if isinstance( node, Inverse ):
return isUpperTriangular(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar":
#return True
return False
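# Hedged numeric sanity check (assumption: numpy is available; it is not a
# dependency of this module). It mirrors the structural rules encoded above:
# the transpose of a lower-triangular matrix is upper triangular, and a product
# of lower-triangular matrices stays lower triangular.
def _numeric_triangularity_check():
    import numpy as np
    L1 = np.tril(np.random.rand(4, 4))
    L2 = np.tril(np.random.rand(4, 4))
    assert np.allclose(np.triu(L1.T), L1.T)              # transpose flips lower -> upper
    assert np.allclose(np.tril(L1.dot(L2)), L1.dot(L2))  # product stays lower triangular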
def isUnitDiagonal( node ):
# isinstance?
if node.isUnitDiagonal():
return True
if isinstance( node, Plus ):
return False
if isinstance( node, Times ): # Should check triangular as well?
return all( [ isUnitDiagonal(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return False
if isinstance( node, Transpose ):
return isUnitDiagonal(node.get_children()[0])
if isinstance( node, Inverse ): # triangular?
return isUnitDiagonal(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar": ONE!
#return True
return False
def isSymmetric( node ):
# isinstance?
if node.isSymmetric():
return True
# node == trans( node )
alt1 = copy.deepcopy( node )
alt1 = to_canonical(alt1)._cleanup()
alt2 = copy.deepcopy( node )
alt2 = to_canonical(Transpose([alt2]))._cleanup()
if alt1 == alt2:
return True
# more ...
if isinstance( node, Plus ):
return all( [ isSymmetric(term) for term in node.get_children() ] )
    if isinstance( node, Times ): # iff they commute ...
return False
if isinstance( node, Minus ):
return isSymmetric(node.get_children()[0])
if isinstance( node, Transpose ):
return isSymmetric(node.get_children()[0])
if isinstance( node, Inverse ):
return isSymmetric(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar":
#return True
return False
def isSPD( node ):
# isinstance?
if node.isSPD():
return True
if TOE.get_property( properties.SPD, node ):
return True
if isinstance( node, Plus ): # ?
#return reduce( lambda x,y: x and y, [ isSPD(term) for term in node.get_children() ], True )
return False
    if isinstance( node, Times ): # related to "iff they commute" ... ?
return False
if isinstance( node, Minus ):
return False
if isinstance( node, Transpose ):
return isSPD(node.get_children()[0])
if isinstance( node, Inverse ):
return isSPD(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar": > 0 !
#return True
return False
def isNonSingular( node ):
# isinstance?
if node.isNonSingular():
return True
if isinstance( node, Plus ): # ?
return False
    if isinstance( node, Times ): # related to "iff they commute" ... ?
return all( [ isNonSingular(factor) for factor in node.get_children() ] )
if isinstance( node, Minus ):
return isNonSingular(node.get_children()[0])
if isinstance( node, Transpose ):
return isNonSingular(node.get_children()[0])
if isinstance( node, Inverse ):
return isNonSingular(node.get_children()[0])
#if isinstance( node, Operand ):
#if node.type == "Scalar": != 0 !
#return True
return False
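# Hedged numeric sanity check (assumption: numpy is available). It illustrates
# the facts behind isSPD and isNonSingular: transposing or inverting an SPD
# matrix preserves SPD-ness, and a product of non-singular factors is
# non-singular.
def _numeric_spd_nonsingular_check():
    import numpy as np
    rng = np.random.RandomState(0)
    B = rng.rand(4, 4) + 4 * np.eye(4)          # diagonally dominant -> non-singular
    A = B.dot(B.T)                              # SPD by construction
    assert np.all(np.linalg.eigvalsh(A) > 0)
    assert np.all(np.linalg.eigvalsh(np.linalg.inv(A)) > 0)
    assert abs(np.linalg.det(A.dot(B))) > 0     # product of non-singular factors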
|
|
import os
from socket import socket
from time import sleep
from mock import patch
from nose.plugins.attrib import attr
from nose.tools import ok_, eq_
import unittest
import xmlrpclib
from checks import AgentCheck
from tests.common import get_check
class TestSupervisordCheck(unittest.TestCase):
TEST_CASES = [{
'yaml': """
init_config:
instances:
- name: server1
host: localhost
port: 9001""",
'expected_instances': [{
'host': 'localhost',
'name': 'server1',
'port': 9001
}],
'expected_metrics': {
'server1': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:python']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:mysql']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:java']})
]
},
'expected_service_checks': {
'server1': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1', 'supervisord_process:mysql'],
'check': 'supervisord.process.check'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:java'],
'check': 'supervisord.process.check'
}, {
'status': AgentCheck.UNKNOWN,
'tags': ['supervisord_server:server1', 'supervisord_process:python'],
'check': 'supervisord.process.check'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
user: user
pass: pass
proc_names:
- apache2
- webapp
- name: server1
host: 10.60.130.82""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'user': 'user',
'pass': 'pass',
'proc_names': ['apache2', 'webapp'],
}, {
'host': '10.60.130.82',
'name': 'server1'
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:apache2']}),
('supervisord.process.uptime', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:webapp']}),
],
'server1': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:ruby']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:apache2'],
'check': 'supervisord.process.check'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:webapp'],
'check': 'supervisord.process.check'
}],
'server1': [{
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:ruby'],
'check': 'supervisord.process.check'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: invalid_host
port: 9009""",
'expected_instances': [{
'name': 'server0',
'host': 'invalid_host',
'port': 9009
}],
'error_message': """Cannot connect to http://invalid_host:9009. Make sure supervisor is running and XML-RPC inet interface is enabled."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9010
user: invalid_user
pass: invalid_pass""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9010,
'user': 'invalid_user',
'pass': 'invalid_pass'
}],
'error_message': """Username or password to server0 are incorrect."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
proc_names:
- mysql
- invalid_process""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'proc_names': ['mysql', 'invalid_process']
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:mysql']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
'check': 'supervisord.process.check'
}]
}
}]
def setUp(self):
self.patcher = patch('xmlrpclib.Server', self.mock_server)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
### Integration Test #####################################################
def test_check(self):
"""Integration test for supervisord check. Using a mocked supervisord."""
for tc in self.TEST_CASES:
check, instances = get_check('supervisord', tc['yaml'])
ok_(check is not None, msg=check)
eq_(tc['expected_instances'], instances)
for instance in instances:
name = instance['name']
try:
# Run the check
check.check(instance)
except Exception, e:
                    if 'error_message' in tc:  # expected error
eq_(str(e), tc['error_message'])
else:
ok_(False, msg=str(e))
else:
# Assert that the check collected the right metrics
expected_metrics = tc['expected_metrics'][name]
self.assert_metrics(expected_metrics, check.get_metrics())
# Assert that the check generated the right service checks
expected_service_checks = tc['expected_service_checks'][name]
self.assert_service_checks(expected_service_checks,
check.get_service_checks())
@attr(requires='supervisord')
def test_travis_supervisord(self):
"""Integration test for supervisord check. Using a supervisord on Travis."""
# Load yaml config
config_str = open(os.environ['VOLATILE_DIR'] + '/supervisord.yaml', 'r').read()
ok_(config_str is not None and len(config_str) > 0, msg=config_str)
# init the check and get the instances
check, instances = get_check('supervisord', config_str)
ok_(check is not None, msg=check)
eq_(len(instances), 1)
# Supervisord should run 3 programs for 30, 60 and 90 seconds
# respectively. The tests below will ensure that the process count
# metric is reported correctly after (roughly) 10, 40, 70 and 100 seconds
for i in range(4):
try:
# Run the check
check.check(instances[0])
except Exception, e:
# Make sure that it ran successfully
ok_(False, msg=str(e))
else:
up, down = 0, 0
for name, timestamp, value, meta in check.get_metrics():
if name == 'supervisord.process.count':
if 'status:up' in meta['tags']:
up = value
elif 'status:down' in meta['tags']:
down = value
eq_(up, 3 - i)
eq_(down, i)
sleep(30)
### Unit Tests ###########################################################
def test_build_message(self):
"""Unit test supervisord build service check message."""
process = {
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}
expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""
check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
eq_(expected_message, check._build_message(process))
### Helper Methods #######################################################
@staticmethod
def mock_server(url):
return MockXmlRcpServer(url)
@staticmethod
def assert_metrics(expected, actual):
actual = [TestSupervisordCheck.norm_metric(metric) for metric in actual]
eq_(len(actual), len(expected), msg='Invalid # metrics reported.\n'
'Expected: {0}. Found: {1}'.format(len(expected), len(actual)))
ok_(all([expected_metric in actual for expected_metric in expected]),
msg='Reported metrics are incorrect.\nExpected: {0}.\n'
'Found: {1}'.format(expected, actual))
@staticmethod
def assert_service_checks(expected, actual):
actual = [TestSupervisordCheck.norm_service_check(service_check)
for service_check in actual]
eq_(len(actual), len(expected), msg='Invalid # service checks reported.'
'\nExpected: {0}. Found: {1}.'.format(len(expected), len(actual)))
ok_(all([expected_service_check in actual
for expected_service_check in expected]),
msg='Reported service checks are incorrect.\nExpected:{0}\n'
'Found:{1}'.format(expected, actual))
@staticmethod
def norm_metric(metric):
'''Removes hostname and timestamp'''
metric[3].pop('hostname')
return (metric[0], metric[2], metric[3])
@staticmethod
def norm_service_check(service_check):
'''Removes timestamp, host_name, message and id'''
for field in ['timestamp', 'host_name', 'message', 'id']:
service_check.pop(field)
return service_check
class MockXmlRcpServer:
"""Class that mocks an XML RPC server. Initialized using a mocked
supervisord server url, which is used to initialize the supervisord
server.
"""
def __init__(self, url):
self.supervisor = MockSupervisor(url)
class MockSupervisor:
"""Class that mocks a supervisord sever. Initialized using the server url
and mocks process methods providing mocked process information for testing
purposes.
"""
MOCK_PROCESSES = {
'http://localhost:9001/RPC2': [{
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}, {
'now': 1414815738,
'group': 'java',
'description': 'Nov 01 04:22 AM',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/java-stderr---supervisor-lSdcKZ.log',
'stop': 1414815722,
'statename': 'STOPPED',
'start': 1414815388,
'state': 0,
'stdout_logfile': '/var/log/java/java.log',
'logfile': '/var/log/java/java.log',
'exitstatus': 21,
'spawnerr': '',
'name': 'java'
}, {
'now': 1414815738,
'group': 'python',
'description': '',
'pid': 2765,
'stderr_logfile': '/var/log/supervisor/python-stderr---supervisor-vFzxIg.log',
'stop': 1414815737,
'statename': 'STARTING',
'start': 1414815737,
'state': 10,
'stdout_logfile': '/var/log/python/python.log',
'logfile': '/var/log/python/python.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'python'
}],
'http://user:pass@localhost:9001/RPC2': [{
'now': 1414869824,
'group': 'apache2',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/apache2-stderr---supervisor-0PkXWd.log',
'stop': 1414867047,
'statename': 'FATAL',
'start': 1414867047,
'state': 200,
'stdout_logfile': '/var/log/apache2/apache2.log',
'logfile': '/var/log/apache2/apache2.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'apache2'
}, {
'now': 1414871104,
'group': 'webapp',
'description': '',
'pid': 17600,
'stderr_logfile': '/var/log/supervisor/webapp-stderr---supervisor-onZK__.log',
'stop': 1414871101,
'statename': 'STOPPING',
'start': 1414871102,
'state': 40,
'stdout_logfile': '/var/log/company/webapp.log',
'logfile': '/var/log/company/webapp.log',
'exitstatus': 1,
'spawnerr': '',
'name': 'webapp'
}],
'http://10.60.130.82:9001/RPC2': [{
'now': 1414871588,
'group': 'ruby',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/ruby-stderr---supervisor-BU7Wat.log',
'stop': 1414871588,
'statename': 'BACKOFF',
'start': 1414871588,
'state': 30,
'stdout_logfile': '/var/log/ruby/ruby.log',
'logfile': '/var/log/ruby/ruby.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'ruby'
}]
}
def __init__(self, url):
self.url = url
def getAllProcessInfo(self):
self._validate_request()
return self.MOCK_PROCESSES[self.url]
def getProcessInfo(self, proc_name):
self._validate_request(proc=proc_name)
for proc in self.MOCK_PROCESSES[self.url]:
if proc['name'] == proc_name:
return proc
raise Exception('Process not found: %s' % proc_name)
def _validate_request(self, proc=None):
'''Validates request and simulates errors when not valid'''
if 'invalid_host' in self.url:
# Simulate connecting to an invalid host/port in order to
# raise `socket.error: [Errno 111] Connection refused`
socket().connect(('localhost', 38837))
elif 'invalid_pass' in self.url:
# Simulate xmlrpc exception for invalid credentials
raise xmlrpclib.ProtocolError(self.url[7:], 401,
'Unauthorized', None)
elif proc is not None and 'invalid' in proc:
# Simulate xmlrpc exception for process not found
raise xmlrpclib.Fault(10, 'BAD_NAME')
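# Hedged usage sketch (illustrative only): with xmlrpclib.Server patched to
# mock_server in setUp, a URL listed in MOCK_PROCESSES resolves to the canned
# process dictionaries above instead of a live supervisord.
def _example_mock_lookup():
    server = MockXmlRcpServer('http://localhost:9001/RPC2')
    return server.supervisor.getProcessInfo('mysql')  # canned 'mysql' entry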
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
from ..util.event import Event, EmitterGroup
from ..visuals.transforms import (NullTransform, BaseTransform,
ChainTransform, create_transform)
class Node(object):
""" Base class representing an object in a scene.
A group of nodes connected through parent-child relationships define a
scenegraph. Nodes may have any number of children or parents, although
it is uncommon to have more than one parent.
Each Node defines a ``transform`` property, which describes the position,
orientation, scale, etc. of the Node relative to its parent. The Node's
children inherit this property, and then further apply their own
transformations on top of that.
With the ``transform`` property, each Node implicitly defines a "local"
    coordinate system, and the Nodes and edges in the scenegraph can be thought
of as coordinate systems connected by transformation functions.
Parameters
----------
parent : Node
The parent of the Node.
name : str
The name used to identify the node.
"""
# Needed to allow subclasses to repr() themselves before Node.__init__()
_name = None
def __init__(self, parent=None, name=None):
self.name = name
self._visible = True
# Add some events to the emitter groups:
events = ['parents_change', 'children_change', 'transform_change',
'mouse_press', 'mouse_move', 'mouse_release', 'mouse_wheel',
'key_press', 'key_release']
# Create event emitter if needed (in subclasses that inherit from
# Visual, we already have an emitter to share)
if not hasattr(self, 'events'):
self.events = EmitterGroup(source=self, auto_connect=True,
update=Event)
self.events.add(**dict([(ev, Event) for ev in events]))
# Entities are organized in a parent-children hierarchy
self._children = []
# TODO: use weakrefs for parents.
self._parents = []
if parent is not None:
self.parents = parent
self._document = None
# Components that all entities in vispy have
# todo: default transform should be trans-scale-rot transform
self._transform = NullTransform()
# todo: move visible to BaseVisualNode class when we make Node not a Visual
@property
def visible(self):
""" Whether this node should be drawn or not. Only applicable to
nodes that can be drawn.
"""
return self._visible
@visible.setter
def visible(self, val):
self._visible = bool(val)
self.update()
@property
def name(self):
return self._name
@name.setter
def name(self, n):
self._name = n
@property
def children(self):
""" A copy of the list of children of this node. Do not add
items to this list, but use ``x.parent = y`` instead.
"""
return list(self._children)
@property
def parent(self):
""" Get/set the parent. If the node has multiple parents while
using this property as a getter, an error is raised.
"""
if not self._parents:
return None
elif len(self._parents) == 1:
return self._parents[0]
else:
raise RuntimeError('Ambiguous parent: there are multiple parents.')
@parent.setter
def parent(self, parent):
# This is basically an alias
self.parents = parent
@property
def parents(self):
""" Get/set a tuple of parents.
"""
return tuple(self._parents)
@parents.setter
def parents(self, parents):
# Test input
if isinstance(parents, Node):
parents = (parents,)
if not hasattr(parents, '__iter__'):
raise ValueError("Node.parents must be iterable (got %s)"
% type(parents))
# Test that all parents are entities
for p in parents:
if not isinstance(p, Node):
                raise ValueError('A parent of a node must be a node too,'
' not %s.' % p.__class__.__name__)
# Apply
prev = list(self._parents) # No list.copy() on Py2.x
with self.events.parents_change.blocker():
# Remove parents
for parent in prev:
if parent not in parents:
self.remove_parent(parent)
# Add new parents
for parent in parents:
if parent not in prev:
self.add_parent(parent)
self.events.parents_change(new=parents, old=prev)
def add_parent(self, parent):
"""Add a parent
Parameters
----------
parent : instance of Node
The parent.
"""
if parent in self._parents:
return
self._parents.append(parent)
parent._add_child(self)
self.events.parents_change(added=parent)
self.update()
def remove_parent(self, parent):
"""Remove a parent
Parameters
----------
parent : instance of Node
The parent.
"""
if parent not in self._parents:
raise ValueError("Parent not in set of parents for this node.")
self._parents.remove(parent)
parent._remove_child(self)
self.events.parents_change(removed=parent)
def _add_child(self, node):
self._children.append(node)
self.events.children_change(added=node)
node.events.update.connect(self.events.update)
def _remove_child(self, node):
self._children.remove(node)
self.events.children_change(removed=node)
node.events.update.disconnect(self.events.update)
def update(self):
"""
Emit an event to inform listeners that properties of this Node or its
children have changed.
"""
self.events.update()
@property
def document(self):
""" The document is an optional property that is an node representing
the coordinate system from which this node should make physical
measurements such as px, mm, pt, in, etc. This coordinate system
should be used when determining line widths, font sizes, and any
other lengths specified in physical units.
The default is None; in this case, a default document is used during
drawing (usually this is supplied by the SceneCanvas).
"""
return self._document
@document.setter
def document(self, doc):
if doc is not None and not isinstance(doc, Node):
raise TypeError("Document property must be Node or None.")
self._document = doc
self.update()
@property
def transform(self):
""" The transform that maps the local coordinate frame to the
coordinate frame of the parent.
"""
return self._transform
@transform.setter
def transform(self, tr):
if self._transform is not None:
self._transform.changed.disconnect(self._transform_changed)
assert isinstance(tr, BaseTransform)
self._transform = tr
self._transform.changed.connect(self._transform_changed)
self._transform_changed(None)
def set_transform(self, type_, *args, **kwargs):
""" Create a new transform of *type* and assign it to this node.
All extra arguments are used in the construction of the transform.
Parameters
----------
type_ : str
The transform type.
*args : tuple
Arguments.
**kwargs : dict
            Keyword arguments.
"""
self.transform = create_transform(type_, *args, **kwargs)
def _transform_changed(self, event):
self.events.transform_change()
self.update()
def _parent_chain(self):
"""
Return the chain of parents starting from this node. The chain ends
at the first node with either no parents or multiple parents.
"""
chain = [self]
while True:
try:
parent = chain[-1].parent
except Exception:
break
if parent is None:
break
chain.append(parent)
return chain
def describe_tree(self, with_transform=False):
"""Create tree diagram of children
Parameters
----------
with_transform : bool
If true, add information about node transform types.
Returns
        -------
tree : str
The tree diagram.
"""
# inspired by https://github.com/mbr/asciitree/blob/master/asciitree.py
return self._describe_tree('', with_transform)
def _describe_tree(self, prefix, with_transform):
"""Helper function to actuall construct the tree"""
extra = ': "%s"' % self.name if self.name is not None else ''
if with_transform:
extra += (' [%s]' % self.transform.__class__.__name__)
output = ''
if len(prefix) > 0:
output += prefix[:-3]
output += ' +--'
output += '%s%s\n' % (self.__class__.__name__, extra)
n_children = len(self.children)
for ii, child in enumerate(self.children):
sub_prefix = prefix + (' ' if ii+1 == n_children else ' |')
output += child._describe_tree(sub_prefix, with_transform)
return output
def common_parent(self, node):
"""
Return the common parent of two entities
If the entities have no common parent, return None.
Does not search past multi-parent branches.
Parameters
----------
node : instance of Node
The other node.
Returns
-------
parent : instance of Node | None
The parent.
"""
p1 = self._parent_chain()
p2 = node._parent_chain()
for p in p1:
if p in p2:
return p
return None
def node_path_to_child(self, node):
"""Return a list describing the path from this node to a child node
This method assumes that the given node is a child node. Multiple
parenting is allowed.
Parameters
----------
node : instance of Node
The child node.
Returns
-------
path : list | None
The path.
"""
if node is self:
return []
# Go up from the child node as far as we can
path1 = [node]
child = node
while len(child.parents) == 1:
child = child.parent
path1.append(child)
# Early exit
if child is self:
return list(reversed(path1))
# Verify that we're not cut off
if len(path1[-1].parents) == 0:
raise RuntimeError('%r is not a child of %r' % (node, self))
def _is_child(path, parent, child):
path.append(parent)
if child in parent.children:
return path
else:
for c in parent.children:
possible_path = _is_child(path[:], c, child)
if possible_path:
return possible_path
return None
# Search from the parent towards the child
path2 = _is_child([], self, path1[-1])
if not path2:
raise RuntimeError('%r is not a child of %r' % (node, self))
# Return
return path2 + list(reversed(path1))
def node_path(self, node):
"""Return two lists describing the path from this node to another
Parameters
----------
node : instance of Node
The other node.
Returns
-------
p1 : list
First path (see below).
p2 : list
Second path (see below).
Notes
-----
The first list starts with this node and ends with the common parent
between the endpoint nodes. The second list contains the remainder of
the path from the common parent to the specified ending node.
For example, consider the following scenegraph::
A --- B --- C --- D
\
--- E --- F
Calling `D.node_path(F)` will return::
([D, C, B], [E, F])
Note that there must be a _single_ path in the scenegraph that connects
the two entities; otherwise an exception will be raised.
"""
p1 = self._parent_chain()
p2 = node._parent_chain()
cp = None
for p in p1:
if p in p2:
cp = p
break
if cp is None:
raise RuntimeError("No single-path common parent between nodes %s "
"and %s." % (self, node))
p1 = p1[:p1.index(cp)+1]
p2 = p2[:p2.index(cp)][::-1]
return p1, p2
def node_path_transforms(self, node):
"""Return the list of transforms along the path to another node.
The transforms are listed in reverse order, such that the last
transform should be applied first when mapping from this node to
the other.
Parameters
----------
node : instance of Node
The other node.
Returns
-------
transform : instance of Transform
The transform.
"""
a, b = self.node_path(node)
return ([n.transform.inverse for n in b] +
[n.transform for n in a[:-1]])[::-1]
def node_transform(self, node):
"""
Return the transform that maps from the coordinate system of
*self* to the local coordinate system of *node*.
Note that there must be a _single_ path in the scenegraph that connects
the two entities; otherwise an exception will be raised.
Parameters
----------
node : instance of Node
The other node.
Returns
-------
transform : instance of ChainTransform
The transform.
"""
return ChainTransform(self.node_path_transforms(node))
def __repr__(self):
name = "" if self.name is None else " name="+self.name
return "<%s%s at 0x%x>" % (self.__class__.__name__, name, id(self))
|
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import unittest
import MySQLdb
import environment
import tablet
import utils
use_mysqlctld = False
tablet_master = None
tablet_replica1 = None
tablet_replica2 = None
new_init_db = ''
db_credentials_file = ''
def setUpModule():
global new_init_db, db_credentials_file
global tablet_master, tablet_replica1, tablet_replica2
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
try:
environment.topo_server().setup()
# Determine which column is used for user passwords in this MySQL version.
proc = tablet_master.init_mysql()
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
try:
tablet_master.mquery('mysql', 'select password from mysql.user limit 0',
user='root')
password_col = 'password'
except MySQLdb.DatabaseError:
password_col = 'authentication_string'
utils.wait_procs([tablet_master.teardown_mysql()])
tablet_master.remove_tree(ignore_options=True)
# Create a new init_db.sql file that sets up passwords for all users.
# Then we use a db-credentials-file with the passwords.
new_init_db = environment.tmproot + '/init_db_with_passwords.sql'
with open(environment.vttop + '/config/init_db.sql') as fd:
init_db = fd.read()
with open(new_init_db, 'w') as fd:
fd.write(init_db)
fd.write('''
# Set real passwords for all users.
UPDATE mysql.user SET %s = PASSWORD('RootPass')
WHERE User = 'root' AND Host = 'localhost';
UPDATE mysql.user SET %s = PASSWORD('VtDbaPass')
WHERE User = 'vt_dba' AND Host = 'localhost';
UPDATE mysql.user SET %s = PASSWORD('VtAppPass')
WHERE User = 'vt_app' AND Host = 'localhost';
UPDATE mysql.user SET %s = PASSWORD('VtAllprivsPass')
WHERE User = 'vt_allprivs' AND Host = 'localhost';
UPDATE mysql.user SET %s = PASSWORD('VtReplPass')
WHERE User = 'vt_repl' AND Host = '%%';
UPDATE mysql.user SET %s = PASSWORD('VtFilteredPass')
WHERE User = 'vt_filtered' AND Host = 'localhost';
FLUSH PRIVILEGES;
''' % tuple([password_col] * 6))
credentials = {
'vt_dba': ['VtDbaPass'],
'vt_app': ['VtAppPass'],
'vt_allprivs': ['VtAllprivsPass'],
'vt_repl': ['VtReplPass'],
'vt_filtered': ['VtFilteredPass'],
}
db_credentials_file = environment.tmproot+'/db_credentials.json'
with open(db_credentials_file, 'w') as fd:
fd.write(json.dumps(credentials))
# start mysql instance external to the test
setup_procs = [
tablet_master.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [
tablet_master.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
tablet_master.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True,
extra_args=['-db-credentials-file',
db_credentials_file])
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True,
extra_args=['-db-credentials-file',
db_credentials_file])
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
tablet_master.tablet_alias])
def tearDown(self):
for t in tablet_master, tablet_replica1, tablet_replica2:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False, slave=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
        # Ignore exceptions; we'll just time out. (The tablet creation can
        # take some time to replicate, and we may get a 'table vt_insert_test
        # does not exist' exception in some rare cases.)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t, tablet_type='replica'):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
self._reset_tablet_dir(t)
t.start_vttablet(wait_for_state='SERVING',
init_tablet_type=tablet_type,
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True,
extra_args=['-db-credentials-file', db_credentials_file])
# check semi-sync is enabled for replica, disabled for rdonly.
if tablet_type == 'replica':
t.check_db_var('rpl_semi_sync_slave_enabled', 'ON')
t.check_db_status('rpl_semi_sync_slave_status', 'ON')
else:
t.check_db_var('rpl_semi_sync_slave_enabled', 'OFF')
t.check_db_status('rpl_semi_sync_slave_status', 'OFF')
def _reset_tablet_dir(self, t):
"""Stop mysql, delete everything including tablet dir, restart mysql."""
extra_args = ['-db-credentials-file', db_credentials_file]
utils.wait_procs([t.teardown_mysql(extra_args=extra_args)])
# Specify ignore_options because we want to delete the tree even
# if the test's -k / --keep-logs was specified on the command line.
t.remove_tree(ignore_options=True)
proc = t.init_mysql(init_db=new_init_db, extra_args=extra_args)
if use_mysqlctld:
t.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
def _list_backups(self):
"""Get a list of backup names for the test shard."""
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
return backups.splitlines()
def _remove_backup(self, backup):
"""Remove a named backup from the test shard."""
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backup],
auto_log=True, mode=utils.VTCTL_VTCTL)
def test_backup_rdonly(self):
self._test_backup('rdonly')
def test_backup_replica(self):
self._test_backup('replica')
def _test_backup(self, tablet_type):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
Args:
tablet_type: 'replica' or 'rdonly'.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2, tablet_type=tablet_type)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the restored slave has the right local_metadata
result = tablet_replica2.mquery('_vt', 'select * from local_metadata')
metadata = {}
for row in result:
metadata[row[0]] = row[1]
self.assertEqual(metadata['Alias'], 'test_nj-0000062346')
self.assertEqual(metadata['ClusterAlias'], 'test_keyspace.0')
self.assertEqual(metadata['DataCenter'], 'test_nj')
if tablet_type == 'replica':
self.assertEqual(metadata['PromotionRule'], 'neutral')
else:
self.assertEqual(metadata['PromotionRule'], 'must_not')
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias))
# remove the backup and check that the list is empty
self._remove_backup(backups[0])
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def test_master_slave_same_backup(self):
"""Test a master and slave from the same backup.
Check that a slave and master both restored from the same backup
can replicate successfully.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# Promote replica2 to master.
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica2.tablet_alias])
# insert more data on replica2 (current master)
self._insert_data(tablet_replica2, 3)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# wait for replica1 to catch up.
self._check_data(tablet_replica1, 3,
'replica1 getting data from restored master')
tablet_replica2.kill_vttablet()
def _restore_old_master_test(self, restore_method):
"""Test that a former master replicates correctly after being restored.
- Take a backup.
- Reparent from old master to new master.
- Force old master to restore from a previous backup using restore_method.
Args:
restore_method: function accepting one parameter of type tablet.Tablet,
this function is called to force a restore on the provided tablet
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup.
restore_method(tablet_master)
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
def test_restore_old_master(self):
def _restore_using_kill(t):
t.kill_vttablet()
self._restore(t)
self._restore_old_master_test(_restore_using_kill)
def test_in_place_restore(self):
def _restore_in_place(t):
utils.run_vtctl(['RestoreFromBackup', t.tablet_alias], auto_log=True)
self._restore_old_master_test(_restore_in_place)
def test_terminated_restore(self):
def _terminated_restore(t):
for e in utils.vtctld_connection.execute_vtctl_command(
['RestoreFromBackup', t.tablet_alias]):
logging.info('%s', e.value)
if 'shutdown mysqld' in e.value:
break
logging.info('waiting for restore to finish')
utils.wait_for_tablet_type(t.tablet_alias, 'replica', timeout=30)
utils.Vtctld().start()
self._restore_old_master_test(_terminated_restore)
def test_backup_transform(self):
"""Use a transform, tests we backup and restore properly."""
# Insert data on master, make sure slave gets it.
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# Restart the replica with the transform parameter.
tablet_replica1.kill_vttablet()
tablet_replica1.start_vttablet(supports_backups=True,
extra_args=[
'-backup_storage_hook',
'test_backup_transform',
'-backup_storage_compress=false',
'-db-credentials-file',
db_credentials_file])
# Take a backup, it should work.
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# Insert more data on the master.
self._insert_data(tablet_master, 2)
# Make sure we have the TransformHook in the MANIFEST, and that
# every file starts with 'header'.
backups = self._list_backups()
self.assertEqual(len(backups), 1, 'invalid backups: %s' % backups)
location = os.path.join(environment.tmproot, 'backupstorage',
'test_keyspace', '0', backups[0])
with open(os.path.join(location, 'MANIFEST')) as fd:
contents = fd.read()
manifest = json.loads(contents)
self.assertEqual(manifest['TransformHook'], 'test_backup_transform')
self.assertEqual(manifest['SkipCompress'], True)
for i in xrange(len(manifest['FileEntries'])):
name = os.path.join(location, '%d' % i)
with open(name) as fd:
line = fd.readline()
self.assertEqual(line, 'header\n', 'wrong file contents for %s' % name)
# Then start replica2 from backup, make sure that works.
# Note we don't need to pass in the backup_storage_transform parameter,
# as it is read from the MANIFEST.
self._restore(tablet_replica2)
# Check the new slave has all the data.
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
def test_backup_transform_error(self):
"""Use a transform, force an error, make sure the backup fails."""
# Restart the replica with the transform parameter.
tablet_replica1.kill_vttablet()
tablet_replica1.start_vttablet(supports_backups=True,
extra_args=['-backup_storage_hook',
'test_backup_error',
'-db-credentials-file',
db_credentials_file])
# This will fail, make sure we get the right error.
_, err = utils.run_vtctl(['Backup', tablet_replica1.tablet_alias],
auto_log=True, expect_fail=True)
self.assertIn('backup is not usable, aborting it', err)
# And make sure there is no backup left.
backups = self._list_backups()
self.assertEqual(len(backups), 0, 'invalid backups: %s' % backups)
if __name__ == '__main__':
utils.main()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to extract features from the data."""
import os
import random
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import tensorflow as tf
from covid_epidemiology import model
from covid_epidemiology.src import constants
def apply_sigmoid_bounds(variable, lower_bound, upper_bound):
"""Applies soft bounding using sigmoid nonlinearity.
Args:
variable: Input tensor.
lower_bound: Lower bound.
upper_bound: Upper bound.
Returns:
Bounded tensor.
"""
return lower_bound + (upper_bound - lower_bound) * tf.nn.sigmoid(variable)
def apply_relu_bounds(variable, lower_bound, upper_bound, replace_nan=True):
"""Applies hard bounding using ReLU nonlinearity.
Args:
variable: Input tensor.
lower_bound: Lower bound.
upper_bound: Upper bound.
replace_nan: Whether to replace NaNs.
Returns:
Bounded tensor.
"""
bounded_variable = tf.nn.relu(variable - lower_bound) + lower_bound
bounded_variable = upper_bound - tf.nn.relu(upper_bound - bounded_variable)
if replace_nan:
bounded_variable = tf.where(
tf.math.is_nan(bounded_variable), tf.zeros_like(bounded_variable),
bounded_variable)
return bounded_variable
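# Hedged toy check (uses the TensorFlow import above): sigmoid bounding is
# soft, keeping values strictly inside (lower, upper), while ReLU bounding
# clips hard at the bounds and, by default, replaces NaNs with zeros.
def _example_bounding():
  x = tf.constant([-100.0, 0.0, 100.0, float('nan')])
  soft = apply_sigmoid_bounds(x, 0.0, 10.0)  # ~[0., 5., 10., nan]
  hard = apply_relu_bounds(x, 0.0, 10.0)  # [0., 0., 10., 0.]
  return soft, hard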
def populate_gt_list(index, location_to_gt,
location, num_observed_timesteps,
gt_list, gt_indicator):
"""Copies the ground truth of a location to a corresponding index of gt_list.
Args:
index: The index of the location.
location_to_gt: A map from location to ground truth time-series.
location: The location
num_observed_timesteps: Number of observed time-series.
gt_list: The ground truth list.
gt_indicator: The binary indicator whether the particular value exists.
Returns:
The ground truth list and the indicator.
"""
if location_to_gt and location_to_gt[location].any():
observed_gt = location_to_gt[location][:num_observed_timesteps]
observed_indicator = 1.0 - np.isnan(observed_gt)
fill_values = observed_gt * observed_indicator
fill_values[np.isnan(fill_values)] = 0
gt_list[index, :observed_gt.shape[0]] = fill_values
gt_indicator[index, :observed_gt.shape[0]] = observed_indicator
return gt_list, gt_indicator
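# Hedged toy example (numpy only): NaNs in the ground truth become zeros in
# gt_list and are marked as missing (0.0) in gt_indicator.
def _example_populate_gt_list():
  location_to_gt = {'US': np.array([1.0, np.nan, 3.0])}
  gt_list = np.zeros((1, 3))
  gt_indicator = np.zeros((1, 3))
  gt_list, gt_indicator = populate_gt_list(0, location_to_gt, 'US', 3, gt_list,
                                           gt_indicator)
  # gt_list -> [[1., 0., 3.]], gt_indicator -> [[1., 0., 1.]]
  return gt_list, gt_indicator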
def search_county_indice(chosen_location_list, location):
  """Returns indices of locations whose integer code is strictly between location and location + 1000."""
idx1 = np.where(
np.asarray(chosen_location_list).astype(int) > int(location))[0]
idx2 = np.where(
np.asarray(chosen_location_list).astype(int) < int(location) + 1000)[0]
idx = list(np.intersect1d(idx1, idx2))
return idx
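# Hedged toy example (made-up FIPS codes): with prefix '06000', only the county
# codes strictly between 6000 and 7000 match, and their indices are returned.
def _example_search_county_indice():
  chosen = ['01001', '06001', '06037', '12086']
  return search_county_indice(chosen, '06000')  # -> [1, 2]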
def filter_data_based_on_location(
static_data,
ts_data,
locations # pylint: disable=g-bare-generic
):
return static_data[static_data[constants.COUNTRY_COLUMN].isin(
locations)], ts_data[ts_data[constants.COUNTRY_COLUMN].isin(locations)]
def update_metric_to_predictions(
metric,
values,
metric_to_predictions,
train_end_of_window,
gt_data,
time_horizon_offset = 0,
quantiles = None,
quantiles_output = None,
metric_string_format = None
):
"""Updates a given metric_to_predictions dictionary using given values.
Args:
metric: The metric for which the new predictions are added.
values: The predictions by the model. This is a numpy array of
single-element lists. Shape:(num_forecast_steps, 1)
metric_to_predictions: The dictionary that will be updated.
train_end_of_window: The train_end_window used for training the model.
gt_data: The ground truth data for the same metric. Shape:(N), where N is
the total number of time steps in the entire time series. Note that
gt_data contains the entire time series, while values contains only
predicted future values in the forecasting window, starting with
train_end_of_window. Therefore, we will compare values with
gt_data[train_end_of_window:train_end_of_window + num_forecast_steps]
time_horizon_offset: Optional integer offset to be subtracted from the time
horizon value of the prediction. It can be used to ensure the first
prediction data point starts from time_horizon=1 when training window is
also in values.
quantiles: Defines quantile values used in the quantile forecast. None if
quantile forecast is not used.
quantiles_output: If defined, only export quantile values in this list.
metric_string_format: Defines an optional metric string pattern in returned
dict. It will be formatted with metric (and quantile if quantile forecast
is used).
Returns:
The updated dictionary mapping metrics to model predictions.
"""
if quantiles is None:
    # Provide a one-element list for this corner case
# in order to output value[0]
quantiles = [None]
quantiles_output = [None]
elif quantiles_output is None:
quantiles_output = quantiles
for index, quantile in enumerate(quantiles):
if quantile not in quantiles_output:
continue
predictions = []
for i, value in enumerate(values):
time_horizon = i + train_end_of_window
# TODO(aepshtey): Add more tests to test the data reading and evaluation.
if gt_data is not None and len(gt_data) > time_horizon:
predictions.append(
model.Prediction(i + 1 - time_horizon_offset, value[index],
gt_data[time_horizon]))
else:
predictions.append(
model.Prediction(i + 1 - time_horizon_offset, value[index], None))
if metric_string_format is not None:
metric_string = metric_string_format.format(
metric=metric, quantile=quantile)
else:
metric_string = metric
metric_to_predictions[metric_string] = predictions
return metric_to_predictions
def inv_sig(x, lb=0, ub=1):
"""Inverse of sigmoid function given the bounds."""
assert x > lb
return np.log((x - lb) / np.max([ub - x, 1e-6]))
def inv_softplus(x):
"""Inverse of softplus function."""
assert x >= 0
if x > 15:
return x
else:
return np.log(np.exp(x) - 1 + 1e-6)
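# Hedged round-trip check (numpy only): inv_sig inverts the sigmoid bounding
# defined above, and inv_softplus inverts softplus, up to small epsilons.
def _example_inverse_checks():
  lb, ub, x = 0.0, 10.0, 3.0
  v = inv_sig(x, lb, ub)
  recovered = lb + (ub - lb) / (1.0 + np.exp(-v))  # sigmoid bounding in numpy
  assert abs(recovered - x) < 1e-3
  y = 2.0
  assert abs(np.log(1.0 + np.exp(inv_softplus(y))) - y) < 1e-3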
def set_random_seeds(random_seed):
"""Set all random seeds.
Set the random seeds for the calling environment, Numpy, Tensorflow and
Python.
Args:
random_seed: int. Value for seed.
Returns:
None.
"""
os.environ['PYTHONHASHSEED'] = str(random_seed)
tf.random.set_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
def compartment_base(gt_list, gt_indicator, num_train_steps, num_known_steps):
"""Computes base of compartment coefficients.
Args:
gt_list: ground truth list
gt_indicator: ground truth indicator
num_train_steps: training window
num_known_steps: number of known timesteps
Returns:
train_compartment_base: base of train_coef for each compartment
valid_compartment_base: base of valid_coef for each compartment
"""
train_compartment_base = (
np.sum(gt_list[:num_train_steps] * gt_indicator[:num_train_steps]) /
(np.sum(gt_indicator[:num_train_steps])))
valid_compartment_base = (
np.sum(gt_list[num_train_steps:num_known_steps] *
gt_indicator[num_train_steps:num_known_steps]) /
(np.sum(gt_indicator[num_train_steps:num_known_steps])))
return train_compartment_base, valid_compartment_base
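# Note on compartment_base (added comment): both returned values are
# indicator-masked means of the ground truth, i.e. sum(gt * indicator) /
# sum(indicator), computed over the training window and over the validation
# window respectively.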
def increment_compartment_base(gt_list, gt_indicator, num_train_steps,
num_known_steps):
"""Computes base of compartment coefficients for increment loss.
Args:
gt_list: ground truth list
gt_indicator: ground truth indicator
num_train_steps: training window
num_known_steps: number of known timesteps
Returns:
train_compartment_increment_base: base of train_coef for each compartment
increment
valid_compartment_increment_base: base of valid_coef for each compartment
increment
"""
num_forecast_steps = num_known_steps - num_train_steps
gt_list_increment = (
gt_list[num_forecast_steps:num_known_steps] -
gt_list[:num_known_steps - num_forecast_steps])
gt_indicator_increment = (
gt_indicator[num_forecast_steps:num_known_steps] *
gt_indicator[:num_known_steps - num_forecast_steps])
train_compartment_increment_base, valid_compartment_increment_base = compartment_base(
gt_list_increment, gt_indicator_increment,
num_train_steps - num_forecast_steps, num_train_steps)
return train_compartment_increment_base, valid_compartment_increment_base
def sync_compartment(gt_list, gt_indicator, compartment, sync_coef):
"""Synchronizes the ground truth and forecast.
Args:
    gt_list: Ground truth time-series.
gt_indicator: Indicator to denote availability of ground truth.
compartment: Forecasted values for a certain compartment.
sync_coef: Synchronization coefficient.
Returns:
synced_compartment: synchronized compartment value
"""
synced_compartment_tf = tf.nn.relu(gt_list * sync_coef * gt_indicator)
synced_compartment_pred = compartment * (1 - sync_coef * gt_indicator)
synced_compartment = synced_compartment_tf + synced_compartment_pred
return synced_compartment
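# Note (added for clarity): where ground truth is available (gt_indicator == 1)
# and sync_coef == 1, the output reduces to relu(gt_list); where
# gt_indicator == 0 it reduces to the forecasted `compartment`. Intermediate
# sync_coef values blend the ground truth and the forecast.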
def construct_infection_active_mask(
confirmed_gt_list,
num_locations,
num_observed_timesteps,
infected_threshold,
):
"""Creates a array that is 1 when the infection is active.
Args:
confirmed_gt_list: The locations are the keys and the values are the
confirmed cases across time.
num_locations: The number of locations.
num_observed_timesteps: The number of observed time steps.
infected_threshold: The minimum number of cases a location must have to be
      considered active.
Returns:
An array of size num_observed_timesteps x num_locations that is 0.0 when
the infection is not active and 1.0 when it is.
"""
# We define infection_active_mask[ni, ti] such that at ni^th location,
# at time ti, the infection is likely to be active. We will only start SEIR
# dynamics if infection_active_mask[ni, ti]=1 for the location ni at
# time ti. The motivation is that different locations start the infection
  # behavior at different timesteps. A simple preprocessing step to
  # determine their relative positions in time allows
  # more efficient fitting (i.e. the differential equations are not fit before
  # the disease has started).
infection_active_mask = np.zeros((num_observed_timesteps, num_locations),
dtype=np.float32)
for location_index in range(num_locations):
for timestep in range(num_observed_timesteps):
if confirmed_gt_list[timestep, location_index] >= infected_threshold:
infection_active_mask[timestep:, location_index] = 1.0
break
return infection_active_mask
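def _infection_active_mask_example():
  """Illustrative sketch (added example, not part of the original pipeline).

  With two locations, four timesteps and a threshold of 10, the mask turns on
  at the first timestep whose confirmed count reaches the threshold and stays
  on afterwards: location 0 activates at t=2, location 1 at t=1.
  """
  confirmed = np.array([[0., 0.],
                        [5., 12.],
                        [11., 20.],
                        [30., 25.]], dtype=np.float32)
  return construct_infection_active_mask(
      confirmed, num_locations=2, num_observed_timesteps=4,
      infected_threshold=10)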
|
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
# Modified by D.C.-G. for translation purpose
import collections
import os
import traceback
import copy
from albow import FloatField, IntField, AttrRef, Row, Label, Widget, TabPanel, \
CheckBox, Column, Button, TextFieldWrapped, translate
_ = translate._
import albow
from config import config
from editortools.blockview import BlockButton
from editortools.editortool import EditorTool
from glbackground import Panel
from mceutils import setWindowCaption, alertException
from albow import ChoiceButton, showProgress, TextInputRow
import mcplatform
from operation import Operation
from albow.dialogs import wrapped_label, alert, Dialog
import pymclevel
from pymclevel import BoundingBox
import urllib2
import urllib
import json
import shutil
import directories
import sys
import keys
import imp
from nbtexplorer import NBTExplorerToolPanel
import logging
log = logging.getLogger(__name__)
def alertFilterException(func):
def _func(*args, **kw):
try:
func(*args, **kw)
except Exception, e:
print traceback.format_exc()
alert(_(u"Exception during filter operation. See console for details.\n\n{0}").format(e))
return _func
def addNumField(page, optionName, oName, val, min_value=None, max_value=None, increment=0.1):
if isinstance(val, float):
field_type = FloatField
if isinstance(increment, int):
increment = float(increment)
else:
field_type = IntField
if increment == 0.1:
increment = 1
if isinstance(increment, float):
increment = int(round(increment))
if min_value == max_value:
min_value = None
max_value = None
field = field_type(value=val, width=200, min=min_value, max=max_value)
field._increment = increment
page.optionDict[optionName] = AttrRef(field, 'value')
row = Row([Label(oName, doNotTranslate=True), field])
return row
class JsonDictProperty(dict):
def __init__(self, filename, **kwargs):
super(JsonDictProperty, self).__init__(**kwargs)
self._filename = filename
def __setitem__(self, key, value):
data = self._getJson()
data[key] = value
self._putJson(data)
def __getitem__(self, key):
return self._getJson()[key]
def __delitem__(self, key):
data = self._getJson()
del data[key]
self._putJson(data)
def _putJson(self, data):
with open(self._filename, 'wb') as f:
json.dump(data, f)
def _getJson(self):
try:
            filter_json = json.load(open(self._filename, 'rb'))
if "Macros" not in filter_json.keys():
filter_json["Macros"] = {}
return filter_json
except (ValueError, IOError):
return {"Macros": {}}
class MacroModuleOptions(Widget):
is_gl_container = True
def __init__(self, macro_data, *args, **kw):
self._parent = None
self._macro_data = macro_data
if '_parent' in kw.keys():
self._parent = kw.pop('_parent')
Widget.__init__(self, *args, **kw)
infoColList = []
stepsLabel = wrapped_label("Number of steps: %s" % macro_data["Number of steps"], 300)
infoColList.append(stepsLabel)
for step in sorted(macro_data.keys()):
if step != "Number of steps":
infoColList.append(wrapped_label("Step %s: %s" % (int(step) + 1, macro_data[step]["Name"]), 300))
self.add(Column(infoColList))
self.shrink_wrap()
@property
def options(self):
return {}
@options.setter
def options(self, value):
pass
def run(self):
pass
@alertFilterException
def confirm(self, tool):
with setWindowCaption("Applying Macro..."):
options = []
filters = []
for step in sorted(self._macro_data.keys()):
if step != "Number of steps":
filters.append(tool.filterModules[self._macro_data[step]["Name"]])
for module_input in self._macro_data[step]["Inputs"].keys():
if not isinstance(self._macro_data[step]["Inputs"][module_input], (str, unicode)):
continue
if not self._macro_data[step]["Inputs"][module_input].startswith("block-"):
continue
toFind = self._macro_data[step]["Inputs"][module_input][6:].split(":")
block = tool.editor.materials.get((toFind[0], toFind[1]))
self._macro_data[step]["Inputs"][module_input] = block
options.append(self._macro_data[step]["Inputs"])
op = MacroOperation(tool.editor, tool.editor.level, tool.selectionBox(), filters, options)
tool.editor.level.showProgress = showProgress
tool.editor.addOperation(op)
tool.editor.addUnsavedEdit()
tool.editor.invalidateBox(tool.selectionBox())
class FilterModuleOptions(Widget):
is_gl_container = True
def __init__(self, tool, module, *args, **kw):
self._parent = None
self.nbttree = None
self.module = module
if '_parent' in kw.keys():
self._parent = kw.pop('_parent')
Widget.__init__(self, *args, **kw)
self.spacing = 2
self.tool = tool
self.pages = pages = TabPanel()
pages.is_gl_container = True
self.optionDict = {}
self.giveEditorObject(module)
log.info("Creating options for " + str(module))
if hasattr(module, "inputs"):
trn = getattr(module, "trn", None)
self.trn = trn
if isinstance(module.inputs, list):
self.pgs = []
for tabData in module.inputs:
title, page, pageRect = self.makeTabPage(self.tool, tabData, trn=trn)
self.pgs.append((title, page))
pages.set_parent(None)
self.pages = pages = TabPanel(self.pgs)
elif isinstance(module.inputs, tuple):
title, page, pageRect = self.makeTabPage(self.tool, module.inputs, trn=trn)
pages.add_page(title, page)
pages.set_rect(pageRect)
else:
self.size = (0, 0)
pages.shrink_wrap()
self.add(pages)
self.shrink_wrap()
if len(pages.pages):
if pages.current_page is not None:
pages.show_page(pages.current_page)
else:
pages.show_page(pages.pages[0])
for eachPage in pages.pages:
self.optionDict = dict(self.optionDict.items() + eachPage.optionDict.items())
def rebuildTabPage(self, inputs, **kwargs):
title, page, rect = self.makeTabPage(self.tool, inputs, self.trn, **kwargs)
for i, t, p, s, r in self.pages.iter_tabs():
if t == title:
self.pages.remove_page(p)
self.pages.add_page(title, page, idx=i)
self.pages.show_page(page)
break
def makeTabPage(self, tool, inputs, trn=None, **kwargs):
page = Widget(**kwargs)
page.is_gl_container = True
rows = []
cols = []
max_height = tool.editor.mainViewport.height - tool.editor.toolbar.height - tool.editor.subwidgets[0].height -\
self._parent.filterSelectRow.height - self._parent.confirmButton.height - self.pages.tab_height
page.optionDict = {}
page.tool = tool
title = "Tab"
for optionSpec in inputs:
optionName = optionSpec[0]
optionType = optionSpec[1]
if trn is not None:
n = trn._(optionName)
else:
n = optionName
if n == optionName:
oName = _(optionName)
else:
oName = n
if isinstance(optionType, tuple):
if isinstance(optionType[0], (int, long, float)):
if len(optionType) == 3:
val, min, max = optionType
increment = 0.1
elif len(optionType) == 2:
min, max = optionType
val = min
increment = 0.1
else:
val, min, max, increment = optionType
rows.append(addNumField(page, optionName, oName, val, min, max, increment))
if isinstance(optionType[0], (str, unicode)):
isChoiceButton = False
if optionType[0] == "string":
kwds = []
wid = None
val = None
for keyword in optionType:
if isinstance(keyword, (str, unicode)) and keyword != "string":
kwds.append(keyword)
for keyword in kwds:
splitWord = keyword.split('=')
if len(splitWord) > 1:
v = None
try:
v = int(splitWord[1])
except ValueError:
pass
key = splitWord[0]
if v is not None:
if key == "width":
wid = v
else:
if key == "value":
val = "=".join(splitWord[1:])
if val is None:
val = ""
if wid is None:
wid = 200
field = TextFieldWrapped(value=val, width=wid)
page.optionDict[optionName] = AttrRef(field, 'value')
row = Row((Label(oName, doNotTranslate=True), field))
rows.append(row)
else:
isChoiceButton = True
if isChoiceButton:
if trn is not None:
__ = trn._
else:
__ = _
choices = [__("%s" % a) for a in optionType]
choiceButton = ChoiceButton(choices, doNotTranslate=True)
page.optionDict[optionName] = AttrRef(choiceButton, 'selectedChoice')
rows.append(Row((Label(oName, doNotTranslate=True), choiceButton)))
elif isinstance(optionType, bool):
cbox = CheckBox(value=optionType)
page.optionDict[optionName] = AttrRef(cbox, 'value')
row = Row((Label(oName, doNotTranslate=True), cbox))
rows.append(row)
elif isinstance(optionType, (int, float)):
rows.append(addNumField(self, optionName, oName, optionType))
elif optionType == "blocktype" or isinstance(optionType, pymclevel.materials.Block):
blockButton = BlockButton(tool.editor.level.materials)
if isinstance(optionType, pymclevel.materials.Block):
blockButton.blockInfo = optionType
row = Column((Label(oName, doNotTranslate=True), blockButton))
page.optionDict[optionName] = AttrRef(blockButton, 'blockInfo')
rows.append(row)
elif optionType == "label":
rows.append(wrapped_label(oName, 50, doNotTranslate=True))
elif optionType == "string":
inp = None
                # Not sure how to pull values from filters, but this leaves it
                # open for the future. Use this variable to set the field width.
if inp is not None:
size = inp
else:
size = 200
field = TextFieldWrapped(value="")
row = TextInputRow(oName, ref=AttrRef(field, 'value'), width=size, doNotTranslate=True)
page.optionDict[optionName] = AttrRef(field, 'value')
rows.append(row)
elif optionType == "title":
title = oName
elif type(optionType) == list and optionType[0].lower() == "nbttree":
kw = {'close_text': None, 'load_text': None}
if len(optionType) >= 3:
def close():
self.pages.show_page(self.pages.pages[optionType[2]])
kw['close_action'] = close
kw['close_text'] = "Go Back"
if len(optionType) >= 4:
if optionType[3]:
kw['load_text'] = optionType[3]
if hasattr(self.module, 'nbt_ok_action'):
kw['ok_action'] = getattr(self.module, 'nbt_ok_action')
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject=optionType[1],
height=max_height, no_header=True, copy_data=False, **kw)
self.module.set_tree(self.nbttree.tree)
for meth_name in dir(self.module):
if meth_name.startswith('nbttree_'):
setattr(self.nbttree.tree.treeRow, meth_name.split('nbttree_')[-1],
getattr(self.module, meth_name))
# elif meth_name.startswith('nbt_'):
# setattr(self.nbttree, meth_name.split('nbt_')[-1], getattr(self.module, meth_name))
page.optionDict[optionName] = AttrRef(self, 'rebuildTabPage')
rows.append(self.nbttree)
self.nbttree.page = len(self.pgs)
else:
raise ValueError(("Unknown option type", optionType))
height = sum(r.height for r in rows) + (len(rows) - 1) * self.spacing
if height > max_height:
h = 0
for i, r in enumerate(rows):
h += r.height
if h > height / 2:
if rows[:i]:
cols.append(Column(rows[:i], spacing=0))
rows = rows[i:]
break
if len(rows):
cols.append(Column(rows, spacing=0))
if len(cols):
page.add(Row(cols, spacing=0))
page.shrink_wrap()
return title, page, page._rect
@property
def options(self):
options = {}
for k, v in self.optionDict.iteritems():
options[k] = v.get() if not isinstance(v.get(), pymclevel.materials.Block) else copy.copy(v.get())
if self.pages.current_page is not None:
options["__page_index__"] = self.pages.pages.index(self.pages.current_page)
return options
@options.setter
def options(self, val):
for k in val:
if k in self.optionDict:
self.optionDict[k].set(val[k])
index = val.get("__page_index__", -1)
if len(self.pages.pages) > index > -1:
self.pages.show_page(self.pages.pages[index])
def giveEditorObject(self, module):
module.editor = self.tool.editor
@staticmethod
def confirm(tool):
with setWindowCaption("Applying Filter... - "):
filterModule = tool.filterModules[tool.panel.filterSelect.selectedChoice]
op = FilterOperation(tool.editor, tool.editor.level, tool.selectionBox(), filterModule,
tool.panel.filterOptionsPanel.options)
tool.editor.level.showProgress = showProgress
tool.editor.addOperation(op)
tool.editor.addUnsavedEdit()
tool.editor.invalidateBox(tool.selectionBox())
class FilterToolPanel(Panel):
BACKUP_FILTER_JSON = False
"""If set to true, the filter.json is backed up to the hard disk
every time it's edited. The default is false, which makes the file save
only whenever the tool gets closed. If MCEdit were to crash, any recorded
macros would not be saved."""
def __init__(self, tool):
Panel.__init__(self, name='Panel.FilterToolPanel')
self.macro_steps = []
self.current_step = 0
self._filter_json = None
self.keys_panel = None
self.filterOptionsPanel = None
self.filterSelect = ChoiceButton([], choose=self.filterChanged, doNotTranslate=True)
self.binding_button = Button("", action=self.bind_key,
tooltipText="Click to bind this filter to a key")
self.filterLabel = Label("Filter:", fg_color=(177, 177, 255, 255))
self.filterLabel.mouse_down = lambda x: mcplatform.platform_open(directories.getFiltersDir())
self.filterLabel.tooltipText = "Click to open filters folder"
self.macro_button = Button("Record Macro", action=self.start_record_macro)
self.filterSelectRow = Row((self.filterLabel, self.filterSelect,
self.macro_button, self.binding_button))
self.confirmButton = Button("Filter", action=self.confirm)
self._recording = False
self._save_macro = False
self.tool = tool
self.selectedName = self.filter_json.get("Last Filter Opened", "")
@staticmethod
def load_filter_json():
filter_json_file = os.path.join(directories.getDataDir(), "filters.json")
filter_json = {}
if FilterToolPanel.BACKUP_FILTER_JSON:
filter_json = JsonDictProperty(filter_json_file)
else:
try:
if os.path.exists(filter_json_file):
filter_json = json.load(open(filter_json_file, 'rb'))
except (ValueError, IOError) as e:
log.error("Error while loading filters.json %s", e)
if "Macros" not in filter_json.keys():
filter_json["Macros"] = {}
return filter_json
@property
def filter_json(self):
if self._filter_json is None:
self._filter_json = FilterToolPanel.load_filter_json()
return self._filter_json
def close(self):
self._saveOptions()
self.filter_json["Last Filter Opened"] = self.selectedName
if not FilterToolPanel.BACKUP_FILTER_JSON:
with open(os.path.join(directories.getDataDir(), "filters.json"), 'w') as f:
json.dump(self.filter_json, f)
def reload(self):
for i in list(self.subwidgets):
self.remove(i)
tool = self.tool
# Display "No filter modules found" if there are no filters
        if len(tool.filterModules) == 0:
self.add(Label("No filter modules found!"))
self.shrink_wrap()
return
names_list = sorted([n for n in tool.filterNames if not n.startswith("[")])
# We get a list of names like ["[foo] bar", "[test] thing"]
        # The key to sort on is created by splitting on "]", taking the first
        # part ("[foo", "[test") and dropping the leading "[": "foo", "test"
subfolder_names_list = sorted([n for n in tool.filterNames if n.startswith("[")],
key=lambda x: x.split("]")[0][1:])
names_list.extend(subfolder_names_list)
names_list.extend([macro for macro in self.filter_json["Macros"].keys()])
if self.selectedName is None or self.selectedName not in names_list:
self.selectedName = names_list[0]
# Remove any keybindings that don't have a filter
for (i, j) in config.config.items("Filter Keys"):
if i == "__name__":
continue
if not any([i == m.lower() for m in names_list]):
config.config.remove_option("Filter Keys", i)
self.filterSelect.choices = names_list
name = self.selectedName.lower()
names = [k for (k, v) in config.config.items("Filter Keys")]
btn_name = config.config.get("Filter Keys", name) if name in names else "*"
self.binding_button.set_text(btn_name)
self.filterOptionsPanel = None
while self.filterOptionsPanel is None:
module = self.tool.filterModules.get(self.selectedName, None)
if module is not None:
try:
self.filterOptionsPanel = FilterModuleOptions(self.tool, module, _parent=self)
except Exception as e:
alert(_("Error creating filter inputs for {0}: {1}").format(module, e))
traceback.print_exc()
self.tool.filterModules.pop(self.selectedName)
self.selectedName = tool.filterNames[0]
if len(tool.filterNames) == 0:
raise ValueError("No filters loaded!")
if not self._recording:
self.confirmButton.set_text("Filter")
else: # We verified it was an existing macro already
macro_data = self.filter_json["Macros"][self.selectedName]
self.filterOptionsPanel = MacroModuleOptions(macro_data)
self.confirmButton.set_text("Run Macro")
        # This has to be recreated every time in case a macro has a longer name than everything else.
self.filterSelect = ChoiceButton(names_list, choose=self.filterChanged, doNotTranslate=True)
self.filterSelect.selectedChoice = self.selectedName
self.filterSelectRow = Row((self.filterLabel, self.filterSelect,
self.macro_button, self.binding_button))
self.add(Column((self.filterSelectRow, self.filterOptionsPanel, self.confirmButton)))
self.shrink_wrap()
if self.parent:
height = self.parent.mainViewport.height - self.parent.toolbar.height
self.centery = height / 2 + self.parent.subwidgets[0].height
if self.selectedName in self.tool.savedOptions:
self.filterOptionsPanel.options = self.tool.savedOptions[self.selectedName]
@property
def macroSelected(self):
return self.filterSelect.selectedChoice not in self.tool.filterNames
def filterChanged(self):
# if self.filterSelect.selectedChoice not in self.tool.filterModules:
# return
self._saveOptions()
self.selectedName = self.filterSelect.selectedChoice
if self.macroSelected: # Is macro
self.macro_button.set_text("Delete Macro")
self.macro_button.action = self.delete_macro
elif not self._recording:
self.macro_button.set_text("Record Macro")
self.macro_button.action = self.start_record_macro
self.reload()
def delete_macro(self):
macro_name = self.selectedName
if macro_name in self.filter_json["Macros"]:
del self.filter_json["Macros"][macro_name]
if len(self.filterSelect.choices) == 1: # Just this macro available
self.reload()
return
choices = self.filterSelect.choices
self.filterSelect.selectedChoice = choices[0] if choices[0] != macro_name else choices[1]
self.filterChanged()
def stop_record_macro(self):
macro_dialog = Dialog()
macroNameLabel = Label("Macro Name: ")
macroNameField = TextFieldWrapped(width=200)
def save_macro():
macro_name = "{Macro} " + macroNameField.get_text()
self.filter_json["Macros"][macro_name] = {}
self.filter_json["Macros"][macro_name]["Number of steps"] = len(self.macro_steps)
self.filterSelect.choices.append(macro_name)
for entry in self.macro_steps:
for inp in entry["Inputs"].keys():
if not isinstance(entry["Inputs"][inp], pymclevel.materials.Block):
if not entry["Inputs"][inp] == "blocktype":
continue
_inp = entry["Inputs"][inp]
entry["Inputs"][inp] = "block-{0}:{1}".format(_inp.ID, _inp.blockData)
self.filter_json["Macros"][macro_name][entry["Step"]] = {"Name": entry["Name"],
"Inputs": entry["Inputs"]}
stop_dialog()
self.filterSelect.selectedChoice = macro_name
self.filterChanged()
def stop_dialog():
self.macro_button.text = "Record Macro"
self.macro_button.tooltipText = None
self.macro_button.action = self.start_record_macro
macro_dialog.dismiss()
self.macro_steps = []
self.current_step = 0
self._recording = False
input_row = Row((macroNameLabel, macroNameField))
saveButton = Button("Save", action=save_macro)
closeButton = Button("Cancel", action=stop_dialog)
button_row = Row((saveButton, closeButton))
macro_dialog.add(Column((input_row, button_row)))
macro_dialog.shrink_wrap()
macro_dialog.present()
def start_record_macro(self):
self.macro_button.text = "Stop recording"
self.macro_button.tooltipText = "Currently recording a macro"
self.macro_button.action = self.stop_record_macro
self.confirmButton.text = "Add macro"
self.confirmButton.width += 75
self.confirmButton.centerx = self.centerx
self._recording = True
def _addMacroStep(self, name=None, inputs=None):
data = {"Name": name, "Step": self.current_step, "Inputs": inputs}
self.current_step += 1
self.macro_steps.append(data)
def unbind_key(self):
config.config.remove_option("Filter Keys", self.selectedName)
self.binding_button.text = "*"
self.keys_panel.dismiss()
# self.saveOptions()
self.reload()
def bind_key(self, message=None):
panel = Panel(name='Panel.FilterToolPanel.bind_key')
panel.bg_color = (0.5, 0.5, 0.6, 1.0)
if not message:
message = _("Press a key to assign to the filter \"{0}\"\n\n"
"Press ESC to cancel.").format(self.selectedName)
label = albow.Label(message)
unbind_button = Button("Press to unbind", action=self.unbind_key)
column = Column((label, unbind_button))
panel.add(column)
panel.shrink_wrap()
def panelKeyUp(evt):
_key_name = self.root.getKey(evt)
panel.dismiss(_key_name)
def panelMouseUp(evt):
button = keys.remapMouseButton(evt.button)
_key_name = None
if button == 3:
_key_name = "Button 3"
elif button == 4:
_key_name = "Scroll Up"
elif button == 5:
_key_name = "Scroll Down"
elif button == 6:
_key_name = "Button 4"
elif button == 7:
_key_name = "Button 5"
if 2 < button < 8:
panel.dismiss(_key_name)
panel.key_up = panelKeyUp
panel.mouse_up = panelMouseUp
self.keys_panel = panel
key_name = panel.present()
if type(key_name) is bool:
return True
if key_name != "Escape":
if key_name in ["Alt-F4", "F1", "F2", "F3", "F4", "F5", "1", "2", "3",
"4", "5", "6", "7", "8", "9", "Ctrl-Alt-F9", "Ctrl-Alt-F10"]:
self.bind_key(_("You can't use the key {0}.\n"
"Press a key to assign to the filter \"{1}\"\n\n"
""
"Press ESC to cancel.").format(_(key_name), self.selectedName))
return True
keysUsed = [(j, i) for (j, i) in config.config.items("Keys") if i == key_name]
if keysUsed:
self.bind_key(_("Can't bind. {0} is already used by {1}.\n"
"Press a key to assign to the filter \"{2}\"\n\n"
""
"Press ESC to cancel.").format(_(key_name), keysUsed[0][0], self.selectedName))
return True
filter_keys = [i for (i, j) in config.config.items("Filter Keys") if j == key_name]
if filter_keys:
self.bind_key(_("Can't bind. {0} is already used by the \"{1}\" filter.\n"
"Press a new key.\n\n"
""
"Press ESC to cancel.").format(_(key_name), filter_keys[0]))
return True
config.config.set("Filter Keys", self.selectedName.lower(), key_name)
config.save()
self.reload()
def _saveOptions(self):
"""Should never be called. Call filterchanged() or close() instead,
which will then call this.
:return:
"""
if self.filterOptionsPanel is not None:
options = {}
options.update(self.filterOptionsPanel.options)
options.pop("", "")
self.tool.savedOptions[self.selectedName] = options
@alertFilterException
def confirm(self):
if self._recording:
self._addMacroStep(self.selectedName, self.filterOptionsPanel.options)
else:
self.filterOptionsPanel.confirm(self.tool)
class FilterOperation(Operation):
def __init__(self, editor, level, box, filter, options):
super(FilterOperation, self).__init__(editor, level)
self.box = box
self.filter = filter
self.options = options
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
if recordUndo:
self.undoLevel = self.extractUndo(self.level, self.box)
self.filter.perform(self.level, BoundingBox(self.box), self.options)
self.canUndo = True
def dirtyBox(self):
return self.box
class MacroOperation(Operation):
def __init__(self, editor, level, box, filters, options):
super(MacroOperation, self).__init__(editor, level)
self._box = box
self.options = options
self.filters = filters
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
if recordUndo:
self.undoLevel = self.extractUndo(self.level, self._box)
for o, f in zip(self.options, self.filters):
f.perform(self.level, BoundingBox(self._box), o)
self.canUndo = True
def dirtyBox(self):
return self._box
class FilterTool(EditorTool):
tooltipText = "Filter"
toolIconName = "filter"
def __init__(self, editor):
EditorTool.__init__(self, editor)
self.filterModules = {}
self.savedOptions = {}
self.updatePanel = Panel(name='Panel.FilterTool.updatePanel')
updateButton = Button("Update Filters", action=self.updateFilters)
self.updatePanel.add(updateButton)
self.updatePanel.shrink_wrap()
self.updatePanel.bottomleft = self.editor.viewportContainer.bottomleft
@property
def statusText(self):
return "Choose a filter, then click Filter or press Enter to apply it."
def toolEnabled(self):
return not (self.selectionBox() is None)
def toolSelected(self):
self.showPanel()
@alertException
def showPanel(self):
self.panel = FilterToolPanel(self)
self.updatePanel.bottomleft = self.editor.viewportContainer.bottomleft
self.editor.add(self.updatePanel)
self.reloadFilters()
self.panel.reload()
height = self.editor.mainViewport.height - self.editor.toolbar.height
self.panel.centery = height / 2 + self.editor.subwidgets[0].height
self.panel.left = self.editor.left
self.editor.add(self.panel)
def hidePanel(self):
if self.panel is None:
return
self.panel.close()
if self.panel.parent:
self.panel.parent.remove(self.panel)
self.updatePanel.parent.remove(self.updatePanel)
self.panel = None
def updateFilters(self):
totalFilters = 0
updatedFilters = 0
filtersDir = directories.getFiltersDir()
try:
os.mkdir(os.path.join(filtersDir, "updates"))
except OSError:
pass
for module in self.filterModules.values():
totalFilters += 1
if hasattr(module, "UPDATE_URL") and hasattr(module, "VERSION"):
if isinstance(module.UPDATE_URL, (str, unicode)) and isinstance(module.VERSION, (str, unicode)):
# Pass on URL or network errors.
                    # This is basic error handling; it needs more refinement to sort errors...
update = True
try:
versionJSON = json.loads(urllib2.urlopen(module.UPDATE_URL).read())
except Exception, e:
update = False
log.warn(" Could not fetch source for %s. System said: %s"%(module.displayName, e))
if update and module.VERSION != versionJSON["Version"]:
urllib.urlretrieve(versionJSON["Download-URL"],
os.path.join(filtersDir, "updates", versionJSON["Name"]))
updatedFilters += 1
for f in os.listdir(os.path.join(filtersDir, "updates")):
shutil.copy(os.path.join(filtersDir, "updates", f), filtersDir)
shutil.rmtree(os.path.join(filtersDir, "updates"))
finishedUpdatingWidget = Widget()
lbl = Label("Updated %s filter(s) out of %s" % (updatedFilters, totalFilters))
closeBTN = Button("Close this message", action=finishedUpdatingWidget.dismiss)
col = Column((lbl, closeBTN))
finishedUpdatingWidget.bg_color = (0.0, 0.0, 0.6)
finishedUpdatingWidget.add(col)
finishedUpdatingWidget.shrink_wrap()
finishedUpdatingWidget.present()
def reloadFilters(self):
filterFiles = []
unicode_module_names = []
        # Track stock and custom filter names in order to load the translations correctly.
stock_filters = []
cust_filters = []
def searchForFiltersInDir(searchFolder, stock=False):
for root, folders, files in os.walk(os.path.join(searchFolder), True):
filter_dir = os.path.basename(root)
if filter_dir.startswith('demo') or filter_dir.startswith('lib'):
continue
subFolderString = root.replace(searchFolder, "")
if subFolderString.endswith(os.sep):
subFolderString = subFolderString[:len(os.sep)]
if subFolderString.startswith(os.sep):
subFolderString = subFolderString[len(os.sep):]
if len(subFolderString) > 0:
subFolderString = "[" + subFolderString + "]"
try:
root = str(root)
if root not in sys.path:
sys.path.append(root)
except UnicodeEncodeError:
unicode_module_names.extend([filter_name for filter_name in files])
for possible_filter in files:
if possible_filter.endswith(".py"):
if stock:
stock_filters.append(possible_filter)
_stock = True
else:
cust_filters.append(possible_filter)
_stock = False
# Force the 'stock' parameter if the filter was found in the stock-filters directory
if possible_filter in stock_filters:
_stock = True
filterFiles.append((root, possible_filter, _stock, subFolderString))
# Search first for the stock filters.
searchForFiltersInDir(os.path.join(directories.getDataDir(), "stock-filters"), True)
searchForFiltersInDir(directories.getFiltersDir(), False)
filterModules = []
org_lang = albow.translate.lang
        # If the path has unicode chars, there's no way of knowing what order to add the
        # files to sys.modules. To fix this, we keep retrying the imports until either
        # everything has been imported or all the leftover files fail to import.
shouldContinue = True
while shouldContinue:
shouldContinue = False
for f in filterFiles:
module = tryImport(f[0], f[1], org_lang, f[2], f[3], f[1] in unicode_module_names)
if module is None:
continue
filterModules.append(module)
filterFiles.remove(f)
shouldContinue |= True
displayNames = []
for m in filterModules:
while m.displayName in displayNames:
m.displayName += "_"
            displayNames.append(m.displayName)
filterModules = filter(lambda mod: hasattr(mod, "perform"), filterModules)
self.filterModules = collections.OrderedDict(sorted(
[(FilterTool.moduleDisplayName(x), x) for x in filterModules],
key=lambda module_name: (module_name[0].lower(),
module_name[1])))
@staticmethod
def moduleDisplayName(module):
subFolderString = getattr(module, 'foldersForDisplayName', "")
subFolderString = subFolderString if len(subFolderString) < 1 else subFolderString + " "
name = getattr(module, "displayName", module.__name__)
return subFolderString + _(name[0].upper() + name[1:])
@property
def filterNames(self):
return [FilterTool.moduleDisplayName(module) for module in self.filterModules.itervalues()]
#-# WIP. Reworking the filters translations.
#-# The 'new_method' variable selects between the latest working code and the code currently under development.
#-# This variable must be set to False when releasing unless the new code is fully working.
new_method = True
def tryImport_old(_root, name, org_lang, stock=False, subFolderString="", unicode_name=False):
with open(os.path.join(_root, name)) as module_file:
module_name = name.split(os.path.sep)[-1].replace(".py", "")
try:
if unicode_name:
source_code = module_file.read()
module = imp.new_module(module_name)
exec (source_code, module.__dict__)
if module_name not in sys.modules.keys():
sys.modules[module_name] = module
else:
module = imp.load_source(module_name, os.path.join(_root, name), module_file)
module.foldersForDisplayName = subFolderString
if not (hasattr(module, 'displayName')):
module.displayName = module_name # Python is awesome
if not stock:
if "trn" in sys.modules.keys():
del sys.modules["trn"]
if "albow.translate" in sys.modules.keys():
del sys.modules["albow.translate"]
from albow import translate as trn
if directories.getFiltersDir() in name:
trn_path = os.path.split(name)[0]
else:
trn_path = directories.getFiltersDir()
trn_path = os.path.join(trn_path, subFolderString[1:-1], module_name)
module.trn = trn
if os.path.exists(trn_path):
module.trn.setLangPath(trn_path)
module.trn.buildTranslation(config.settings.langCode.get())
n = module.displayName
if hasattr(module, "trn"):
n = module.trn._(module.displayName)
if n == module.displayName:
n = _(module.displayName)
module.displayName = n
import albow.translate
albow.translate.lang = org_lang
return module
except Exception as e:
traceback.print_exc()
alert(_(u"Exception while importing filter module {}. " +
u"See console for details.\n\n{}").format(name, e))
return None
def tryImport_new(_root, name, org_lang, stock=False, subFolderString="", unicode_name=False):
with open(os.path.join(_root, name)) as module_file:
module_name = name.split(os.path.sep)[-1].replace(".py", "")
try:
if unicode_name:
source_code = module_file.read()
module = imp.new_module(module_name)
exec (source_code, module.__dict__)
if module_name not in sys.modules.keys():
sys.modules[module_name] = module
else:
module = imp.load_source(module_name, os.path.join(_root, name), module_file)
module.foldersForDisplayName = subFolderString
if not (hasattr(module, 'displayName')):
module.displayName = module_name # Python is awesome
if not stock:
                # This works fine with custom filters, but the choice buttons are broken for the stock ones...
if directories.getFiltersDir() in name:
trn_path = os.path.split(name)[0]
else:
trn_path = directories.getFiltersDir()
trn_path = os.path.join(trn_path, subFolderString[1:-1], module_name)
if os.path.exists(trn_path):
albow.translate.buildTranslation(config.settings.langCode.get(), extend=True, langPath=trn_path)
# module.trn = albow.translate
module.displayName = _(module.displayName)
module.trn = albow.translate
return module
except Exception as e:
traceback.print_exc()
alert(_(u"Exception while importing filter module {}. " +
u"See console for details.\n\n{}").format(name, e))
return None
if new_method:
tryImport = tryImport_new
else:
tryImport = tryImport_old
|
|
#!/usr/bin/env python
from flask import Flask, request, redirect, session, url_for, render_template, Response, jsonify, make_response, send_from_directory
from flask.ext.assets import Environment, Bundle
from flask.ext.mail import Mail
import urllib
import urlparse
import json
import random
import base64
import re
import filters
import threading
import api
from stackblink import stackblink
from skymorph import skymorph
def import_sdss():
from sdss import sdss
pass
t1 = threading.Thread(target=import_sdss)
t1.start()
app = Flask(__name__)
mail = Mail(app)
filters.register_filters(app)
app.secret_key = 'not a secret key'
try:
import local_config
app.config['ASSETS_DEBUG'] = local_config.DEBUG
except ImportError:
pass
# bundling
assets = Environment(app)
# This filter can be helpful for debugging javascript.
def noop_filter(_in, out, **kw):
out.write(_in.read())
# static files
@app.route('/sitemap.xml')
@app.route('/robots.txt')
def static_from_route():
return send_from_directory(app.static_folder, request.path[1:])
# main routes
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upcoming')
def upcoming():
return render_template('upcoming.html')
@app.route('/3d/')
def view_3d_slash():
return render_template('full3d.html', noop=noop_filter)
@app.route('/offline_3d')
def view_3d_offline():
pt_vars = {};
pt_vars['offline_mode'] = True
pt_vars['score_rankings'] = json.dumps(api.rankings('score', 4000, True), allow_nan=False)
pt_vars['value_rankings'] = json.dumps(api.rankings('value', 4000, True), allow_nan=False)
pt_vars['accessibility_rankings'] = json.dumps(api.rankings('accessibility', 4000, True), allow_nan=False)
pt_vars['smallest_rankings'] = json.dumps(api.rankings('smallest', 4000, True), allow_nan=False)
return render_template('full3d.html', noop=noop_filter, passthrough_vars=pt_vars, \
offline_mode=True)
@app.route('/3d/notsupported.html')
def notsupported_3d():
return render_template('notsupported.html')
@app.route('/asteroid-<asteroid_slug>')
def asteroid_details(asteroid_slug=None):
# slug is a slug of asteroid prov des
if not asteroid_slug:
return 'Sorry, could not find this asteroid in our database.', 404
unslug = asteroid_slug.replace('-', ' ')
# Need to get top 10, otherwise sometimes the best match is not returned by mongo.
candidates = api.autocomplete(unslug, 10) # TODO better way?
if len(candidates) < 1:
return 'Sorry, could not find this asteroid in our database.', 404
asteroid = candidates[0]
jpl_result = api.jpl_lookup(asteroid['prov_des'])
if 'spec' in asteroid:
composition_result = api.compositions()[asteroid['spec']]
else:
composition_result = []
return render_template('asteroid.html', asteroid=asteroid, jpl=jpl_result, composition=composition_result)
# General api routes
@app.route('/api/mpc')
def api_mpc():
try:
query = json.loads(request.args.get('query') or '{}')
limit = min(5000, int(request.args.get('limit') or 1000))
json_resp = json.dumps(api.mpc(query, limit))
return Response(json_resp, mimetype='application/json')
except Exception, e:
print str(e)
resp = jsonify({'error': 'bad request'})
resp.status_code = 500
return resp
@app.route('/api/kepler')
def api_kepler():
try:
query = json.loads(request.args.get('query'))
limit = min(1000, int(request.args.get('limit')))
json_resp = json.dumps(api.kepler(query, limit))
return Response(json_resp, mimetype='application/json')
except Exception, e:
print str(e)
resp = jsonify({'error': 'bad request'})
resp.status_code = 500
return resp
@app.route('/api/exoplanets')
def api_exoplanets():
try:
query = json.loads(request.args.get('query'))
limit = min(1000, int(request.args.get('limit')))
json_resp = json.dumps(api.exoplanets(query, limit))
return Response(json_resp, mimetype='application/json')
except ValueError:
resp = jsonify({'error': 'bad request'})
resp.status_code = 500
return resp
@app.route('/api/asterank')
def api_asterank():
try:
query = json.loads(request.args.get('query'))
limit = min(1000, int(request.args.get('limit')))
json_resp = json.dumps(api.asterank(query, limit))
return Response(json_resp, mimetype='application/json')
except Exception, e:
print str(e)
resp = jsonify({'error': 'bad request'})
resp.status_code = 500
return resp
@app.route('/api/rankings')
def rankings():
try:
limit = int(request.args.get('limit')) or 10
orbital_info_only = request.args.get('orbits_only')
results = api.rankings(request.args.get('sort_by'), limit, orbits_only=orbital_info_only)
json_resp = json.dumps(results)
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'max-age=432000', # 5 days
})
except Exception, e:
resp = jsonify({'error': 'bad request', 'details': str(e)})
resp.status_code = 500
return resp
@app.route('/api/autocomplete')
def autocomplete():
results = api.autocomplete(request.args.get('query'), 10)
json_resp = json.dumps(results)
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'max-age=432000', # 5 days
})
@app.route('/api/compositions')
def compositions():
json_resp = json.dumps(api.compositions())
return Response(json_resp, mimetype='application/json')
@app.route('/jpl/lookup')
def horizons():
query = request.args.get('query')
result = api.jpl_lookup(query)
if result:
json_resp = json.dumps(result)
return Response(json_resp, mimetype='application/json')
else:
return Response('{}', mimetype='application/json')
# Skymorph routes
@app.route('/api/skymorph/search')
def skymorph_search_target():
return jsonify({'results': skymorph.search_target(request.args.get('target'))})
@app.route('/api/skymorph/images_for')
def skymorph_images_for():
return jsonify({'images': skymorph.images_for(request.args.get('target'))})
@app.route('/api/skymorph/search_orbit')
def skymorph_search_orbit():
search_results = skymorph.search_ephem( \
request.args.get('epoch'),
request.args.get('ecc'),
request.args.get('per'),
request.args.get('per_date'),
request.args.get('om'),
request.args.get('w'),
request.args.get('i'),
request.args.get('H'),
)
ret = {'results': search_results}
return jsonify(ret)
@app.route('/api/skymorph/search_position')
def skymorph_search_time():
search_results = skymorph.search_position( \
request.args.get('ra'),
request.args.get('dec'),
request.args.get('time'),
)
ret = {'results': search_results}
return jsonify(ret)
@app.route('/api/skymorph/image')
def skymorph_image():
ret = skymorph.get_image(request.args.get('key'))
if type(ret) == dict:
return jsonify(ret)
else:
response = make_response(ret)
response.headers['Content-type'] = 'image/gif'
return response
@app.route('/api/skymorph/fast_image')
def skymorph_fast_image():
ret = skymorph.get_fast_image(request.args.get('key'))
if type(ret) == dict:
return jsonify(ret)
else:
response = make_response(ret)
response.headers['Content-type'] = 'image/png'
return response
# SDSS routes
@app.route('/api/sdss/get_unknown_group')
def sdss_unknown_group():
from sdss import sdss
json_resp = json.dumps(sdss.get_unknown_group())
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'no-cache',
})
@app.route('/api/sdss/image')
def sdss_image():
from sdss import sdss
ret = sdss.image_from_key(request.args.get('key'))
response = make_response(ret)
response.headers['Content-type'] = 'image/png'
return response
# Stack/blink Discover routes
@app.route('/discover')
def discover():
first_time = session.get('discover_first_time', True)
session['discover_first_time'] = False
return render_template('discover.html',
first_time=first_time,
image_count=stackblink.get_image_count(),
interesting_count=stackblink.get_interesting_count(),
user_count=stackblink.get_user_count(),
)
@app.route('/api/stackblink/get_neat_control_group')
def get_neat_control_group():
json_resp = json.dumps(stackblink.get_control_groups())
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'no-cache',
})
@app.route('/api/stackblink/get_sdss_unknown_group')
def get_sdss_unknown_group():
from sdss import sdss
json_resp = json.dumps(sdss.get_unknown_group())
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'no-cache',
})
@app.route('/api/stackblink/record', methods=['GET', 'POST'])
def stackblink_record():
postdata = json.loads(request.data)
json_resp = json.dumps(stackblink.record( \
postdata.get('email', None), \
postdata.get('keys', None), \
postdata.get('interesting', None), \
postdata.get('poor_quality', None)))
return Response(json_resp, mimetype='application/json', headers={ \
'Cache-Control': 'no-cache',
})
# Kepler
@app.route('/exoplanets')
@app.route('/kepler3d')
def kepler3d():
return render_template('kepler3d.html')
# User custom objects
@app.route('/api/user_objects', methods=['GET', 'POST'])
def user_objects():
if request.method == 'GET':
return jsonify({'results': api.retrieve_user_objects(300)}) # limit set to 300 objects for now
postdata = json.loads(request.data)
if 'object' not in postdata:
return jsonify({})
obj = postdata['object']
image_keys = postdata.get('keys', None)
return jsonify(api.insert_user_object(obj, image_keys))
# Other Pages
@app.route('/about', methods=['GET', 'POST'])
def about():
if request.method == 'GET':
return render_template('about.html')
else:
email = request.form.get('email', None)
feedback = request.form.get('feedback', None)
if not feedback or feedback.find('a href') > -1:
return 'Form rejected because you look like a spambot. Please email me directly.'
from flask.ext.mail import Message
msg = Message('Asterank Feedback',
sender='[email protected]',
recipients=['[email protected]'],
body='%s:\r\n%s' % (email, feedback))
mail.send(msg)
return render_template('about.html')
@app.route('/feedback')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/mpc')
def mpc():
return render_template('mpc.html')
@app.route('/kepler')
def kepler():
return render_template('kepler.html')
@app.route('/exoplanets')
def exoplanets():
return render_template('exoplanets.html')
@app.route('/neat')
def neat_docs():
return redirect('/skymorph')
@app.route('/skymorph')
def skymorph_docs():
return render_template('skymorph.html')
@app.route('/api')
def api_route():
return render_template('api.html')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', use_reloader=True, threaded=True)
|
|
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .. import AWSHelperFn, AWSProperty, BaseAWSObject, encode_to_dict
from . import boolean, check_required, encoding
def validate_int_to_str(x):
"""
Backward compatibility - field was int and now str.
Property: WaitCondition.Timeout
"""
if isinstance(x, int):
return str(x)
if isinstance(x, str):
return str(int(x))
raise TypeError(f"Value {x} of type {type(x)} must be either int or str")
class AWSCustomObject(BaseAWSObject):
"""
Export:
"""
dictname = "Properties"
def validate_wait_condition(self):
"""
Class: WaitCondition
"""
if "CreationPolicy" in self.resource:
for k in self.props.keys():
if k in self.properties:
raise ValueError(
"Property %s cannot be specified with CreationPolicy" % k
)
else:
required = ["Handle", "Timeout"]
check_required(self.__class__.__name__, self.properties, required)
class Metadata(AWSHelperFn):
"""
Export:
"""
def __init__(self, *args):
self.data = args
def to_dict(self):
t = []
for i in self.data:
t += list(encode_to_dict(i).items())
return dict(t)
class InitFileContext(AWSHelperFn):
"""
Export:
"""
def __init__(self, data):
self.data = data
class InitFile(AWSProperty):
"""
Export:
"""
props = {
"content": (str, False),
"mode": (str, False),
"owner": (str, False),
"encoding": (encoding, False),
"group": (str, False),
"source": (str, False),
"authentication": (str, False),
"context": (InitFileContext, False),
}
class InitFiles(AWSHelperFn):
"""
Export:
"""
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
for k in data:
if not isinstance(data[k], InitFile):
raise ValueError("File '" + k + "' must be of type InitFile")
class InitService(AWSProperty):
"""
Export:
"""
props = {
"ensureRunning": (boolean, False),
"enabled": (boolean, False),
"files": (list, False),
"packages": (dict, False),
"sources": (list, False),
"commands": (list, False),
}
class InitServices(AWSHelperFn):
"""
Export:
"""
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
for k in data:
if not isinstance(data[k], InitService):
raise ValueError("Service '" + k + "' must be of type InitService")
class InitConfigSets(AWSHelperFn):
"""
Export:
"""
def __init__(self, **kwargs):
self.validate(dict(kwargs))
self.data = kwargs
def validate(self, config_sets):
for k, v in config_sets.items():
if not isinstance(v, list):
raise ValueError("configSets values must be of type list")
class InitConfig(AWSProperty):
"""
Export:
"""
props = {
"groups": (dict, False),
"users": (dict, False),
"sources": (dict, False),
"packages": (dict, False),
"files": (dict, False),
"commands": (dict, False),
"services": (dict, False),
}
def validate_authentication_type(auth_type):
valid_types = ["S3", "basic"]
if auth_type not in valid_types:
raise ValueError("Type needs to be one of %r" % valid_types)
return auth_type
class AuthenticationBlock(AWSProperty):
"""
Export:
"""
props = {
"accessKeyId": (str, False),
"buckets": ([str], False),
"password": (str, False),
"secretKey": (str, False),
"type": (validate_authentication_type, False),
"uris": ([str], False),
"username": (str, False),
"roleName": (str, False),
}
class Authentication(AWSHelperFn):
"""
Export:
"""
def __init__(self, data):
self.validate(data)
self.data = {"AWS::CloudFormation::Authentication": data}
def validate(self, data):
for k, v in data.items():
if not isinstance(v, AuthenticationBlock):
raise ValueError(
"authentication block must be of type"
" cloudformation.AuthenticationBlock"
)
class Init(AWSHelperFn):
"""
Export:
"""
def __init__(self, data, **kwargs):
self.validate(data, dict(kwargs))
if isinstance(data, InitConfigSets):
self.data = {
"AWS::CloudFormation::Init": dict({"configSets": data}, **kwargs)
}
else:
self.data = {"AWS::CloudFormation::Init": data}
def validate(self, data, config_sets):
if isinstance(data, InitConfigSets):
for k, v in sorted(config_sets.items()):
if not isinstance(v, InitConfig):
raise ValueError(
"init configs must of type ", "cloudformation.InitConfigSet"
)
else:
if "config" not in data:
raise ValueError("config property is required")
if not isinstance(data["config"], InitConfig):
raise ValueError(
"config property must be of type cloudformation.InitConfig"
)
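# Illustrative usage sketch (added comment, follows the usual troposphere
# cfn-init pattern; names such as "/tmp/example.txt" are placeholders):
#
#   init = Init(
#       InitConfigSets(default=["config"]),
#       config=InitConfig(
#           files=InitFiles({"/tmp/example.txt": InitFile(content="hello")}),
#       ),
#   )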
|
|
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from pypowervm.utils import validation as vldn
from pypowervm.wrappers import base_partition as bp
from pypowervm.wrappers import logical_partition as lpar
from pypowervm.wrappers import managed_system as mgd_sys
class TestValidator(testtools.TestCase):
"""Unit tests for validation."""
def setUp(self):
super(TestValidator, self).setUp()
def _bld_mgd_sys(proc_units_avail=20.0, mem_free=32768,
system_name='default_sys_name',
max_procs_per_aix_linux_lpar=10,
max_sys_procs_limit=15,
max_vcpus_per_aix_linux_lpar=10,
max_sys_vcpus_limit=15,
dynamic_srr_capable=True):
# Build a fake managed system wrapper
mngd_sys = mock.MagicMock(spec=mgd_sys.System)
mngd_sys.system_name = system_name
mngd_sys.proc_units_avail = proc_units_avail
mngd_sys.memory_free = mem_free
mngd_sys.max_procs_per_aix_linux_lpar = (
max_procs_per_aix_linux_lpar)
mngd_sys.max_sys_procs_limit = max_sys_procs_limit
mngd_sys.max_vcpus_per_aix_linux_lpar = (
max_vcpus_per_aix_linux_lpar)
mngd_sys.max_sys_vcpus_limit = max_sys_vcpus_limit
mngd_sys.get_capability.return_value = dynamic_srr_capable
return mngd_sys
def _bld_lpar(proc_units=1.0, min_mem=512, des_mem=2048, max_mem=4096,
has_dedicated=False, name='default', rmc_state='active',
mem_dlpar=True, proc_dlpar=True, state='running',
env='AIX/Linux', proc_compat='Default', srr_enabled=True,
min_vcpus=1, des_vcpus=2, max_vcpus=4,
min_proc_units=0.1, max_proc_units=1.0, pool_id=None,
exp_factor=0.0, ame_enabled=False, ppt_ratio=None):
lpar_w = mock.MagicMock()
# name, states, env, etc.
lpar_w.name = name
lpar_w.state = state
lpar_w.rmc_state = rmc_state
lpar_w.env = env
lpar_w.proc_compat_mode = proc_compat
lpar_w.srr_enabled = srr_enabled
# Proc
lpar_w.proc_config.has_dedicated = has_dedicated
if has_dedicated:
lpar_w.proc_config.dedicated_proc_cfg.desired = proc_units
lpar_w.proc_config.dedicated_proc_cfg.max = max_vcpus
lpar_w.proc_config.dedicated_proc_cfg.min = min_vcpus
else:
lpar_w.proc_config.shared_proc_cfg.desired_units = proc_units
lpar_w.proc_config.shared_proc_cfg.desired_virtual = des_vcpus
lpar_w.proc_config.shared_proc_cfg.max_virtual = max_vcpus
lpar_w.proc_config.shared_proc_cfg.min_virtual = min_vcpus
lpar_w.proc_config.shared_proc_cfg.pool_id = (
pool_id if pool_id else 0)
lpar_w.proc_config.shared_proc_cfg.min_units = min_proc_units
lpar_w.proc_config.shared_proc_cfg.max_units = max_proc_units
# Mem
lpar_w.mem_config.desired = des_mem
lpar_w.mem_config.min = min_mem
lpar_w.mem_config.max = max_mem
lpar_w.mem_config.exp_factor = exp_factor
lpar_w.mem_config.ppt_ratio = ppt_ratio
# Can Modify
if (state != bp.LPARState.NOT_ACTIVATED
and rmc_state != bp.RMCState.ACTIVE):
lpar_w.can_modify_proc.return_value = (False, 'Bad RMC')
lpar_w.can_modify_mem.return_value = (False, 'Bad RMC')
else:
# Doesn't matter what the message is unless it's bad
# so always make it bad
lpar_w.can_modify_proc.return_value = (proc_dlpar,
'Bad proc DLPAR')
lpar_w.can_modify_mem.return_value = (mem_dlpar,
'Bad mem DLPAR')
mocked = mock.MagicMock(spec_set=lpar.LPAR, return_value=lpar_w)
return mocked()
self.mngd_sys = _bld_mgd_sys()
self.mngd_sys_no_dyn_srr = _bld_mgd_sys(dynamic_srr_capable=False)
self.lpar_21_procs = _bld_lpar(proc_units=21.0, name='lpar_21_procs')
self.lpar_1_proc = _bld_lpar()
self.lpar_11_vcpus = _bld_lpar(des_vcpus=11, name='11_vcpus')
self.lpar_16_max_vcpus = _bld_lpar(max_vcpus=16, name='16_max_vcpus')
self.lpar_1_proc_ded = _bld_lpar(has_dedicated=True, name='1_proc_ded')
self.lpar_11_proc_ded = _bld_lpar(proc_units=11, has_dedicated=True,
name='11_proc_ded')
self.lpar_16_proc_max_ded = _bld_lpar(max_vcpus=16, has_dedicated=True,
name='16_proc_max_ded')
self.lpar_21_proc_ded = _bld_lpar(proc_units=21, has_dedicated=True,
name='21_proc_ded')
self.lpar_no_rmc = _bld_lpar(rmc_state='inactive')
self.lpar_bad_mem_dlpar = _bld_lpar(mem_dlpar=False)
self.lpar_bad_proc_dlpar = _bld_lpar(proc_dlpar=False)
self.lpar_48g_mem = _bld_lpar(des_mem=48000, name='lpar_48g_mem')
self.lpar_1_min_vcpus = _bld_lpar(min_vcpus=1, name='1_min_vcpus')
self.lpar_2_min_vcpus = _bld_lpar(min_vcpus=2, name='2_min_vcpus')
self.lpar_1_min_proc_units = _bld_lpar(min_proc_units=0.1,
name='0.1_min_procs')
self.lpar_3_min_proc_units = _bld_lpar(min_proc_units=0.3,
name='0.3_min_procs')
self.lpar_6_max_proc_units = _bld_lpar(max_proc_units=0.6,
name='0.6_max_procs')
self.lpar_9_max_proc_units = _bld_lpar(max_proc_units=0.9,
name='0.9_max_procs')
self.lpar_6_max_vcpus = _bld_lpar(max_vcpus=6, name='6_max_vcpus')
self.lpar_8_max_vcpus = _bld_lpar(max_vcpus=8, name='8_max_vcpus')
self.lpar_512mb_min_mem = _bld_lpar(min_mem=512, name='512_min_mem')
self.lpar_1gb_min_mem = _bld_lpar(min_mem=1024, name='1gb_min_mem')
self.lpar_6g_max_mem = _bld_lpar(max_mem=6144, name='6gb_max_mem')
self.lpar_8g_max_mem = _bld_lpar(max_mem=8192, name='8gb_max_mem')
self.lpar_default_spp = _bld_lpar(pool_id=0, name='default_spp')
self.lpar_non_default_spp = _bld_lpar(pool_id=2,
name='non_default_spp')
self.lpar_power8_proc_compat = _bld_lpar(proc_compat="POWER8",
name='power8_compat_mode')
self.lpar_srr_disabled = _bld_lpar(srr_enabled=False,
name='srr_disabled')
self.lpar_1_proc_ded_inactive = _bld_lpar(has_dedicated=True,
name='1_proc_ded_inactive',
state='not activated')
self.lpar_22_procs = _bld_lpar(proc_units=22.0, name='lpar_22_procs')
self.lpar_4_proc_ded = _bld_lpar(proc_units=4.0,
has_dedicated=True, name='4_proc_ded')
self.lpar_22_proc_ded = _bld_lpar(proc_units=22, has_dedicated=True,
name='21_proc_ded')
self.lpar_4g_mem = _bld_lpar(des_mem=4096, name='4gb_mem')
self.lpar_6g_mem = _bld_lpar(des_mem=6144, name='6gb_mem')
self.lpar_1dot6_proc_units = _bld_lpar(proc_units=1.6,
name='1.6_procs')
self.lpar_2dot2_proc_units = _bld_lpar(proc_units=2.2,
name='2.2_procs')
self.lpar_1_vcpus = _bld_lpar(des_vcpus=1, name='lpar_1_vcpus')
self.lpar_not_activated = _bld_lpar(name='lpar_not_activated',
state='not activated')
self.lpar_running = _bld_lpar(name='lpar_running', state='running')
self.lpar_starting = _bld_lpar(name='lpar_starting', state='starting')
self.lpar_ame_2 = _bld_lpar(name='ame_2', exp_factor=2.0,
ame_enabled=True)
self.lpar_ame_3 = _bld_lpar(name='ame_3', exp_factor=3.0,
ame_enabled=True)
self.lpar_ppt_1 = _bld_lpar(name='ppt_1', ppt_ratio=4)
self.lpar_ppt_2 = _bld_lpar(name='ppt_2', ppt_ratio=2)
def test_validator(self):
# Test desired proc units > host avail proc units fails for shared
vldr = vldn.LPARWrapperValidator(self.lpar_21_procs, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test desired proc units < host avail proc units passes for shared
vldn.LPARWrapperValidator(self.lpar_1_proc,
self.mngd_sys).validate_all()
# Test desired proc units > host avail proc units fails for dedicated
vldr = vldn.LPARWrapperValidator(self.lpar_21_proc_ded, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test desired proc units < host avail proc units passes for dedicated
vldn.LPARWrapperValidator(self.lpar_1_proc_ded,
self.mngd_sys).validate_all()
# Test resize fails with inactive rmc
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys,
cur_lpar_w=self.lpar_no_rmc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test resize fails with no mem dlpar
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys,
cur_lpar_w=self.lpar_bad_mem_dlpar)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test resize fails with no proc dlpar
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys,
cur_lpar_w=self.lpar_bad_proc_dlpar)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test dedicated procs > host max allowed procs per lpar fails
vldr = vldn.LPARWrapperValidator(self.lpar_11_proc_ded, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test dedicated max procs > host max sys procs limit fails
vldr = vldn.LPARWrapperValidator(self.lpar_16_proc_max_ded,
self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test shared desired vcpus > host max allowed vcpus per lpar fails
vldr = vldn.LPARWrapperValidator(self.lpar_11_vcpus, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test shared desired max vcpus > host max sys vcpus limit fails
vldr = vldn.LPARWrapperValidator(self.lpar_16_max_vcpus, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test desired memory > host available memory fails
vldr = vldn.LPARWrapperValidator(self.lpar_48g_mem, self.mngd_sys)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing min vcpus fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_1_min_vcpus, self.mngd_sys,
cur_lpar_w=self.lpar_2_min_vcpus)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing max vcpus fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_8_max_vcpus, self.mngd_sys,
cur_lpar_w=self.lpar_6_max_vcpus)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing min proc units fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_3_min_proc_units,
self.mngd_sys,
cur_lpar_w=self.lpar_1_min_proc_units)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing max proc units fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_9_max_proc_units,
self.mngd_sys,
cur_lpar_w=self.lpar_6_max_proc_units)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing min memory fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_512mb_min_mem,
self.mngd_sys,
cur_lpar_w=self.lpar_1gb_min_mem)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing max memory fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_8g_max_mem, self.mngd_sys,
cur_lpar_w=self.lpar_6g_max_mem)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing AME expansion factor from 2 to 3 fails active resize
vldr = vldn.LPARWrapperValidator(self.lpar_ame_3, self.mngd_sys,
cur_lpar_w=self.lpar_ame_2)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test toggling AME fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_ame_2, self.mngd_sys,
cur_lpar_w=self.lpar_1_proc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing PPT ratio fails during active resize
vldr = vldn.LPARWrapperValidator(self.lpar_ppt_1, self.mngd_sys,
cur_lpar_w=self.lpar_ppt_2)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test not changing PPT ratio passes during active resize
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys,
cur_lpar_w=self.lpar_ppt_2)
vldr.validate_all()
# Test resizing lpar from defaultSPP to non-defaultSPP passes
vldr = vldn.LPARWrapperValidator(self.lpar_non_default_spp,
self.mngd_sys,
cur_lpar_w=self.lpar_default_spp)
vldr.validate_all()
# Test resizing lpar from non-defaultSPP to defaultSPP passes
vldr = vldn.LPARWrapperValidator(self.lpar_default_spp,
self.mngd_sys,
cur_lpar_w=self.lpar_non_default_spp)
vldr.validate_all()
# Test changing from dedicated to non-defaultSPP passes
vldr = vldn.LPARWrapperValidator(self.lpar_non_default_spp,
self.mngd_sys,
self.lpar_1_proc_ded_inactive)
vldr.validate_all()
# Test changing processor mode (shared -> ded) fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc_ded,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing processor mode (ded to shared) fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc_ded)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test changing processor compatibility mode fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_power8_proc_compat,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
        # Test changing SRR capability fails for active resize
vldr = vldn.LPARWrapperValidator(self.lpar_srr_disabled,
self.mngd_sys_no_dyn_srr,
cur_lpar_w=self.lpar_1_proc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# ...unless dynamic_srr_capable
vldr = vldn.LPARWrapperValidator(self.lpar_srr_disabled,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc)
vldr.validate_all()
# Test desired delta proc units > host avail proc units fails
# during resize (shared -> shared)
vldr = vldn.LPARWrapperValidator(self.lpar_22_procs, self.mngd_sys,
cur_lpar_w=self.lpar_1_proc)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test desired delta proc units <= host avail proc units passes
# during resize (shared -> shared)
vldn.LPARWrapperValidator(self.lpar_21_procs,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc).validate_all()
# Test desired delta proc units > host avail proc units fails
# during resize (dedicated -> dedicated)
vldr = vldn.LPARWrapperValidator(self.lpar_22_proc_ded, self.mngd_sys,
cur_lpar_w=self.lpar_1_proc_ded)
self.assertRaises(vldn.ValidatorException, vldr.validate_all)
# Test desired delta proc units <= host avail proc units passes
# during resize (dedicated -> dedicated)
vldn.LPARWrapperValidator(self.lpar_4_proc_ded,
self.mngd_sys,
self.lpar_1_proc_ded).validate_all()
# Test resize delta mem
mem_vldr = vldn.MemValidator(self.lpar_6g_mem, self.mngd_sys,
cur_lpar_w=self.lpar_4g_mem)
mem_vldr._populate_new_values()
mem_vldr._populate_resize_diffs()
self.assertEqual(2048, mem_vldr.delta_des_mem,
'Incorrect resize delta memory calculation')
# Test resize delta procs
proc_vldr = vldn.ProcValidator(self.lpar_4_proc_ded, self.mngd_sys,
cur_lpar_w=self.lpar_1_proc_ded)
proc_vldr._populate_new_values()
proc_vldr._populate_resize_diffs()
self.assertEqual(3, proc_vldr.delta_des_vcpus,
'Incorrect resize delta proc calculation'
' in dedicated mode')
proc_vldr = vldn.ProcValidator(self.lpar_2dot2_proc_units,
self.mngd_sys,
cur_lpar_w=self.lpar_1dot6_proc_units)
proc_vldr._populate_new_values()
proc_vldr._populate_resize_diffs()
self.assertEqual(0.60, proc_vldr.delta_des_vcpus,
'Incorrect resize delta proc calculation in'
' shared mode')
proc_vldr = vldn.ProcValidator(self.lpar_1dot6_proc_units,
self.mngd_sys,
cur_lpar_w=self.lpar_1_proc_ded)
proc_vldr._populate_new_values()
proc_vldr._populate_resize_diffs()
self.assertEqual(0.60, proc_vldr.delta_des_vcpus,
'Incorrect delta proc calculation while resizing '
'from dedicated to shared mode')
proc_vldr = vldn.ProcValidator(self.lpar_4_proc_ded, self.mngd_sys,
cur_lpar_w=self.lpar_1dot6_proc_units)
proc_vldr._populate_new_values()
proc_vldr._populate_resize_diffs()
self.assertEqual(2.40, proc_vldr.delta_des_vcpus,
'Incorrect delta proc calculation while resizing '
'from shared to dedicated mode')
        # Test that resizing a not activated lpar runs inactive resize checks
with mock.patch('pypowervm.utils.validation.ProcValidator.'
'_validate_inactive_resize') as inactive_resize_checks:
proc_vldr = vldn.ProcValidator(self.lpar_not_activated,
self.mngd_sys,
cur_lpar_w=self.lpar_not_activated)
proc_vldr.validate()
self.assertTrue(inactive_resize_checks.called,
'Inactive resize validations not performed.')
        # Test that resizing a running lpar runs active resize checks
with mock.patch('pypowervm.utils.validation.ProcValidator.'
'_validate_active_resize') as active_resize_checks:
proc_vldr = vldn.ProcValidator(self.lpar_running, self.mngd_sys,
cur_lpar_w=self.lpar_running)
proc_vldr.validate()
self.assertTrue(active_resize_checks.called,
'Active resize validations not performed.')
        # Test that resizing a starting lpar runs active resize checks
with mock.patch('pypowervm.utils.validation.ProcValidator.'
'_validate_active_resize') as active_resize_checks:
proc_vldr = vldn.ProcValidator(self.lpar_starting, self.mngd_sys,
cur_lpar_w=self.lpar_starting)
proc_vldr.validate()
self.assertTrue(active_resize_checks.called,
'Active resize validations not performed.')
@mock.patch('pypowervm.utils.validation.ProcValidator.validate')
@mock.patch('pypowervm.utils.validation.MemValidator.validate')
def test_validator_check_dlpar(self, mem_val_validate, proc_val_validate):
vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys,
cur_lpar_w=self.lpar_no_rmc)
vldr.validate_all(check_dlpar=False)
mem_val_validate.assert_called_once_with(check_dlpar=False)
proc_val_validate.assert_called_once_with(check_dlpar=False)
mem_val_validate.reset_mock()
proc_val_validate.reset_mock()
vldr = vldn.LPARWrapperValidator(self.lpar_running, self.mngd_sys,
cur_lpar_w=self.lpar_running)
vldr.validate_all()
mem_val_validate.assert_called_once_with(check_dlpar=True)
proc_val_validate.assert_called_once_with(check_dlpar=True)
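# A minimal usage sketch for the validators exercised above (illustrative
# only, not part of the test suite; `host_w` and `lpar_w` stand in for real
# ManagedSystem and LPAR wrappers fetched from the API):
#
#     from pypowervm.utils import validation as vldn
#
#     vldr = vldn.LPARWrapperValidator(lpar_w, host_w, cur_lpar_w=None)
#     vldr.validate_all()  # raises vldn.ValidatorException on failure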
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore.client import ClientError
from collections import Counter
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
import logging
import itertools
import time
from c7n.actions import ActionRegistry, BaseAction, AutoTagUser
from c7n.filters import (
FilterRegistry, ValueFilter, AgeFilter, Filter, FilterValidationError,
OPERATORS)
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.tags import TagActionFilter, DEFAULT_TAG, TagCountFilter, TagTrim
from c7n.utils import (
local_session, query_instances, type_schema, chunks, get_retry)
log = logging.getLogger('custodian.asg')
filters = FilterRegistry('asg.filters')
actions = ActionRegistry('asg.actions')
filters.register('offhour', OffHour)
filters.register('onhour', OnHour)
filters.register('tag-count', TagCountFilter)
filters.register('marked-for-op', TagActionFilter)
actions.register('auto-tag-user', AutoTagUser)
@resources.register('asg')
class ASG(QueryResourceManager):
resource_type = "aws.autoscaling.autoScalingGroup"
filter_registry = filters
action_registry = actions
retry = staticmethod(get_retry(('ResourceInUse', 'Throttling',)))
class LaunchConfigFilterBase(object):
"""Mixin base class for querying asg launch configs."""
def initialize(self, asgs):
"""Get launch configs for the set of asgs"""
config_names = set()
skip = []
for a in asgs:
# Per https://github.com/capitalone/cloud-custodian/issues/143
if 'LaunchConfigurationName' not in a:
skip.append(a)
continue
config_names.add(a['LaunchConfigurationName'])
for a in skip:
asgs.remove(a)
self.configs = {}
self.log.debug(
"Querying launch configs for filter %s",
self.__class__.__name__)
config_manager = LaunchConfig(self.manager.ctx, {})
if len(asgs) < 20:
configs = config_manager.get_resources(
[asg['LaunchConfigurationName'] for asg in asgs])
else:
configs = config_manager.resources()
self.configs = {
cfg['LaunchConfigurationName']: cfg for cfg in configs}
@filters.register('security-group')
class SecurityGroupFilter(
net_filters.SecurityGroupFilter, LaunchConfigFilterBase):
RelatedIdsExpression = ""
def get_related_ids(self, resources):
group_ids = []
for asg in resources:
cfg = self.configs.get(asg['LaunchConfigurationName'])
group_ids.extend(cfg.get('SecurityGroups', ()))
return set(group_ids)
def process(self, asgs, event=None):
self.initialize(asgs)
return super(SecurityGroupFilter, self).process(asgs, event)
@filters.register('launch-config')
class LaunchConfigFilter(ValueFilter, LaunchConfigFilterBase):
"""Filter asg by launch config attributes."""
schema = type_schema(
'launch-config', rinherit=ValueFilter.schema)
config = None
def process(self, asgs, event=None):
self.initialize(asgs)
return super(LaunchConfigFilter, self).process(asgs, event)
def __call__(self, asg):
# Active launch configs can be deleted..
cfg = self.configs.get(asg['LaunchConfigurationName'])
return self.match(cfg)
class ConfigValidFilter(Filter, LaunchConfigFilterBase):
def validate(self):
if self.manager.data.get('mode'):
raise FilterValidationError(
"invalid-config makes too many queries to be run efficiently in lambda")
return self
def initialize(self, asgs):
super(ConfigValidFilter, self).initialize(asgs)
self.subnets = self.get_subnets()
self.security_groups = self.get_security_groups()
self.key_pairs = self.get_key_pairs()
self.elbs = self.get_elbs()
self.appelb_target_groups = self.get_appelb_target_groups()
self.snapshots = self.get_snapshots()
self.images = self.get_images()
def get_subnets(self):
from c7n.resources.vpc import Subnet
manager = Subnet(self.manager.ctx, {})
return set([s['SubnetId'] for s in manager.resources()])
def get_security_groups(self):
from c7n.resources.vpc import SecurityGroup
manager = SecurityGroup(self.manager.ctx, {})
return set([s['GroupId'] for s in manager.resources()])
def get_key_pairs(self):
from c7n.resources.vpc import KeyPair
manager = KeyPair(self.manager.ctx, {})
return set([k['KeyName'] for k in manager.resources()])
def get_elbs(self):
from c7n.resources.elb import ELB
manager = ELB(self.manager.ctx, {})
return set([e['LoadBalancerName'] for e in manager.resources()])
def get_appelb_target_groups(self):
from c7n.resources.appelb import AppELBTargetGroup
manager = AppELBTargetGroup(self.manager.ctx, {})
return set([a['TargetGroupArn'] for a in manager.resources()])
def get_images(self):
from c7n.resources.ami import AMI
manager = AMI(self.manager.ctx, {})
images = set()
        # Verify image snapshot validity. I've been told by a TAM this is a
        # possibility, but haven't seen evidence of it, since snapshots are
        # strongly referenced by AMIs; still, it's negligible cost to verify.
for a in manager.resources():
found = True
for bd in a.get('BlockDeviceMappings', ()):
if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
continue
if bd['Ebs']['SnapshotId'] not in self.snapshots:
found = False
break
if found:
images.add(a['ImageId'])
return images
def get_snapshots(self):
from c7n.resources.ebs import Snapshot
manager = Snapshot(self.manager.ctx, {})
return set([s['SnapshotId'] for s in manager.resources()])
def process(self, asgs, event=None):
self.initialize(asgs)
return super(ConfigValidFilter, self).process(asgs, event)
def get_asg_errors(self, asg):
errors = []
subnets = asg.get('VPCZoneIdentifier', '').split(',')
for s in subnets:
if s not in self.subnets:
errors.append(('invalid-subnet', s))
for elb in asg['LoadBalancerNames']:
if elb not in self.elbs:
errors.append(('invalid-elb', elb))
for appelb_target_group in asg.get('TargetGroupARNs', []):
if appelb_target_group not in self.appelb_target_groups:
                errors.append(
                    ('invalid-appelb-target-group', appelb_target_group))
cfg_id = asg.get(
'LaunchConfigurationName', asg['AutoScalingGroupName'])
cfg = self.configs.get(cfg_id)
if cfg is None:
errors.append(('invalid-config', cfg_id))
self.log.debug(
"asg:%s no launch config found" % asg['AutoScalingGroupName'])
asg['Invalid'] = errors
            return errors
for sg in cfg['SecurityGroups']:
if sg not in self.security_groups:
errors.append(('invalid-security-group', sg))
if cfg['KeyName'] and cfg['KeyName'] not in self.key_pairs:
errors.append(('invalid-key-pair', cfg['KeyName']))
if cfg['ImageId'] not in self.images:
errors.append(('invalid-image', cfg['ImageId']))
for bd in cfg['BlockDeviceMappings']:
if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
continue
if bd['Ebs']['SnapshotId'] not in self.snapshots:
errors.append(('invalid-snapshot', bd['Ebs']['SnapshotId']))
return errors
@filters.register('valid')
class ValidConfigFilter(ConfigValidFilter):
"""Filters autoscale groups to find those that are structurally valid.
This operates as the inverse of the invalid filter for multi-step
workflows.
See details on the invalid filter for a list of checks made.
"""
schema = type_schema('valid')
def __call__(self, asg):
errors = self.get_asg_errors(asg)
return not bool(errors)
@filters.register('invalid')
class InvalidConfigFilter(ConfigValidFilter):
"""Filter autoscale groups to find those that are structurally invalid.
    Structurally invalid means that the auto scaling group will not be able
    to launch an instance successfully as the configuration has
- invalid subnets
- invalid security groups
- invalid key pair name
- invalid launch config volume snapshots
- invalid amis
- invalid health check elb (slower)
Internally this tries to reuse other resource managers for better
cache utilization.
"""
schema = type_schema('invalid')
def __call__(self, asg):
errors = self.get_asg_errors(asg)
if errors:
asg['Invalid'] = errors
return True
@filters.register('not-encrypted')
class NotEncryptedFilter(Filter, LaunchConfigFilterBase):
"""Check if an asg is configured to have unencrypted volumes.
Checks both the ami snapshots and the launch configuration.
"""
schema = type_schema('not-encrypted', exclude_image={'type': 'boolean'})
images = unencrypted_configs = unencrypted_images = None
def process(self, asgs, event=None):
self.initialize(asgs)
return super(NotEncryptedFilter, self).process(asgs, event)
def __call__(self, asg):
cfg = self.configs.get(asg['LaunchConfigurationName'])
if not cfg:
self.log.warning(
"ASG %s instances: %d has missing config: %s",
asg['AutoScalingGroupName'], len(asg['Instances']),
asg['LaunchConfigurationName'])
return False
unencrypted = []
if (not self.data.get('exclude_image')
and cfg['ImageId'] in self.unencrypted_images):
unencrypted.append('Image')
if cfg['LaunchConfigurationName'] in self.unencrypted_configs:
unencrypted.append('LaunchConfig')
if unencrypted:
asg['Unencrypted'] = unencrypted
return bool(unencrypted)
def initialize(self, asgs):
super(NotEncryptedFilter, self).initialize(asgs)
ec2 = local_session(self.manager.session_factory).client('ec2')
self.unencrypted_images = self.get_unencrypted_images(ec2)
self.unencrypted_configs = self.get_unencrypted_configs(ec2)
def _fetch_images(self, ec2, image_ids):
while True:
try:
return ec2.describe_images(ImageIds=list(image_ids))
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
msg = e.response['Error']['Message']
e_ami_ids = [
e_ami_id.strip() for e_ami_id
in msg[msg.find("'[")+2:msg.rfind("]'")].split(',')]
self.log.warning(
"asg:not-encrypted filter image not found %s",
e_ami_ids)
for e_ami_id in e_ami_ids:
image_ids.remove(e_ami_id)
continue
raise
def get_unencrypted_images(self, ec2):
"""retrieve images which have unencrypted snapshots referenced."""
image_ids = set()
for cfg in self.configs.values():
image_ids.add(cfg['ImageId'])
self.log.debug("querying %d images", len(image_ids))
results = self._fetch_images(ec2, image_ids)
self.images = {i['ImageId']: i for i in results['Images']}
unencrypted_images = set()
for i in self.images.values():
for bd in i['BlockDeviceMappings']:
if 'Ebs' in bd and not bd['Ebs'].get('Encrypted'):
unencrypted_images.add(i['ImageId'])
break
return unencrypted_images
def get_unencrypted_configs(self, ec2):
"""retrieve configs that have unencrypted ebs voluems referenced."""
unencrypted_configs = set()
snaps = {}
for cid, c in self.configs.items():
image = self.images.get(c['ImageId'])
# image deregistered/unavailable
if image is not None:
image_block_devs = {
bd['DeviceName']: bd['Ebs']
for bd in image['BlockDeviceMappings'] if 'Ebs' in bd}
else:
image_block_devs = {}
for bd in c['BlockDeviceMappings']:
if 'Ebs' not in bd:
continue
# Launch configs can shadow image devices, images have
# precedence.
if bd['DeviceName'] in image_block_devs:
continue
if 'SnapshotId' in bd['Ebs']:
snaps.setdefault(
bd['Ebs']['SnapshotId'].strip(), []).append(cid)
elif not bd['Ebs'].get('Encrypted'):
unencrypted_configs.add(cid)
if not snaps:
return unencrypted_configs
self.log.debug("querying %d snapshots", len(snaps))
for s in self.get_snapshots(ec2, snaps.keys()):
if not s.get('Encrypted'):
unencrypted_configs.update(snaps[s['SnapshotId']])
return unencrypted_configs
def get_snapshots(self, ec2, snap_ids):
"""get snapshots corresponding to id, but tolerant of missing."""
while True:
try:
result = ec2.describe_snapshots(SnapshotIds=snap_ids)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
msg = e.response['Error']['Message']
e_snap_id = msg[msg.find("'")+1:msg.rfind("'")]
self.log.warning("Snapshot not found %s" % e_snap_id)
snap_ids.remove(e_snap_id)
continue
raise
else:
return result.get('Snapshots', ())
@filters.register('image-age')
class ImageAgeFilter(AgeFilter, LaunchConfigFilterBase):
"""Filter asg by image age."""
date_attribute = "CreationDate"
schema = type_schema(
'image-age',
op={'type': 'string', 'enum': OPERATORS.keys()},
days={'type': 'number'})
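    # Illustrative policy sketch (the 90-day threshold is an assumption; the
    # comparison operator falls back to AgeFilter's default when omitted):
    #
    #   policies:
    #     - name: asg-old-image
    #       resource: asg
    #       filters:
    #         - type: image-age
    #           days: 90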
def process(self, asgs, event=None):
self.initialize(asgs)
return super(ImageAgeFilter, self).process(asgs, event)
def initialize(self, asgs):
super(ImageAgeFilter, self).initialize(asgs)
image_ids = set()
for cfg in self.configs.values():
image_ids.add(cfg['ImageId'])
ec2 = local_session(self.manager.session_factory).client('ec2')
results = ec2.describe_images(ImageIds=list(image_ids))
self.images = {i['ImageId']: i for i in results['Images']}
def get_resource_date(self, i):
cfg = self.configs[i['LaunchConfigurationName']]
ami = self.images[cfg['ImageId']]
return parse(ami[self.date_attribute])
@filters.register('vpc-id')
class VpcIdFilter(ValueFilter):
schema = type_schema(
'vpc-id', rinherit=ValueFilter.schema)
schema['properties'].pop('key')
def __init__(self, data, manager=None):
super(VpcIdFilter, self).__init__(data, manager)
self.data['key'] = 'VpcId'
def process(self, asgs, event=None):
subnets = {}
for a in asgs:
subnet_ids = a.get('VPCZoneIdentifier', '')
if not subnet_ids:
continue
subnets.setdefault(subnet_ids.split(',')[0], []).append(a)
session = local_session(self.manager.session_factory)
ec2 = session.client('ec2')
# Invalid subnets on asgs happen, so query all
all_subnets = {s['SubnetId']: s for s in ec2.describe_subnets()[
'Subnets']}
for s, s_asgs in subnets.items():
if s not in all_subnets:
self.log.warning(
"invalid subnet %s for asgs: %s",
s, [a['AutoScalingGroupName'] for a in s_asgs])
continue
for a in s_asgs:
a['VpcId'] = all_subnets[s]['VpcId']
return super(VpcIdFilter, self).process(asgs)
@actions.register('tag-trim')
class GroupTagTrim(TagTrim):
max_tag_count = 10
def process_tag_removal(self, resource, candidates):
client = local_session(
self.manager.session_factory).client('autoscaling')
tags = []
for t in candidates:
tags.append(
dict(Key=t, ResourceType='auto-scaling-group',
ResourceId=resource['AutoScalingGroupName']))
client.delete_tags(Tags=tags)
@filters.register('capacity-delta')
class CapacityDelta(Filter):
    schema = type_schema('capacity-delta')
def process(self, asgs, event=None):
return [a for a in asgs
if len(a['Instances']) < a['DesiredCapacity'] or
len(a['Instances']) < a['MinSize']]
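    # Illustrative policy sketch (hypothetical name): report asgs running
    # fewer instances than their desired capacity or minimum size.
    #
    #   policies:
    #     - name: asg-under-capacity
    #       resource: asg
    #       filters:
    #         - type: capacity-delta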
@actions.register('resize')
class Resize(BaseAction):
schema = type_schema(
'resize',
# min_size={'type': 'string'},
# max_size={'type': 'string'},
desired_size={'type': 'string'},
required=('desired_size',))
def validate(self):
if self.data['desired_size'] != 'current':
raise FilterValidationError(
"only resizing desired/min to current capacity is supported")
return self
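    # Illustrative policy sketch (hypothetical name): shrink desired capacity
    # and minimum size down to the number of instances currently running.
    #
    #   policies:
    #     - name: asg-resize-to-current
    #       resource: asg
    #       filters:
    #         - type: capacity-delta
    #       actions:
    #         - type: resize
    #           desired_size: current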
def process(self, asgs):
client = local_session(self.manager.session_factory).client(
'autoscaling')
for a in asgs:
current_size = len(a['Instances'])
min_size = a['MinSize']
desired = a['DesiredCapacity']
log.debug('desired %d to %s, min %d to %d',
desired, current_size, min_size, current_size)
self.manager.retry(
client.update_auto_scaling_group,
AutoScalingGroupName=a['AutoScalingGroupName'],
DesiredCapacity=min((current_size, desired)),
MinSize=min((current_size, min_size)))
@actions.register('remove-tag')
@actions.register('untag')
@actions.register('unmark')
class RemoveTag(BaseAction):
schema = type_schema(
'remove-tag',
aliases=('untag', 'unmark'),
key={'type': 'string'})
batch_size = 1
def process(self, asgs):
error = False
key = self.data.get('key', DEFAULT_TAG)
with self.executor_factory(max_workers=3) as w:
futures = {}
for asg_set in chunks(asgs, self.batch_size):
futures[w.submit(self.process_asg_set, asg_set, key)] = asg_set
for f in as_completed(futures):
asg_set = futures[f]
if f.exception():
error = f.exception()
self.log.exception(
"Exception untagging asg:%s tag:%s error:%s" % (
", ".join([a['AutoScalingGroupName']
for a in asg_set]),
self.data.get('key', DEFAULT_TAG),
f.exception()))
if error:
raise error
def process_asg_set(self, asgs, key):
session = local_session(self.manager.session_factory)
client = session.client('autoscaling')
tags = [dict(
Key=key, ResourceType='auto-scaling-group',
ResourceId=a['AutoScalingGroupName']) for a in asgs]
self.manager.retry(client.delete_tags, Tags=tags)
@actions.register('tag')
@actions.register('mark')
class Tag(BaseAction):
schema = type_schema(
'tag',
key={'type': 'string'},
value={'type': 'string'},
# Backwards compatibility
tag={'type': 'string'},
msg={'type': 'string'},
propagate={'type': 'boolean'})
batch_size = 1
def process(self, asgs):
key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
value = self.data.get(
'value', self.data.get(
'msg', 'AutoScaleGroup does not meet policy guidelines'))
return self.tag(asgs, key, value)
def tag(self, asgs, key, value):
error = None
with self.executor_factory(max_workers=3) as w:
futures = {}
for asg_set in chunks(asgs, self.batch_size):
futures[w.submit(
self.process_asg_set, asg_set, key, value)] = asg_set
for f in as_completed(futures):
asg_set = futures[f]
if f.exception():
error = f.exception()
self.log.exception(
"Exception untagging tag:%s error:%s asg:%s" % (
self.data.get('key', DEFAULT_TAG),
f.exception(),
", ".join([a['AutoScalingGroupName']
for a in asg_set])))
if error:
raise error
def process_asg_set(self, asgs, key, value):
session = local_session(self.manager.session_factory)
client = session.client('autoscaling')
propagate = self.data.get('propagate_launch', True)
tags = [
dict(Key=key, ResourceType='auto-scaling-group', Value=value,
PropagateAtLaunch=propagate,
ResourceId=a['AutoScalingGroupName']) for a in asgs]
self.manager.retry(client.create_or_update_tags, Tags=tags)
@actions.register('propagate-tags')
class PropagateTags(BaseAction):
"""Propagate tags to an asg instances.
In AWS changing an asg tag does not propagate to instances.
This action exists to do that, and can also trim older tags
not present on the asg anymore that are present on instances.
"""
schema = type_schema(
'propagate-tags',
tags={'type': 'array', 'items': {'type': 'string'}},
trim={'type': 'boolean'})
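    # Illustrative policy sketch (tag keys are assumptions): push selected asg
    # tags down to instances and prune stale instance tags.
    #
    #   policies:
    #     - name: asg-propagate-tags
    #       resource: asg
    #       actions:
    #         - type: propagate-tags
    #           tags:
    #             - Owner
    #             - App
    #           trim: true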
def validate(self):
if not isinstance(self.data.get('tags', []), (list, tuple)):
raise ValueError("No tags specified")
return self
def process(self, asgs):
if not asgs:
return
if self.data.get('trim', False):
self.instance_map = self.get_instance_map(asgs)
with self.executor_factory(max_workers=10) as w:
instance_count = sum(list(w.map(self.process_asg, asgs)))
self.log.info("Applied tags to %d instances" % instance_count)
def process_asg(self, asg):
client = local_session(self.manager.session_factory).client('ec2')
instance_ids = [i['InstanceId'] for i in asg['Instances']]
tag_map = {t['Key']: t['Value'] for t in asg.get('Tags', [])
if t['PropagateAtLaunch']
and not t['Key'].startswith('aws:')}
if self.data.get('tags'):
tag_map = {
k: v for k, v in tag_map.items()
if k in self.data['tags']}
tag_set = set(tag_map)
if self.data.get('trim', False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun:
client.create_tags(
Resources=instance_ids,
Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
return len(instance_ids)
def prune_instance_tags(self, client, asg, tag_set, instances):
"""Remove tags present on all asg instances which are not present
on the asg.
"""
instance_tags = Counter()
instance_count = len(instances)
remove_tags = []
extra_tags = []
for i in instances:
instance_tags.update([
t['Key'] for t in i['Tags']
if not t['Key'].startswith('aws:')])
for k, v in instance_tags.items():
if not v >= instance_count:
extra_tags.append(k)
continue
if k not in tag_set:
remove_tags.append(k)
if remove_tags:
log.debug("Pruning asg:%s instances:%d of old tags: %s" % (
asg['AutoScalingGroupName'], instance_count, remove_tags))
if extra_tags:
log.debug("Asg: %s has uneven tags population: %s" % (
asg['AutoScalingGroupName'], instance_tags))
# Remove orphan tags
remove_tags.extend(extra_tags)
if not self.manager.config.dryrun:
client.delete_tags(
Resources=[i['InstanceId'] for i in instances],
Tags=[{'Key': t} for t in remove_tags])
def get_instance_map(self, asgs):
instance_ids = [
i['InstanceId'] for i in
list(itertools.chain(*[
g['Instances']
for g in asgs if g['Instances']]))]
if not instance_ids:
return {}
instances = query_instances(
local_session(self.manager.session_factory),
InstanceIds=instance_ids)
return {i['InstanceId']: i for i in instances}
@actions.register('rename-tag')
class RenameTag(BaseAction):
"""Rename a tag on an AutoScaleGroup.
"""
schema = type_schema(
'rename-tag', required=['source', 'dest'],
propagate={'type': 'boolean'},
source={'type': 'string'},
dest={'type': 'string'})
def process(self, asgs):
source = self.data.get('source')
dest = self.data.get('dest')
count = len(asgs)
filtered = []
for a in asgs:
for t in a.get('Tags'):
if t['Key'] == source:
filtered.append(a)
break
asgs = filtered
self.log.info("Filtered from %d asgs to %d" % (
count, len(asgs)))
self.log.info("Renaming %s to %s on %d asgs" % (
source, dest, len(filtered)))
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_asg, asgs))
def process_asg(self, asg):
"""Move source tag to destination tag.
Check tag count on asg
Create new tag tag
Delete old tag
Check tag count on instance
Create new tag
Delete old tag
"""
source_tag = self.data.get('source')
tag_map = {t['Key']: t for t in asg.get('Tags', [])}
source = tag_map[source_tag]
destination_tag = self.data.get('dest')
propagate = self.data.get('propagate', True)
client = local_session(
self.manager.session_factory).client('autoscaling')
        # Technically it would be safer to create the new tag first, but that
        # can run into the max-tags constraint, so delete first.
#
# delete_first = len([t for t in tag_map if not t.startswith('aws:')])
client.delete_tags(Tags=[
{'ResourceId': asg['AutoScalingGroupName'],
'ResourceType': 'auto-scaling-group',
'Key': source_tag,
'Value': source['Value']}])
client.create_or_update_tags(Tags=[
{'ResourceId': asg['AutoScalingGroupName'],
'ResourceType': 'auto-scaling-group',
'PropagateAtLaunch': propagate,
'Key': destination_tag,
'Value': source['Value']}])
        self.propagate_instance_tag(source, destination_tag, asg)
    def propagate_instance_tag(self, source, destination_tag, asg):
client = local_session(self.manager.session_factory).client('ec2')
client.delete_tags(
Resources=[i['InstanceId'] for i in asg['Instances']],
Tags=[{"Key": source['Key']}])
client.create_tags(
Resources=[i['InstanceId'] for i in asg['Instances']],
Tags=[{'Key': source['Key'], 'Value': source['Value']}])
@actions.register('mark-for-op')
class MarkForOp(Tag):
schema = type_schema(
'mark-for-op',
op={'enum': ['suspend', 'resume', 'delete']},
key={'type': 'string'},
tag={'type': 'string'},
message={'type': 'string'},
days={'type': 'number', 'minimum': 0})
default_template = (
'AutoScaleGroup does not meet org policy: {op}@{action_date}')
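    # Illustrative policy sketch (thresholds are assumptions): mark asgs whose
    # launch config AMI is older than 180 days for suspension in 4 days.
    #
    #   policies:
    #     - name: asg-old-image-mark
    #       resource: asg
    #       filters:
    #         - type: image-age
    #           days: 180
    #       actions:
    #         - type: mark-for-op
    #           op: suspend
    #           days: 4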
def process(self, asgs):
msg_tmpl = self.data.get('message', self.default_template)
key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
op = self.data.get('op', 'suspend')
date = self.data.get('days', 4)
n = datetime.now(tz=tzutc())
stop_date = n + timedelta(days=date)
try:
msg = msg_tmpl.format(
op=op, action_date=stop_date.strftime('%Y/%m/%d'))
except Exception:
self.log.warning("invalid template %s" % msg_tmpl)
msg = self.default_template.format(
op=op, action_date=stop_date.strftime('%Y/%m/%d'))
self.log.info("Tagging %d asgs for %s on %s" % (
len(asgs), op, stop_date.strftime('%Y/%m/%d')))
self.tag(asgs, key, msg)
@actions.register('suspend')
class Suspend(BaseAction):
schema = type_schema('suspend')
def process(self, asgs):
original_count = len(asgs)
asgs = [a for a in asgs if a['Instances']]
self.log.debug("Filtered from %d to %d asgs with instances" % (
original_count, len(asgs)))
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_asg, asgs))
def process_asg(self, asg):
"""Multistep process to stop an asg aprori of setup
- suspend processes
- stop instances
"""
session = local_session(self.manager.session_factory)
asg_client = session.client('autoscaling')
self.manager.retry(
asg_client.suspend_processes,
AutoScalingGroupName=asg['AutoScalingGroupName'])
ec2_client = session.client('ec2')
try:
instance_ids = [i['InstanceId'] for i in asg['Instances']]
if not instance_ids:
return
retry = get_retry((
'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
retry(ec2_client.stop_instances, InstanceIds=instance_ids)
except ClientError as e:
if e.response['Error']['Code'] in (
'InvalidInstanceID.NotFound',
'IncorrectInstanceState'):
log.warning("Erroring stopping asg instances %s %s" % (
asg['AutoScalingGroupName'], e))
return
raise
@actions.register('resume')
class Resume(BaseAction):
"""Resume a suspended autoscale group and its instances
"""
schema = type_schema('resume', delay={'type': 'number'})
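    # Illustrative sketch of paired policies (names are assumptions) using the
    # offhour/onhour filters registered above with their default schedules:
    # suspend asgs in the evening, resume them in the morning with extra time
    # for instances to pass health checks.
    #
    #   policies:
    #     - name: asg-offhours-suspend
    #       resource: asg
    #       filters:
    #         - type: offhour
    #       actions:
    #         - type: suspend
    #     - name: asg-onhours-resume
    #       resource: asg
    #       filters:
    #         - type: onhour
    #       actions:
    #         - type: resume
    #           delay: 60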
def process(self, asgs):
original_count = len(asgs)
asgs = [a for a in asgs if a['SuspendedProcesses']]
self.delay = self.data.get('delay', 30)
self.log.debug("Filtered from %d to %d suspended asgs" % (
original_count, len(asgs)))
with self.executor_factory(max_workers=3) as w:
futures = {}
for a in asgs:
futures[w.submit(self.resume_asg_instances, a)] = a
for f in as_completed(futures):
if f.exception():
log.error("Traceback resume asg:%s instances error:%s" % (
futures[f]['AutoScalingGroupName'],
f.exception()))
continue
log.debug("Sleeping for asg health check grace")
time.sleep(self.delay)
with self.executor_factory(max_workers=3) as w:
futures = {}
for a in asgs:
futures[w.submit(self.resume_asg, a)] = a
for f in as_completed(futures):
if f.exception():
log.error("Traceback resume asg:%s error:%s" % (
futures[f]['AutoScalingGroupName'],
f.exception()))
def resume_asg_instances(self, asg):
"""Resume asg instances.
"""
session = local_session(self.manager.session_factory)
ec2_client = session.client('ec2')
instance_ids = [i['InstanceId'] for i in asg['Instances']]
if not instance_ids:
return
ec2_client.start_instances(InstanceIds=instance_ids)
def resume_asg(self, asg):
"""Resume asg processes.
"""
session = local_session(self.manager.session_factory)
asg_client = session.client('autoscaling')
self.manager.retry(
asg_client.resume_processes,
AutoScalingGroupName=asg['AutoScalingGroupName'])
@actions.register('delete')
class Delete(BaseAction):
schema = type_schema('delete', force={'type': 'boolean'})
def process(self, asgs):
with self.executor_factory(max_workers=5) as w:
list(w.map(self.process_asg, asgs))
def process_asg(self, asg):
force_delete = self.data.get('force', False)
if force_delete:
log.info('Forcing deletion of Auto Scaling group %s' % (
asg['AutoScalingGroupName']))
session = local_session(self.manager.session_factory)
asg_client = session.client('autoscaling')
try:
asg_client.delete_auto_scaling_group(
AutoScalingGroupName=asg['AutoScalingGroupName'],
ForceDelete=force_delete)
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
log.warning("Erroring deleting asg %s %s" % (
asg['AutoScalingGroupName'], e))
return
raise
@resources.register('launch-config')
class LaunchConfig(QueryResourceManager):
resource_type = "aws.autoscaling.launchConfigurationName"
def augment(self, resources):
for r in resources:
r.pop('UserData', None)
return resources
@LaunchConfig.filter_registry.register('age')
class LaunchConfigAge(AgeFilter):
date_attribute = "CreatedTime"
schema = type_schema(
'age',
op={'type': 'string', 'enum': OPERATORS.keys()},
days={'type': 'number'})
@LaunchConfig.filter_registry.register('unused')
class UnusedLaunchConfig(Filter):
schema = type_schema('unused')
def process(self, configs, event=None):
asgs = self.manager._cache.get(
{'region': self.manager.config.region,
'resource': 'asg'})
if asgs is None:
self.log.debug(
"Querying asgs to determine unused launch configs")
asg_manager = ASG(self.manager.ctx, {})
asgs = asg_manager.resources()
self.used = set([
a.get('LaunchConfigurationName', a['AutoScalingGroupName'])
for a in asgs])
return super(UnusedLaunchConfig, self).process(configs)
def __call__(self, config):
return config['LaunchConfigurationName'] not in self.used
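    # Illustrative policy sketch (hypothetical name): delete launch configs no
    # longer referenced by any asg.
    #
    #   policies:
    #     - name: unused-launch-configs
    #       resource: launch-config
    #       filters:
    #         - type: unused
    #       actions:
    #         - type: delete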
@LaunchConfig.action_registry.register('delete')
class LaunchConfigDelete(BaseAction):
schema = type_schema('delete')
def process(self, configs):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_config, configs))
def process_config(self, config):
session = local_session(self.manager.session_factory)
client = session.client('autoscaling')
try:
client.delete_launch_configuration(
LaunchConfigurationName=config[
'LaunchConfigurationName'])
except ClientError as e:
# Catch already deleted
if e.response['Error']['Code'] == 'ValidationError':
return
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PodSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_policy=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, hostname=None, image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, restart_policy=None, scheduler_name=None, security_context=None, service_account=None, service_account_name=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, volumes=None):
"""
V1PodSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'active_deadline_seconds': 'int',
'affinity': 'V1Affinity',
'automount_service_account_token': 'bool',
'containers': 'list[V1Container]',
'dns_policy': 'str',
'host_aliases': 'list[V1HostAlias]',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'hostname': 'str',
'image_pull_secrets': 'list[V1LocalObjectReference]',
'init_containers': 'list[V1Container]',
'node_name': 'str',
'node_selector': 'dict(str, str)',
'restart_policy': 'str',
'scheduler_name': 'str',
'security_context': 'V1PodSecurityContext',
'service_account': 'str',
'service_account_name': 'str',
'subdomain': 'str',
'termination_grace_period_seconds': 'int',
'tolerations': 'list[V1Toleration]',
'volumes': 'list[V1Volume]'
}
self.attribute_map = {
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_policy': 'dnsPolicy',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'restart_policy': 'restartPolicy',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
self._active_deadline_seconds = active_deadline_seconds
self._affinity = affinity
self._automount_service_account_token = automount_service_account_token
self._containers = containers
self._dns_policy = dns_policy
self._host_aliases = host_aliases
self._host_ipc = host_ipc
self._host_network = host_network
self._host_pid = host_pid
self._hostname = hostname
self._image_pull_secrets = image_pull_secrets
self._init_containers = init_containers
self._node_name = node_name
self._node_selector = node_selector
self._restart_policy = restart_policy
self._scheduler_name = scheduler_name
self._security_context = security_context
self._service_account = service_account
self._service_account_name = service_account_name
self._subdomain = subdomain
self._termination_grace_period_seconds = termination_grace_period_seconds
self._tolerations = tolerations
self._volumes = volumes
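    # Illustrative construction sketch (not part of the generated model; the
    # `client` package path and V1Container usage are assumptions about the
    # surrounding python client):
    #
    #     from kubernetes import client
    #
    #     spec = client.V1PodSpec(
    #         restart_policy='Never',
    #         containers=[client.V1Container(name='main', image='busybox')])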
@property
def active_deadline_seconds(self):
"""
Gets the active_deadline_seconds of this V1PodSpec.
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
:return: The active_deadline_seconds of this V1PodSpec.
:rtype: int
"""
return self._active_deadline_seconds
@active_deadline_seconds.setter
def active_deadline_seconds(self, active_deadline_seconds):
"""
Sets the active_deadline_seconds of this V1PodSpec.
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
:param active_deadline_seconds: The active_deadline_seconds of this V1PodSpec.
:type: int
"""
self._active_deadline_seconds = active_deadline_seconds
@property
def affinity(self):
"""
Gets the affinity of this V1PodSpec.
If specified, the pod's scheduling constraints
:return: The affinity of this V1PodSpec.
:rtype: V1Affinity
"""
return self._affinity
@affinity.setter
def affinity(self, affinity):
"""
Sets the affinity of this V1PodSpec.
If specified, the pod's scheduling constraints
:param affinity: The affinity of this V1PodSpec.
:type: V1Affinity
"""
self._affinity = affinity
@property
def automount_service_account_token(self):
"""
Gets the automount_service_account_token of this V1PodSpec.
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
:return: The automount_service_account_token of this V1PodSpec.
:rtype: bool
"""
return self._automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount_service_account_token):
"""
Sets the automount_service_account_token of this V1PodSpec.
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
:param automount_service_account_token: The automount_service_account_token of this V1PodSpec.
:type: bool
"""
self._automount_service_account_token = automount_service_account_token
@property
def containers(self):
"""
Gets the containers of this V1PodSpec.
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
:return: The containers of this V1PodSpec.
:rtype: list[V1Container]
"""
return self._containers
@containers.setter
def containers(self, containers):
"""
Sets the containers of this V1PodSpec.
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
:param containers: The containers of this V1PodSpec.
:type: list[V1Container]
"""
if containers is None:
raise ValueError("Invalid value for `containers`, must not be `None`")
self._containers = containers
@property
def dns_policy(self):
"""
Gets the dns_policy of this V1PodSpec.
Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
:return: The dns_policy of this V1PodSpec.
:rtype: str
"""
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
"""
Sets the dns_policy of this V1PodSpec.
Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
:param dns_policy: The dns_policy of this V1PodSpec.
:type: str
"""
self._dns_policy = dns_policy
@property
def host_aliases(self):
"""
Gets the host_aliases of this V1PodSpec.
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
:return: The host_aliases of this V1PodSpec.
:rtype: list[V1HostAlias]
"""
return self._host_aliases
@host_aliases.setter
def host_aliases(self, host_aliases):
"""
Sets the host_aliases of this V1PodSpec.
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
:param host_aliases: The host_aliases of this V1PodSpec.
:type: list[V1HostAlias]
"""
self._host_aliases = host_aliases
@property
def host_ipc(self):
"""
Gets the host_ipc of this V1PodSpec.
Use the host's ipc namespace. Optional: Default to false.
:return: The host_ipc of this V1PodSpec.
:rtype: bool
"""
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
"""
Sets the host_ipc of this V1PodSpec.
Use the host's ipc namespace. Optional: Default to false.
:param host_ipc: The host_ipc of this V1PodSpec.
:type: bool
"""
self._host_ipc = host_ipc
@property
def host_network(self):
"""
Gets the host_network of this V1PodSpec.
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.
:return: The host_network of this V1PodSpec.
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""
Sets the host_network of this V1PodSpec.
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.
:param host_network: The host_network of this V1PodSpec.
:type: bool
"""
self._host_network = host_network
@property
def host_pid(self):
"""
Gets the host_pid of this V1PodSpec.
Use the host's pid namespace. Optional: Default to false.
:return: The host_pid of this V1PodSpec.
:rtype: bool
"""
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
"""
Sets the host_pid of this V1PodSpec.
Use the host's pid namespace. Optional: Default to false.
:param host_pid: The host_pid of this V1PodSpec.
:type: bool
"""
self._host_pid = host_pid
@property
def hostname(self):
"""
Gets the hostname of this V1PodSpec.
Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.
:return: The hostname of this V1PodSpec.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this V1PodSpec.
Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.
:param hostname: The hostname of this V1PodSpec.
:type: str
"""
self._hostname = hostname
@property
def image_pull_secrets(self):
"""
Gets the image_pull_secrets of this V1PodSpec.
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
:return: The image_pull_secrets of this V1PodSpec.
:rtype: list[V1LocalObjectReference]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""
Sets the image_pull_secrets of this V1PodSpec.
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
:param image_pull_secrets: The image_pull_secrets of this V1PodSpec.
:type: list[V1LocalObjectReference]
"""
self._image_pull_secrets = image_pull_secrets
@property
def init_containers(self):
"""
Gets the init_containers of this V1PodSpec.
        List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
:return: The init_containers of this V1PodSpec.
:rtype: list[V1Container]
"""
return self._init_containers
@init_containers.setter
def init_containers(self, init_containers):
"""
Sets the init_containers of this V1PodSpec.
        List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
:param init_containers: The init_containers of this V1PodSpec.
:type: list[V1Container]
"""
self._init_containers = init_containers
@property
def node_name(self):
"""
Gets the node_name of this V1PodSpec.
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
:return: The node_name of this V1PodSpec.
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""
Sets the node_name of this V1PodSpec.
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
:param node_name: The node_name of this V1PodSpec.
:type: str
"""
self._node_name = node_name
@property
def node_selector(self):
"""
Gets the node_selector of this V1PodSpec.
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
:return: The node_selector of this V1PodSpec.
:rtype: dict(str, str)
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""
Sets the node_selector of this V1PodSpec.
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
:param node_selector: The node_selector of this V1PodSpec.
:type: dict(str, str)
"""
self._node_selector = node_selector
@property
def restart_policy(self):
"""
Gets the restart_policy of this V1PodSpec.
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
:return: The restart_policy of this V1PodSpec.
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""
Sets the restart_policy of this V1PodSpec.
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
:param restart_policy: The restart_policy of this V1PodSpec.
:type: str
"""
self._restart_policy = restart_policy
@property
def scheduler_name(self):
"""
Gets the scheduler_name of this V1PodSpec.
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
:return: The scheduler_name of this V1PodSpec.
:rtype: str
"""
return self._scheduler_name
@scheduler_name.setter
def scheduler_name(self, scheduler_name):
"""
Sets the scheduler_name of this V1PodSpec.
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
:param scheduler_name: The scheduler_name of this V1PodSpec.
:type: str
"""
self._scheduler_name = scheduler_name
@property
def security_context(self):
"""
Gets the security_context of this V1PodSpec.
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:return: The security_context of this V1PodSpec.
:rtype: V1PodSecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""
Sets the security_context of this V1PodSpec.
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param security_context: The security_context of this V1PodSpec.
:type: V1PodSecurityContext
"""
self._security_context = security_context
@property
def service_account(self):
"""
Gets the service_account of this V1PodSpec.
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
:return: The service_account of this V1PodSpec.
:rtype: str
"""
return self._service_account
@service_account.setter
def service_account(self, service_account):
"""
Sets the service_account of this V1PodSpec.
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
:param service_account: The service_account of this V1PodSpec.
:type: str
"""
self._service_account = service_account
@property
def service_account_name(self):
"""
Gets the service_account_name of this V1PodSpec.
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
:return: The service_account_name of this V1PodSpec.
:rtype: str
"""
return self._service_account_name
@service_account_name.setter
def service_account_name(self, service_account_name):
"""
Sets the service_account_name of this V1PodSpec.
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
:param service_account_name: The service_account_name of this V1PodSpec.
:type: str
"""
self._service_account_name = service_account_name
@property
def subdomain(self):
"""
Gets the subdomain of this V1PodSpec.
If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.
:return: The subdomain of this V1PodSpec.
:rtype: str
"""
return self._subdomain
@subdomain.setter
def subdomain(self, subdomain):
"""
Sets the subdomain of this V1PodSpec.
If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.
:param subdomain: The subdomain of this V1PodSpec.
:type: str
"""
self._subdomain = subdomain
@property
def termination_grace_period_seconds(self):
"""
Gets the termination_grace_period_seconds of this V1PodSpec.
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
:return: The termination_grace_period_seconds of this V1PodSpec.
:rtype: int
"""
return self._termination_grace_period_seconds
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, termination_grace_period_seconds):
"""
Sets the termination_grace_period_seconds of this V1PodSpec.
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
:param termination_grace_period_seconds: The termination_grace_period_seconds of this V1PodSpec.
:type: int
"""
self._termination_grace_period_seconds = termination_grace_period_seconds
@property
def tolerations(self):
"""
Gets the tolerations of this V1PodSpec.
If specified, the pod's tolerations.
:return: The tolerations of this V1PodSpec.
:rtype: list[V1Toleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""
Sets the tolerations of this V1PodSpec.
If specified, the pod's tolerations.
:param tolerations: The tolerations of this V1PodSpec.
:type: list[V1Toleration]
"""
self._tolerations = tolerations
@property
def volumes(self):
"""
Gets the volumes of this V1PodSpec.
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
:return: The volumes of this V1PodSpec.
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""
Sets the volumes of this V1PodSpec.
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
:param volumes: The volumes of this V1PodSpec.
:type: list[V1Volume]
"""
self._volumes = volumes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1PodSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
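# Illustrative usage sketch (not part of the generated model; it assumes a
# V1Container class is available from the same generated package and that
# V1PodSpec() can be constructed without arguments, which may vary between
# client versions):
#
#     spec = V1PodSpec()
#     spec.init_containers = [V1Container(name="init-db", image="busybox")]
#     spec.node_selector = {"disktype": "ssd"}
#     spec.restart_policy = "Always"
#     spec.termination_grace_period_seconds = 30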
|
|
"""
Forward Simulation on a Tree Mesh
=================================
Here we use the module *SimPEG.electromagnetics.frequency_domain* to simulate the
FDEM response for an airborne survey using an OcTree mesh and a
conductivity/resistivity model.
To limit computational demand, we simulate airborne data at a single frequency
for a vertical coplanar survey geometry. This tutorial can be easily adapted to
simulate data at many frequencies. For this tutorial, we focus on the following:
- How to define the transmitters and receivers
- How to define the survey
- How to define the topography
- How to solve the FDEM problem on OcTree meshes
- The units of the conductivity/resistivity model and resulting data
Please note that we have used a coarse mesh to shorten the time of the simulation.
Proper discretization is required to simulate the fields at each frequency with
sufficient accuracy.
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import plot2Ddata, surface2ind_topo
from SimPEG import maps
import SimPEG.electromagnetics.frequency_domain as fdem
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
save_file = False
# sphinx_gallery_thumbnail_number = 2
###############################################################
# Defining Topography
# -------------------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file. Here we define flat topography; however, more
# complex topographies can be considered.
#
xx, yy = np.meshgrid(np.linspace(-3000, 3000, 101), np.linspace(-3000, 3000, 101))
zz = np.zeros(np.shape(xx))
topo_xyz = np.c_[mkvc(xx), mkvc(yy), mkvc(zz)]
#####################################################################
# Create Airborne Survey
# ----------------------
#
# For this example, the survey consists of a uniform grid of airborne
# measurements. To save time, we will only compute the response for a single
# frequency.
#
# Frequencies being predicted
frequencies = [100, 500, 2500]
# Defining transmitter locations
N = 9
xtx, ytx, ztx = np.meshgrid(np.linspace(-200, 200, N), np.linspace(-200, 200, N), [40])
source_locations = np.c_[mkvc(xtx), mkvc(ytx), mkvc(ztx)]
ntx = np.size(xtx)
# Define receiver locations
xrx, yrx, zrx = np.meshgrid(np.linspace(-200, 200, N), np.linspace(-200, 200, N), [20])
receiver_locations = np.c_[mkvc(xrx), mkvc(yrx), mkvc(zrx)]
source_list = [] # Create empty list to store sources
# Each unique location and frequency defines a new transmitter
for ii in range(len(frequencies)):
for jj in range(ntx):
# Define receivers of different type at each location
bzr_receiver = fdem.receivers.PointMagneticFluxDensitySecondary(
receiver_locations[jj, :], "z", "real"
)
bzi_receiver = fdem.receivers.PointMagneticFluxDensitySecondary(
receiver_locations[jj, :], "z", "imag"
)
receivers_list = [bzr_receiver, bzi_receiver]
# Must define the transmitter properties and associated receivers
source_list.append(
fdem.sources.MagDipole(
receivers_list,
frequencies[ii],
source_locations[jj],
orientation="z",
moment=100,
)
)
survey = fdem.Survey(source_list)
###############################################################
# Create OcTree Mesh
# ------------------
#
# Here we define the OcTree mesh that is used for this example.
# We chose to design a coarser mesh to decrease the run time.
# When designing a mesh to solve practical frequency domain problems:
#
# - Your smallest cell size should be 10%-20% the size of your smallest skin depth
# - The thickness of your padding needs to be 2-3 times larger than your largest skin depth
# - The skin depth is ~500*np.sqrt(rho/f)
#
#
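# As a quick check of the guidelines above (a minimal sketch; 100 Ohm m is the
# resistivity corresponding to the 1e-2 S/m background conductivity defined
# later in this tutorial):
skin_depths = 500.0 * np.sqrt(100.0 / np.array(frequencies))
print(skin_depths)  # ~[500.0, 223.6, 100.0] m for 100, 500 and 2500 Hz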
dh = 25.0 # base cell width
dom_width = 3000.0 # domain width
nbc = 2 ** int(np.round(np.log(dom_width / dh) / np.log(2.0))) # num. base cells
# Define the base mesh
h = [(dh, nbc)]
mesh = TreeMesh([h, h, h], x0="CCC")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
mesh, topo_xyz, octree_levels=[0, 0, 0, 1], method="surface", finalize=False
)
# Mesh refinement near transmitters and receivers
mesh = refine_tree_xyz(
mesh, receiver_locations, octree_levels=[2, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, yp, zp = np.meshgrid([-250.0, 250.0], [-250.0, 250.0], [-300.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)]
mesh = refine_tree_xyz(mesh, xyz, octree_levels=[0, 2, 4], method="box", finalize=False)
mesh.finalize()
###############################################################
# Defining the Conductivity/Resistivity Model and Mapping
# -------------------------------------------------------
#
# Here, we create the model that will be used to predict frequency
# domain data and the mapping from the model to the mesh. The model
# consists of a conductive block within a more resistive
# background.
#
# Conductivity in S/m (or resistivity in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-2
block_conductivity = 1e1
# Find cells that are active in the forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, topo_xyz)
# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
# Define model. Models in SimPEG are vector arrays
model = background_conductivity * np.ones(ind_active.sum())
ind_block = (
(mesh.gridCC[ind_active, 0] < 100.0)
& (mesh.gridCC[ind_active, 0] > -100.0)
& (mesh.gridCC[ind_active, 1] < 100.0)
& (mesh.gridCC[ind_active, 1] > -100.0)
& (mesh.gridCC[ind_active, 2] > -275.0)
& (mesh.gridCC[ind_active, 2] < -75.0)
)
model[ind_block] = block_conductivity
# Plot Resistivity Model
mpl.rcParams.update({"font.size": 12})
fig = plt.figure(figsize=(7, 6))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
log_model = np.log10(model)
ax1 = fig.add_axes([0.13, 0.1, 0.6, 0.85])
mesh.plotSlice(
plotting_map * log_model,
normal="Y",
ax=ax1,
ind=int(mesh.hx.size / 2),
grid=True,
clim=(np.log10(background_conductivity), np.log10(block_conductivity)),
)
ax1.set_title("Conductivity Model at Y = 0 m")
ax2 = fig.add_axes([0.75, 0.1, 0.05, 0.85])
norm = mpl.colors.Normalize(
vmin=np.log10(background_conductivity), vmax=np.log10(block_conductivity)
)
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", format="$10^{%.1f}$"
)
cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
######################################################
# Simulation: Predicting FDEM Data
# --------------------------------
#
# Here we define the formulation for solving Maxwell's equations. Since we are
# measuring the magnetic flux density and working with a conductivity model,
# the EB formulation is the most natural. We must also remember to define
# the mapping for the conductivity model. If you defined a resistivity model,
# use the kwarg *rhoMap* instead of *sigmaMap*; see the sketch below.
#
simulation = fdem.simulation.Simulation3DMagneticFluxDensity(
mesh, survey=survey, sigmaMap=model_map, solver=Solver
)
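# If you instead defined a resistivity model, the equivalent call is sketched
# below (hedged; *resistivity_map* and *resistivity_model* are illustrative
# names that are not defined elsewhere in this tutorial):
#
# resistivity_map = maps.InjectActiveCells(mesh, ind_active, 1.0 / air_conductivity)
# resistivity_model = 1.0 / model
# simulation = fdem.simulation.Simulation3DMagneticFluxDensity(
#     mesh, survey=survey, rhoMap=resistivity_map, solver=Solver
# )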
######################################################
# Predict and Plot Data
# ---------------------
#
# Here we show how the simulation is used to predict data.
#
# Compute predicted data for your model.
dpred = simulation.dpred(model)
# Data are organized by frequency, then transmitter location, then receiver.
# There are len(frequencies) x ntx transmitters, and each transmitter has 2
# receivers (real and imaginary components). So first we pick out the real
# and imaginary data.
bz_real = dpred[0 : len(dpred) : 2]
bz_imag = dpred[1 : len(dpred) : 2]
# Then we reshape the data for plotting.
bz_real_plotting = np.reshape(bz_real, (len(frequencies), ntx))
bz_imag_plotting = np.reshape(bz_imag, (len(frequencies), ntx))
fig = plt.figure(figsize=(10, 4))
# Real Component
frequencies_index = 0
v_max = np.max(np.abs(bz_real_plotting[frequencies_index, :]))
ax1 = fig.add_axes([0.05, 0.05, 0.35, 0.9])
plot2Ddata(
receiver_locations[:, 0:2],
bz_real_plotting[frequencies_index, :],
ax=ax1,
ncontour=30,
clim=(-v_max, v_max),
contourOpts={"cmap": "bwr"},
)
ax1.set_title("Re[$B_z$] at 100 Hz")
ax2 = fig.add_axes([0.41, 0.05, 0.02, 0.9])
norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
)
cbar.set_label("$T$", rotation=270, labelpad=15, size=12)
# Imaginary Component
v_max = np.max(np.abs(bz_imag_plotting[frequencies_index, :]))
ax1 = fig.add_axes([0.55, 0.05, 0.35, 0.9])
plot2Ddata(
receiver_locations[:, 0:2],
bz_imag_plotting[frequencies_index, :],
ax=ax1,
ncontour=30,
clim=(-v_max, v_max),
contourOpts={"cmap": "bwr"},
)
ax1.set_title("Im[$B_z$] at 100 Hz")
ax2 = fig.add_axes([0.91, 0.05, 0.02, 0.9])
norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
cbar = mpl.colorbar.ColorbarBase(
ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
)
cbar.set_label("$T$", rotation=270, labelpad=15, size=12)
plt.show()
#######################################################
# Optional: Export Data
# ---------------------
#
# Write the true model, data and topography
#
if save_file:
dir_path = os.path.dirname(fdem.__file__).split(os.path.sep)[:-3]
dir_path.extend(["tutorials", "assets", "fdem"])
dir_path = os.path.sep.join(dir_path) + os.path.sep
# Write topography
fname = dir_path + "fdem_topo.txt"
np.savetxt(fname, np.c_[topo_xyz], fmt="%.4e")
# Write data with 2% noise added
fname = dir_path + "fdem_data.obs"
bz_real = bz_real + 1e-14 * np.random.rand(len(bz_real))
bz_imag = bz_imag + 1e-14 * np.random.rand(len(bz_imag))
f_vec = np.kron(frequencies, np.ones(ntx))
receiver_locations = np.kron(np.ones((len(frequencies), 1)), receiver_locations)
np.savetxt(fname, np.c_[f_vec, receiver_locations, bz_real, bz_imag], fmt="%.4e")
# Plot true model
output_model = plotting_map * model
output_model[np.isnan(output_model)] = 1e-8
fname = dir_path + "true_model.txt"
np.savetxt(fname, output_model, fmt="%.4e")
|
|
"""
Platform for Ecobee Thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.ecobee/
"""
import logging
from os import path
import voluptuous as vol
from homeassistant.components import ecobee
from homeassistant.components.climate import (
DOMAIN, STATE_COOL, STATE_HEAT, STATE_AUTO, STATE_IDLE, ClimateDevice,
ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_HUMIDITY_LOW, SUPPORT_TARGET_HUMIDITY_HIGH)
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_OFF, STATE_ON, ATTR_TEMPERATURE, TEMP_FAHRENHEIT)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_MIN_ON_TIME = 'fan_min_on_time'
ATTR_RESUME_ALL = 'resume_all'
DEFAULT_RESUME_ALL = False
TEMPERATURE_HOLD = 'temp'
VACATION_HOLD = 'vacation'
AWAY_MODE = 'awayMode'
DEPENDENCIES = ['ecobee']
SERVICE_SET_FAN_MIN_ON_TIME = 'ecobee_set_fan_min_on_time'
SERVICE_RESUME_PROGRAM = 'ecobee_resume_program'
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
})
RESUME_PROGRAM_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
})
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE |
SUPPORT_HOLD_MODE | SUPPORT_OPERATION_MODE |
SUPPORT_TARGET_HUMIDITY_LOW | SUPPORT_TARGET_HUMIDITY_HIGH)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Ecobee Thermostat Platform."""
if discovery_info is None:
return
data = ecobee.NETWORK
hold_temp = discovery_info['hold_temp']
_LOGGER.info(
"Loading ecobee thermostat component with hold_temp set to %s",
hold_temp)
devices = [Thermostat(data, index, hold_temp)
for index in range(len(data.ecobee.thermostats))]
add_devices(devices)
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(
DOMAIN, SERVICE_SET_FAN_MIN_ON_TIME, fan_min_on_time_set_service,
descriptions.get(SERVICE_SET_FAN_MIN_ON_TIME),
schema=SET_FAN_MIN_ON_TIME_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service,
descriptions.get(SERVICE_RESUME_PROGRAM),
schema=RESUME_PROGRAM_SCHEMA)
class Thermostat(ClimateDevice):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index, hold_temp):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
self._name = self.thermostat['name']
self.hold_temp = hold_temp
self.vacation = None
self._climate_list = self.climate_list
self._operation_list = ['auto', 'auxHeatOnly', 'cool',
'heat', 'off']
self.update_without_throttle = False
def update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat['name']
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat['runtime']['actualTemperature'] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredHeat'] / 10.0
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return None
if self.current_operation == STATE_HEAT:
return self.thermostat['runtime']['desiredHeat'] / 10.0
elif self.current_operation == STATE_COOL:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def desired_fan_mode(self):
"""Return the desired fan mode of operation."""
return self.thermostat['runtime']['desiredFanMode']
@property
def fan(self):
"""Return the current fan state."""
if 'fan' in self.thermostat['equipmentStatus']:
return STATE_ON
return STATE_OFF
@property
def current_hold_mode(self):
"""Return current hold mode."""
mode = self._current_hold_mode
return None if mode == AWAY_MODE else mode
@property
def _current_hold_mode(self):
events = self.thermostat['events']
for event in events:
if event['running']:
if event['type'] == 'hold':
if event['holdClimateRef'] == 'away':
if int(event['endDate'][0:4]) - \
int(event['startDate'][0:4]) <= 1:
# A temporary hold from away climate is a hold
return 'away'
# A permanent hold from away climate
return AWAY_MODE
elif event['holdClimateRef'] != "":
# Any other hold based on climate
return event['holdClimateRef']
# Any hold not based on a climate is a temp hold
return TEMPERATURE_HOLD
elif event['type'].startswith('auto'):
# All auto modes are treated as holds
return event['type'][4:].lower()
elif event['type'] == 'vacation':
self.vacation = event['name']
return VACATION_HOLD
return None
@property
def current_operation(self):
"""Return current operation."""
if self.operation_mode == 'auxHeatOnly' or \
self.operation_mode == 'heatPump':
return STATE_HEAT
return self.operation_mode
@property
def operation_list(self):
"""Return the operation modes list."""
return self._operation_list
@property
def operation_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self.thermostat['settings']['hvacMode']
@property
def mode(self):
"""Return current mode, as the user-visible name."""
cur = self.thermostat['program']['currentClimateRef']
climates = self.thermostat['program']['climates']
current = list(filter(lambda x: x['climateRef'] == cur, climates))
return current[0]['name']
@property
def fan_min_on_time(self):
"""Return current fan minimum on time."""
return self.thermostat['settings']['fanMinOnTime']
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
# Move these to Thermostat Device and make them global
status = self.thermostat['equipmentStatus']
operation = None
if status == '':
operation = STATE_IDLE
elif 'Cool' in status:
operation = STATE_COOL
elif 'auxHeat' in status:
operation = STATE_HEAT
elif 'heatPump' in status:
operation = STATE_HEAT
else:
operation = status
return {
"actual_humidity": self.thermostat['runtime']['actualHumidity'],
"fan": self.fan,
"mode": self.mode,
"operation": operation,
"climate_list": self.climate_list,
"fan_min_on_time": self.fan_min_on_time
}
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._current_hold_mode == AWAY_MODE
@property
def is_aux_heat_on(self):
"""Return true if aux heater."""
return 'auxHeat' in self.thermostat['equipmentStatus']
def turn_away_mode_on(self):
"""Turn away mode on by setting it on away hold indefinitely."""
if self._current_hold_mode != AWAY_MODE:
self.data.ecobee.set_climate_hold(self.thermostat_index, 'away',
'indefinite')
self.update_without_throttle = True
def turn_away_mode_off(self):
"""Turn away off."""
if self._current_hold_mode == AWAY_MODE:
self.data.ecobee.resume_program(self.thermostat_index)
self.update_without_throttle = True
def set_hold_mode(self, hold_mode):
"""Set hold mode (away, home, temp, sleep, etc.)."""
hold = self.current_hold_mode
if hold == hold_mode:
# no change, so no action required
return
elif hold_mode == 'None' or hold_mode is None:
if hold == VACATION_HOLD:
self.data.ecobee.delete_vacation(
self.thermostat_index, self.vacation)
else:
self.data.ecobee.resume_program(self.thermostat_index)
else:
if hold_mode == TEMPERATURE_HOLD:
self.set_temp_hold(self.current_temperature)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index, hold_mode, self.hold_preference())
self.update_without_throttle = True
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = (
self.thermostat['runtime']['desiredCool'] / 10.0)
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = (
self.thermostat['runtime']['desiredHeat'] / 10.0)
self.data.ecobee.set_hold_temp(self.thermostat_index,
cool_temp_setpoint, heat_temp_setpoint,
self.hold_preference())
_LOGGER.debug("Setting ecobee hold_temp to: heat=%s, is=%s, "
"cool=%s, is=%s", heat_temp, isinstance(
heat_temp, (int, float)), cool_temp,
isinstance(cool_temp, (int, float)))
self.update_without_throttle = True
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto."""
# Set arbitrary range when not in auto mode
if self.current_operation == STATE_HEAT:
heat_temp = temp
cool_temp = temp + 20
elif self.current_operation == STATE_COOL:
heat_temp = temp - 20
cool_temp = temp
else:
# In other modes (e.g. auto), set a range around the target temperature
heat_temp = temp - 10
cool_temp = temp + 10
self.set_auto_temp_hold(heat_temp, cool_temp)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.current_operation == STATE_AUTO and (low_temp is not None or
high_temp is not None):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error(
"Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
self.data.ecobee.set_humidity(self.thermostat_index, humidity)
def set_operation_mode(self, operation_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
self.data.ecobee.set_hvac_mode(self.thermostat_index, operation_mode)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(
self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, 'true' if resume_all else 'false')
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are 'useEndTime4hour',
# 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe'
default = self.thermostat['settings']['holdAction']
if default == 'nextTransition':
return default
# add further conditions if other hold durations should be
# supported; note that this should not include 'indefinite'
# as an indefinite away hold is interpreted as away_mode
return 'nextTransition'
@property
def climate_list(self):
"""Return the list of climates currently available."""
climates = self.thermostat['program']['climates']
return list(map((lambda x: x['name']), climates))
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Integration tests for nanoemoji
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otTables as ot
from lxml import etree # pytype: disable=import-error
from nanoemoji import config
import operator
import os
from pathlib import Path
from picosvg.svg import SVG
from picosvg.svg_transform import Affine2D
import pytest
import shutil
import subprocess
import tempfile
from test_helper import (
assert_expected_ttx,
color_font_config,
locate_test_file,
mkdtemp,
cleanup_temp_dirs,
run_nanoemoji,
run_nanoemoji_memoized,
)
@pytest.fixture(scope="module", autouse=True)
def _cleanup_temporary_dirs():
# The mkdtemp() docs say the user is responsible for deleting the directory
# and its contents when done with it. So we use an autouse fixture that
# automatically removes all the temp dirs at the end of the test module
yield
# teardown happens after the 'yield'
cleanup_temp_dirs()
def _svg_element_names(xpath, svg_content):
return tuple(
etree.QName(e).localname
for e in SVG.fromstring(svg_content).xpath(xpath.replace("/", "/svg:"))
)
def _svg_element_attributes(xpath, svg_content):
return SVG.fromstring(svg_content).xpath_one(xpath.replace("/", "/svg:")).attrib
def test_build_static_font_default_config_cli_svg_list():
tmp_dir = run_nanoemoji((locate_test_file("minimal_static/svg/61.svg"),))
font = TTFont(tmp_dir / "Font.ttf")
assert "fvar" not in font
def _build_and_check_ttx(config_overrides, svgs, expected_ttx):
config_file = mkdtemp() / "config.toml"
font_config, _ = color_font_config(
config_overrides, svgs, tmp_dir=config_file.parent
)
config.write(config_file, font_config)
print(config_file, font_config)
run_nanoemoji((str(config_file),), tmp_dir=config_file.parent)
font = TTFont(config_file.parent / "Font.ttf")
assert_expected_ttx(svgs, font, expected_ttx)
# Drop content outside viewbox
# https://github.com/googlefonts/nanoemoji/issues/200
def test_build_static_font_clipped():
_build_and_check_ttx({}, ("emoji_u25fd.svg",), "outside_viewbox_clipped_colr_1.ttx")
# Retain content outside viewbox
# https://github.com/googlefonts/nanoemoji/issues/200
def test_build_static_font_unclipped():
_build_and_check_ttx(
{"clip_to_viewbox": False},
("emoji_u25fd.svg",),
"outside_viewbox_not_clipped_colr_1.ttx",
)
def test_build_variable_font():
tmp_dir = run_nanoemoji((locate_test_file("minimal_vf/config.toml"),))
font = TTFont(tmp_dir / "MinimalVF.ttf")
assert "fvar" in font
def test_build_picosvg_font():
tmp_dir = run_nanoemoji((locate_test_file("minimal_static/config_picosvg.toml"),))
font = TTFont(tmp_dir / "Font.ttf")
# fill=none ellipse dropped, rect became path, everything is under a group
svg_content = font["SVG "].docList[0][0]
assert _svg_element_names("/svg/g/*", svg_content) == ("path", "path"), svg_content
def test_build_untouchedsvg_font():
tmp_dir = run_nanoemoji(
(locate_test_file("minimal_static/config_untouchedsvg.toml"),)
)
font = TTFont(tmp_dir / "Font.ttf")
assert "SVG " in font
font = TTFont(tmp_dir / "Font.ttf")
svg_content = font["SVG "].docList[0][0]
# one group introduced
assert _svg_element_names("/svg/*", svg_content) == ("g",), svg_content
# rect stayed rect, fill=none ellipse still around
assert _svg_element_names("/svg/g/*", svg_content) == (
"path",
"rect",
"ellipse",
), svg_content
# transform OT-SVG=>UPEM is not identity
g_attrs = _svg_element_attributes("/svg/g", svg_content)
assert "transform" in g_attrs
transform = Affine2D.fromstring(g_attrs["transform"])
assert transform != Affine2D.identity(), transform
def test_build_glyf_colr_1_and_picosvg_font():
tmp_dir = run_nanoemoji(
(locate_test_file("minimal_static/config_glyf_colr_1_and_picosvg.toml"),)
)
font = TTFont(tmp_dir / "Font.ttf")
assert "COLR" in font
assert "SVG " in font
def _assert_table_size_cmp(table_tag, op, original_font, original_cmd, **options):
cmd = original_cmd + tuple(f"--{'' if v else 'no'}{k}" for k, v in options.items())
tmp_dir = run_nanoemoji(cmd)
font = TTFont(next(tmp_dir.glob("*.ttf")))
new_size = len(font.getTableData(table_tag))
original_size = len(original_font.getTableData(table_tag))
assert op(new_size, original_size)
@pytest.mark.parametrize("use_zopflipng", [True, False])
@pytest.mark.parametrize("use_pngquant", [True, False])
def test_build_sbix_font(use_pngquant, use_zopflipng):
cmd = (locate_test_file("minimal_static/config_sbix.toml"),)
tmp_dir = run_nanoemoji_memoized(cmd)
font = TTFont(tmp_dir / "Font.ttf")
assert "sbix" in font
# check building the same font without zopflipng/pngquant produces a larger table
if not use_zopflipng or not use_pngquant:
_assert_table_size_cmp(
"sbix",
operator.gt,
font,
cmd,
use_pngquant=use_pngquant,
use_zopflipng=use_zopflipng,
)
@pytest.mark.parametrize("use_zopflipng", [True, False])
@pytest.mark.parametrize("use_pngquant", [True, False])
def test_build_cbdt_font(use_pngquant, use_zopflipng):
cmd = (locate_test_file("minimal_static/config_cbdt.toml"),)
tmp_dir = run_nanoemoji_memoized(cmd)
font = TTFont(tmp_dir / "Font.ttf")
assert "CBDT" in font
assert "CBLC" in font
# check building the same font without zopflipng/pngquant produces a larger table
if not use_zopflipng or not use_pngquant:
_assert_table_size_cmp(
"CBDT",
operator.gt,
font,
cmd,
use_pngquant=use_pngquant,
use_zopflipng=use_zopflipng,
)
@pytest.mark.parametrize(
"config_file",
[
"minimal_static/config_glyf_colr_1_and_picosvg_and_cbdt.toml",
# https://github.com/googlefonts/nanoemoji/issues/385
"compat_font/config.toml",
],
)
@pytest.mark.parametrize("use_zopflipng", [True, False])
@pytest.mark.parametrize("use_pngquant", [True, False])
def test_build_compat_font(config_file, use_pngquant, use_zopflipng):
cmd = (locate_test_file(config_file),)
tmp_dir = run_nanoemoji_memoized(cmd)
font = TTFont(tmp_dir / "Font.ttf")
assert "COLR" in font
assert "SVG " in font
assert "CBDT" in font
assert "CBLC" in font
# check building the same font without zopflipng/pngquant produces larger bitmaps
if not use_zopflipng or not use_pngquant:
_assert_table_size_cmp(
"CBDT",
operator.gt,
font,
cmd,
use_pngquant=use_pngquant,
use_zopflipng=use_zopflipng,
)
def test_the_curious_case_of_the_parentless_reused_el():
# https://github.com/googlefonts/nanoemoji/issues/346
svgs = [
f"parentless_reused_el/emoji_u{codepoints}.svg"
for codepoints in ("0023_20e3", "1f170", "1f171")
]
tmp_dir = run_nanoemoji(
(
"--color_format=picosvg",
"--pretty_print",
"--keep_glyph_names",
*(locate_test_file(svg) for svg in svgs),
)
)
font = TTFont(tmp_dir / "Font.ttf")
assert_expected_ttx(
svgs, font, "parentless_reused_el.ttx", include_tables=["GlyphOrder", "SVG "]
)
def test_glyphmap_games():
# https://github.com/googlefonts/nanoemoji/issues/354
# We want to see both glyphs but only one cmap'd, and the use of our special naming scheme
svgs = [
"emoji_u25fd.svg",
"emoji_u42.svg",
]
tmp_dir = run_nanoemoji(
(
"--color_format=glyf_colr_1",
"--keep_glyph_names",
"--glyphmap_generator=write_test_glyphmap",
*(locate_test_file(svg) for svg in svgs),
)
)
font = TTFont(tmp_dir / "Font.ttf")
# We don't really need glyf but ... perhaps it's informative
assert_expected_ttx(
svgs, font, "glyphmap_games.ttx", include_tables=["GlyphOrder", "cmap"]
)
def test_omit_empty_color_glyphs():
svgs = [
"emoji_u200c.svg", # whitespace glyph, contains no paths
"emoji_u42.svg",
]
tmp_dir = run_nanoemoji(
(
"--color_format=glyf_colr_1_and_picosvg",
"--pretty_print",
"--keep_glyph_names",
*(locate_test_file(svg) for svg in svgs),
)
)
font = TTFont(tmp_dir / "Font.ttf")
colr = font["COLR"].table
assert len(colr.BaseGlyphList.BaseGlyphPaintRecord) == 1
svg = font["SVG "]
assert len(svg.docList) == 1
assert_expected_ttx(
svgs,
font,
"omit_empty_color_glyphs.ttx",
include_tables=["GlyphOrder", "cmap", "glyf", "COLR", "SVG "],
)
# https://github.com/googlefonts/nanoemoji/issues/367
def test_path_to_src_matters():
def _glyph(font):
assert font["COLR"].version == 1
colr_table = font["COLR"].table
assert colr_table.BaseGlyphList.BaseGlyphCount == 1
paint = colr_table.BaseGlyphList.BaseGlyphPaintRecord[0].Paint
assert paint.Format == ot.PaintFormat.PaintGlyph
return font["glyf"][paint.Glyph]
tomls = [
"multi_toml/a.toml",
"multi_toml/b.toml",
]
tmp_dir = run_nanoemoji(tuple(locate_test_file(toml) for toml in tomls))
font_a = TTFont(tmp_dir / "A.ttf")
font_b = TTFont(tmp_dir / "B.ttf")
# Each font should define a single PaintGlyph, and the glyphs they reference should not be identical
assert _glyph(font_a) != _glyph(font_b)
def test_input_symlinks_support(tmp_path):
# Symbolic links are not resolved but treated as distinct input files.
shutil.copyfile(locate_test_file("emoji_u42.svg"), tmp_path / "emoji_u42.svg")
# $ ln -s emoji_u42.svg emoji_u43.svg
(tmp_path / "emoji_u43.svg").symlink_to(tmp_path / "emoji_u42.svg")
# $ ln -s emoji_u42.svg emoji_u66_69.svg
(tmp_path / "emoji_u66_69.svg").symlink_to(tmp_path / "emoji_u42.svg")
run_nanoemoji(
(
tmp_path / "emoji_u42.svg", # glyph 'B'
tmp_path / "emoji_u43.svg", # glyph 'C'
tmp_path / "emoji_u66_69.svg", # ligature 'f_i'
"--keep_glyph_names",
),
tmp_dir=tmp_path,
)
font = TTFont(tmp_path / "Font.ttf")
colr_table = font["COLR"].table
# check we get three identical color glyphs with the same Paint
assert colr_table.BaseGlyphList.BaseGlyphCount == 3
assert colr_table.BaseGlyphList.BaseGlyphPaintRecord[0].BaseGlyph == "B"
assert colr_table.BaseGlyphList.BaseGlyphPaintRecord[1].BaseGlyph == "C"
assert colr_table.BaseGlyphList.BaseGlyphPaintRecord[2].BaseGlyph == "f_i"
assert (
colr_table.BaseGlyphList.BaseGlyphPaintRecord[0].Paint
== (colr_table.BaseGlyphList.BaseGlyphPaintRecord[1].Paint)
== colr_table.BaseGlyphList.BaseGlyphPaintRecord[2].Paint
)
# check that the symlinked ligature was built as usual
ligatures = font["GSUB"].table.LookupList.Lookup[0].SubTable[0].ligatures
assert "f" in ligatures
assert len(ligatures["f"]) == 1
assert ligatures["f"][0].Component == ["i"]
assert ligatures["f"][0].LigGlyph == "f_i"
|
|
# encoding: utf-8
"""
Read graphs in GML format.
"GML, the G>raph Modelling Language, is our proposal for a portable
file format for graphs. GML's key features are portability, simple
syntax, extensibility and flexibility. A GML file consists of a
hierarchical key-value lists. Graphs can be annotated with arbitrary
data structures. The idea for a common file format was born at the
GD'95; this proposal is the outcome of many discussions. GML is the
standard file format in the Graphlet graph editor system. It has been
overtaken and adapted by several other systems for drawing graphs."
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Format
------
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
for format specification.
Example graphs in GML format:
http://www-personal.umich.edu/~mejn/netdata/
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ast import literal_eval
from collections import defaultdict
from lib2to3.pgen2.parse import ParseError
from lib2to3.pgen2.tokenize import TokenError
from lib2to3.refactor import RefactoringTool
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
import re
try:
import htmlentitydefs
except ImportError:
# Python 3.x
import html.entities as htmlentitydefs
try:
long
except NameError:
long = int
try:
unicode
except NameError:
unicode = str
try:
unichr
except NameError:
unichr = chr
try:
literal_eval(r"u'\u4444'")
except SyntaxError:
# Remove 'u' prefixes in unicode literals in Python 3
rtp_fix_unicode = RefactoringTool(['lib2to3.fixes.fix_unicode'],
{'print_function': True})
else:
rtp_fix_unicode = None
def escape(text):
"""Escape unprintable or non-ASCII characters, double quotes and ampersands
in a string using XML character references.
"""
def fixup(m):
ch = m.group(0)
return '&#' + str(ord(ch)) + ';'
text = re.sub('[^ -~]|[&"]', fixup, text)
return text if isinstance(text, str) else str(text)
def unescape(text):
"""Replace XML character references in a string with the referenced
characters.
"""
def fixup(m):
text = m.group(0)
if text[1] == '#':
# Character reference
if text[2] == 'x':
code = int(text[3:-1], 16)
else:
code = int(text[2:-1])
else:
# Named entity
try:
code = htmlentitydefs.name2codepoint[text[1:-1]]
except KeyError:
return text # leave unchanged
try:
return chr(code) if code < 256 else unichr(code)
except (ValueError, OverflowError):
return text # leave unchanged
return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text)
def literal_destringizer(rep):
"""Convert a Python literal to the value it represents.
Parameters
----------
rep : string
A Python literal.
Returns
-------
value : object
The value of the Python literal.
Raises
------
ValueError
If ``rep`` is not a Python literal.
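Examples
--------
A minimal illustration (the counterpart of ``literal_stringizer``):
>>> literal_destringizer('[1, 2]')
[1, 2]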
"""
if isinstance(rep, (str, unicode)):
orig_rep = rep
try:
# Python 3.2 does not recognize 'u' prefixes before string literals
if rtp_fix_unicode:
rep = str(rtp_fix_unicode.refactor_string(
rep + '\n', '<string>'))[:-1]
return literal_eval(rep)
except (ParseError, SyntaxError, TokenError):
raise ValueError('%r is not a valid Python literal' % (orig_rep,))
else:
raise ValueError('%r is not a string' % (rep,))
@open_file(0, mode='rb')
def read_gml(path, label='label', destringizer=None):
"""Read graph in GML format from path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by ``label``. Default value: ``'label'``.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a ``ValueError`` is raised. Default
value: ``None``.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, parse_gml
Notes
-----
The GML specification says that files should be ASCII encoded, with any
extended ASCII characters (iso8859-1) appearing as HTML character entities.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, 'test.gml')
>>> H = nx.read_gml('test.gml')
"""
def filter_lines(lines):
for line in lines:
try:
line = line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
line = str(line)
if line and line[-1] == '\n':
line = line[:-1]
yield line
G = parse_gml_lines(filter_lines(path), label, destringizer)
return G
def parse_gml(lines, label='label', destringizer=None):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable of strings
Data in GML format.
label : string, optional
If not None, the parsed nodes will be renamed according to node
attributes indicated by ``label``. Default value: ``'label'``.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
cannot convert a string to a value, a ``ValueError`` is raised. Default
value: ``None``.
Returns
-------
G : NetworkX graph
The parsed graph.
Raises
------
NetworkXError
If the input cannot be parsed.
See Also
--------
write_gml, read_gml
Notes
-----
This stores nested GML attributes as dictionaries in the
NetworkX graph, node, and edge attribute structures.
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
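Examples
--------
An illustrative round trip through ``generate_gml``:
>>> G = nx.path_graph(4)
>>> H = nx.parse_gml(nx.generate_gml(G))
>>> sorted(H.nodes())
[0, 1, 2, 3]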
"""
def decode_line(line):
if isinstance(line, bytes):
try:
line.decode('ascii')
except UnicodeDecodeError:
raise NetworkXError('input is not ASCII-encoded')
if not isinstance(line, str):
line = str(line)
return line
def filter_lines(lines):
if isinstance(lines, (str, unicode)):
lines = decode_line(lines)
lines = lines.splitlines()
for line in lines:
yield line
else:
for line in lines:
line = decode_line(line)
if line and line[-1] == '\n':
line = line[:-1]
if line.find('\n') != -1:
raise NetworkXError('input line contains newline')
yield line
G = parse_gml_lines(filter_lines(lines), label, destringizer)
return G
def parse_gml_lines(lines, label, destringizer):
"""Parse GML into a graph.
"""
def tokenize():
patterns = [
r'[A-Za-z][0-9A-Za-z]*\s+', # keys
r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?', # reals
r'[+-]?[0-9]+', # ints
r'".*?"', # strings
r'\[', # dict start
r'\]', # dict end
r'#.*$|\s+' # comments and whitespaces
]
tokens = re.compile(
'|'.join('(' + pattern + ')' for pattern in patterns))
lineno = 0
for line in lines:
length = len(line)
pos = 0
while pos < length:
match = tokens.match(line, pos)
if match is not None:
for i in range(len(patterns)):
group = match.group(i + 1)
if group is not None:
if i == 0: # keys
value = group.rstrip()
elif i == 1: # reals
value = float(group)
elif i == 2: # ints
value = int(group)
else:
value = group
if i != 6: # comments and whitespaces
yield (i, value, lineno + 1, pos + 1)
pos += len(group)
break
else:
raise NetworkXError('cannot tokenize %r at (%d, %d)' %
(line[pos:], lineno + 1, pos + 1))
lineno += 1
yield (None, None, lineno + 1, 1) # EOF
def unexpected(curr_token, expected):
type, value, lineno, pos = curr_token
raise NetworkXError(
'expected %s, found %s at (%d, %d)' %
(expected, repr(value) if value is not None else 'EOF', lineno,
pos))
def consume(curr_token, type, expected):
if curr_token[0] == type:
return next(tokens)
unexpected(curr_token, expected)
def parse_kv(curr_token):
dct = defaultdict(list)
while curr_token[0] == 0: # keys
key = curr_token[1]
curr_token = next(tokens)
type = curr_token[0]
if type == 1 or type == 2: # reals or ints
value = curr_token[1]
curr_token = next(tokens)
elif type == 3: # strings
value = unescape(curr_token[1][1:-1])
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
elif type == 4: # dict start
curr_token, value = parse_dict(curr_token)
else:
unexpected(curr_token, "an int, float, string or '['")
dct[key].append(value)
dct = {key: (value if not isinstance(value, list) or len(value) != 1
else value[0]) for key, value in dct.items()}
return curr_token, dct
def parse_dict(curr_token):
curr_token = consume(curr_token, 4, "'['") # dict start
curr_token, dct = parse_kv(curr_token)
curr_token = consume(curr_token, 5, "']'") # dict end
return curr_token, dct
def parse_graph():
curr_token, dct = parse_kv(next(tokens))
if curr_token[0] is not None: # EOF
unexpected(curr_token, 'EOF')
if 'graph' not in dct:
raise NetworkXError('input contains no graph')
graph = dct['graph']
if isinstance(graph, list):
raise NetworkXError('input contains more than one graph')
return graph
tokens = tokenize()
graph = parse_graph()
directed = graph.pop('directed', False)
multigraph = graph.pop('multigraph', False)
if not multigraph:
G = nx.DiGraph() if directed else nx.Graph()
else:
G = nx.MultiDiGraph() if directed else nx.MultiGraph()
G.graph.update((key, value) for key, value in graph.items()
if key != 'node' and key != 'edge')
def pop_attr(dct, type, attr, i):
try:
return dct.pop(attr)
except KeyError:
raise NetworkXError(
"%s #%d has no '%s' attribute" % (type, i, attr))
nodes = graph.get('node', [])
mapping = {}
labels = set()
for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
id = pop_attr(node, 'node', 'id', i)
if id in G:
raise NetworkXError('node id %r is duplicated' % (id,))
if label != 'id':
label = pop_attr(node, 'node', 'label', i)
if label in labels:
raise NetworkXError('node label %r is duplicated' % (label,))
labels.add(label)
mapping[id] = label
G.add_node(id, node)
edges = graph.get('edge', [])
for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
source = pop_attr(edge, 'edge', 'source', i)
target = pop_attr(edge, 'edge', 'target', i)
if source not in G:
raise NetworkXError(
'edge #%d has an undefined source %r' % (i, source))
if target not in G:
raise NetworkXError(
'edge #%d has an undefined target %r' % (i, target))
if not multigraph:
if not G.has_edge(source, target):
G.add_edge(source, target, edge)
else:
raise nx.NetworkXError(
'edge #%d (%r%s%r) is duplicated' %
(i, source, '->' if directed else '--', target))
else:
key = edge.pop('key', None)
if key is not None and G.has_edge(source, target, key):
raise nx.NetworkXError(
'edge #%d (%r%s%r, %r) is duplicated' %
(i, source, '->' if directed else '--', target, key))
G.add_edge(source, target, key, edge)
if label != 'id':
G = nx.relabel_nodes(G, mapping)
if 'name' in graph:
G.graph['name'] = graph['name']
else:
del G.graph['name']
return G
def literal_stringizer(value):
"""Convert a value to a Python literal in GML representation.
Parameters
----------
value : object
The value to be converted to GML representation.
Returns
-------
rep : string
A double-quoted Python literal representing value. Unprintable
characters are replaced by XML character references.
Raises
------
ValueError
If ``value`` cannot be converted to GML.
Notes
-----
``literal_stringizer`` is largely the same as ``repr`` in terms of
functionality but attempts to prefix ``unicode`` and ``bytes`` literals with
``u`` and ``b`` to provide better interoperability of data generated by
Python 2 and Python 3.
The original value can be recovered using the
``networkx.readwrite.gml.literal_destringizer`` function.
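Examples
--------
A minimal illustration (its output can be recovered with
``literal_destringizer``):
>>> literal_stringizer([1, 'a'])
"[1,'a']"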
"""
def stringize(value):
if isinstance(value, (int, long, bool)) or value is None:
buf.write(str(value))
elif isinstance(value, unicode):
text = repr(value)
if text[0] != 'u':
try:
value.encode('latin1')
except UnicodeEncodeError:
text = 'u' + text
buf.write(text)
elif isinstance(value, (float, complex, str, bytes)):
buf.write(repr(value))
elif isinstance(value, list):
buf.write('[')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write(']')
elif isinstance(value, tuple):
if len(value) > 1:
buf.write('(')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write(')')
elif value:
buf.write('(')
stringize(value[0])
buf.write(',)')
else:
buf.write('()')
elif isinstance(value, dict):
buf.write('{')
first = True
for key, value in value.items():
if not first:
buf.write(',')
else:
first = False
stringize(key)
buf.write(':')
stringize(value)
buf.write('}')
elif isinstance(value, set):
buf.write('{')
first = True
for item in value:
if not first:
buf.write(',')
else:
first = False
stringize(item)
buf.write('}')
else:
raise ValueError(
'%r cannot be converted into a Python literal' % (value,))
buf = StringIO()
stringize(value)
return buf.getvalue()
def generate_gml(G, stringizer=None):
"""Generate a single entry of the graph G in GML format.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
stringizer : callable, optional
A stringizer which converts non-int/float/dict values into strings. If
it cannot convert a value into a string, it should raise a
``ValueError`` to indicate that. Default value: ``None``.
Returns
-------
lines: generator of strings
Lines of GML data. Newlines are not appended.
Raises
------
NetworkXError
If ``stringizer`` cannot convert a value into a string, or the value to
convert is not a string while ``stringizer`` is ``None``.
Notes
-----
Graph attributes named ``'directed'``, ``'multigraph'``, ``'node'`` or
``'edge'``, node attributes named ``'id'`` or ``'label'``, edge attributes
named ``'source'`` or ``'target'`` (or ``'key'`` if ``G`` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
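Examples
--------
An illustrative sketch for a graph carrying a single graph attribute:
>>> G = nx.Graph()
>>> G.graph['name'] = 'test'
>>> lines = list(nx.generate_gml(G))
>>> lines[0], lines[-1]
('graph [', ']')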
"""
valid_keys = re.compile('^[A-Za-z][0-9A-Za-z]*$')
def stringize(key, value, ignored_keys, indent, in_list=False):
if not isinstance(key, (str, unicode)):
raise NetworkXError('%r is not a string' % (key,))
if not valid_keys.match(key):
raise NetworkXError('%r is not a valid key' % (key,))
if not isinstance(key, str):
key = str(key)
if key not in ignored_keys:
if isinstance(value, (int, long)):
yield indent + key + ' ' + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML requires that a real literal contain a decimal point, but
# repr may not output a decimal point when the mantissa is
# integral and hence needs fixing.
epos = text.rfind('E')
if epos != -1 and text.find('.', 0, epos) == -1:
text = text[:epos] + '.' + text[epos:]
yield indent + key + ' ' + text
elif isinstance(value, dict):
yield indent + key + ' ['
next_indent = indent + ' '
for key, value in value.items():
for line in stringize(key, value, (), next_indent):
yield line
yield indent + ']'
elif isinstance(value, list) and value and not in_list:
next_indent = indent + ' '
for value in value:
for line in stringize(key, value, (), next_indent, True):
yield line
else:
if stringizer:
try:
value = stringizer(value)
except ValueError:
raise NetworkXError(
'%r cannot be converted into a string' % (value,))
if not isinstance(value, (str, unicode)):
raise NetworkXError('%r is not a string' % (value,))
yield indent + key + ' "' + escape(value) + '"'
multigraph = G.is_multigraph()
yield 'graph ['
# Output graph attributes
if G.is_directed():
yield ' directed 1'
if multigraph:
yield ' multigraph 1'
ignored_keys = {'directed', 'multigraph', 'node', 'edge'}
for attr, value in G.graph.items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
# Output node data
node_id = dict(zip(G, range(len(G))))
ignored_keys = {'id', 'label'}
for node, attrs in G.node.items():
yield ' node ['
yield ' id ' + str(node_id[node])
for line in stringize('label', node, (), ' '):
yield line
for attr, value in attrs.items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
yield ' ]'
# Output edge data
ignored_keys = {'source', 'target'}
kwargs = {'data': True}
if multigraph:
ignored_keys.add('key')
kwargs['keys'] = True
for e in G.edges_iter(**kwargs):
yield ' edge ['
yield ' source ' + str(node_id[e[0]])
yield ' target ' + str(node_id[e[1]])
if multigraph:
for line in stringize('key', e[2], (), ' '):
yield line
for attr, value in e[-1].items():
for line in stringize(attr, value, ignored_keys, ' '):
yield line
yield ' ]'
yield ']'
@open_file(1, mode='wb')
def write_gml(G, path, stringizer=None):
"""Write a graph ``G`` in GML format to the file or file handle ``path``.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
path : filename or filehandle
The filename or filehandle to write. Files whose names end with .gz or
.bz2 will be compressed.
stringizer : callable, optional
A stringizer which converts non-int/non-float/non-dict values into
strings. If it cannot convert a value into a string, it should raise a
``ValueError`` to indicate that. Default value: ``None``.
Raises
------
NetworkXError
If ``stringizer`` cannot convert a value into a string, or the value to
convert is not a string while ``stringizer`` is ``None``.
See Also
--------
read_gml, generate_gml
Notes
-----
Graph attributes named ``'directed'``, ``'multigraph'``, ``'node'`` or
``'edge'``, node attributes named ``'id'`` or ``'label'``, edge attributes
named ``'source'`` or ``'target'`` (or ``'key'`` if ``G`` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gml(G, "test.gml")
Filenames ending in .gz or .bz2 will be compressed.
>>> nx.write_gml(G, "test.gml.gz")
"""
for line in generate_gml(G, stringizer):
path.write((line + '\n').encode('ascii'))
# fixture for nose
def teardown_module(module):
import os
for fname in ['test.gml', 'test.gml.gz']:
if os.path.isfile(fname):
os.unlink(fname)
|
|
from __future__ import absolute_import, division, print_function
import json
from base64 import b64encode
import pytest
import requests
import requests_mock
import appr
from appr.client import DEFAULT_PREFIX, DEFAULT_REGISTRY, ApprClient
@pytest.fixture()
def channels_data():
return {'dev': {'current': '1.0.0-rc', 'name': 'dev'}}
@pytest.fixture(autouse=True)
def fakehome(fake_home):
pass
def test_headers_without_auth():
r = ApprClient()
assert sorted(r.headers.keys()) == ['Content-Type', 'User-Agent']
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_headers_with_auth():
r = ApprClient()
r.auth.add_token('http://localhost:5000/cnr', 'titi')
assert sorted(r.headers.keys()) == ["Authorization", 'Content-Type', 'User-Agent']
assert r.headers["Authorization"] == "titi"
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_headers_with_auth_star():
r = ApprClient()
r.auth.add_token('*', 'titi')
assert sorted(r.headers.keys()) == ["Authorization", 'Content-Type', 'User-Agent']
assert r.headers["Authorization"] == "titi"
assert r.headers["Content-Type"] == "application/json"
assert r.headers["User-Agent"] == "apprpy-cli/%s" % appr.__version__
def test_default_endpoint():
r = ApprClient(endpoint=None)
assert r.endpoint.geturl() == DEFAULT_REGISTRY + DEFAULT_PREFIX
def test_url():
r = ApprClient(endpoint="http://test.com")
assert r._url("/test") == "http://test.com" + DEFAULT_PREFIX + "/test"
def test_url_prefix():
r = ApprClient(endpoint="http://test.com/test")
assert r._url("/2") == "http://test.com/test" + DEFAULT_PREFIX + "/2"
def test_pull():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.0/helm/pull", content=response)
assert r.pull("orga/p1", {"value": "1.0.0", "key": "version"}, "helm") == response
def test_pull_channel(channels_data):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.0-rc/helm/pull", content=response)
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/channels/dev",
text=json.dumps(channels_data['dev']))
assert r.pull("orga/p1", {"value": "dev", "key": "channel"}, "helm") == response
def test_pull_digest():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/blobs/sha256/2432", content=response)
assert r.pull("orga/p1", {"key": "digest", "value": "2432"}, "helm") == response
def test_pull_version():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/0.8.1/helm/pull", content=response)
assert r.pull("orga/p1", {"key": "version", "value": "0.8.1"}, "helm") == response
def test_pull_discovery_https(discovery_html):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get("https://appr.sh/?appr-discovery=1", text=discovery_html, complete_qs=True)
m.get("https://api.kubespray.io/api/v1/packages/orga/p1/pull", content=response)
assert r.pull("appr.sh/orga/p1", {"key": "version", "value": "1.0.0"}, "helm") == response
def test_pull_discovery_http(discovery_html):
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get("https://appr.sh/?appr-discovery=1", text="<html/>", complete_qs=True)
m.get("http://appr.sh/?appr-discovery=1", text=discovery_html, complete_qs=True)
m.get("https://api.kubespray.io/api/v1/packages/orga/p1/pull", content=response)
assert r.pull("appr.sh/orga/p1", {"key": "version", "value": "1.0.0"}, "helm") == response
def test_pull_with_version():
r = ApprClient()
with requests_mock.mock() as m:
response = b'package_data'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/orga/p1/1.0.1/helm/pull", complete_qs=True, content=response)
assert r.pull("orga/p1", {"key": "version", "value": "1.0.1"}, "helm") == response
def test_list_packages():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages", text=response)
assert json.dumps(r.list_packages({})) == response
def test_list_packages_username():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?username=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({'username': "ant31"})) == response
def test_list_packages_orga():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?namespace=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({'namespace': "ant31"})) == response
def test_list_packages_orga_and_user():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages?username=titi&namespace=ant31", complete_qs=True, text=response)
assert json.dumps(r.list_packages({"username": "titi", "namespace": "ant31"})) == response
def test_delete_package():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm", complete_qs=True, text=response)
assert r.delete_package("ant31/kube-ui", "1.4.3", "helm") == {"packages": "true"}
def test_delete_package_version():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm", complete_qs=True, text=response)
assert r.delete_package(name="ant31/kube-ui", version="1.4.3", media_type="helm") == {"packages": "true"}
def test_delete_package_unauthorized():
r = ApprClient()
with requests_mock.mock() as m:
response = '{"packages": "true"}'
m.delete(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui/1.4.3/helm",
complete_qs=True,
text=response,
status_code=401)
with pytest.raises(requests.HTTPError):
r.delete_package("ant31/kube-ui", "1.4.3", "helm")
def test_push_unauthorized():
r = ApprClient()
with requests_mock.mock() as m:
body = {"blob": "fdsfds"}
response = b'{"packages": "true"}'
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=false",
complete_qs=True,
content=response,
status_code=401)
with pytest.raises(requests.HTTPError):
r.push(name="ant31/kube-ui", body=body)
def test_push():
body = {"blob": b64encode(b"testdata").decode('utf-8')}
r = ApprClient()
response = '{"packages": "true"}'
with requests_mock.mock() as m:
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=false",
complete_qs=True,
text=response)
assert json.dumps(r.push(name="ant31/kube-ui", body=body)) == json.dumps(json.loads(response))
def test_push_force():
body = {"blob": b64encode(b"foobar").decode('utf-8')}
r = ApprClient()
response = '{"packages": "true"}'
with requests_mock.mock() as m:
m.post(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/api/v1/packages/ant31/kube-ui?force=true",
complete_qs=True,
text=response)
assert json.dumps(r.push(name="ant31/kube-ui", body=body, force=True)) == json.dumps(json.loads(response))
def test_get_version():
r = ApprClient()
response = '{"appr-server": "0.23.0"}'
with requests_mock.mock() as m:
m.get(DEFAULT_REGISTRY + DEFAULT_PREFIX + "/version",
complete_qs=True,
text=response)
assert json.dumps(r.version()) == json.dumps(json.loads(response))
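# A minimal usage sketch mirroring the calls exercised above (the endpoint and
# package name are illustrative assumptions, not fixtures from this module):
#   client = ApprClient(endpoint="http://localhost:5000")
#   blob = client.pull("myorg/mypkg", {"key": "version", "value": "1.0.0"}, "helm")
#   packages = client.list_packages({"namespace": "myorg"})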
|
|
# PyVot
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from xl.range import Range, Vector, RowVector, ColumnVector, Scalar
from xl.sheet import Workbook, Worksheet
from xl.cache import CacheManager, enable_caching, cache_result
import collections
def workbooks():
"""Returns a list of open workbooks"""
import xl._impl.com_utils
return [Workbook(x) for x in xl._impl.com_utils.get_open_xlWorkbooks()]
# XL operations
def view(x, name=None, to=None):
# if it's an array, load into excel, return the range
return Workbook.default_workbook().view(x, name, to)
# Get the range from excel
def get(r):
"""Returns a Range for the given table column name, named range, or Excel address (ex. A1:B4).
`get` guesses the active workbook, and begins its search on the active sheet.
See also: xl.Workbook.get and xl.Workbook.range"""
return Workbook.default_workbook().get(r)
def selected_range():
"""Gets the currently selected range. The returned range filters
hidden cells by default"""
wb = Workbook.default_workbook()
xlApp = wb.xlWorkbook.Application
return Range(xlApp.Selection, with_hidden=False).normalize()
def selected_value():
"""Gets the values in the currently selected range. See xl.selected_range()"""
return selected_range().get()
def filter(func, range):
"""Filters rows or columns by applying `func` to the given range.
`func` is called for each value in the range. If it returns False,
the corresponding row / column is hidden. Otherwise, the row / column is
made visible.
`range` must be a row or column vector. If it is a row vector, columns are hidden, and vice versa.
Note that, to unhide rows / columns, `range` must include hidden cells. For example, to unhide a range:
xl.filter(lambda v: True, some_vector.including_hidden)"""
# $$$ maybe we should kill scalar ranges
if not (range.shape in (Scalar, RowVector, ColumnVector)):
raise ValueError("range must be a vector or scalar")
hide_dim = range._vector_dim.other
with CacheManager.caching_disabled():
for cell in range.itercells():
assert cell.shape is Scalar
visible = bool( func(cell.get()) )
hide_dim.entire(cell._full_xlRange).Hidden = not visible
def map(func, *rangeIn):
"""Excel equivalent to the built-in map().
ColumnVector ranges as well as Python iterables are accepted.
The result list is written back to Excel as a column. A ColumnVector
representing the stored results is returned"""
import __builtin__
xs = (_to_value(r) for r in rangeIn)
name = getattr(func, '__name__', "<callable>")
y = __builtin__.map(func, *xs)
r = _dest_for_source_ranges(rangeIn)
return view(y, name, to=r)
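# A minimal usage sketch for map (the table column name "Price" is an
# illustrative assumption, not something defined in this module):
#   prices = get("Price")                    # ColumnVector backed by an Excel table
#   doubled = map(lambda v: v * 2, prices)   # results written back as a new column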
def apply(func, *rangeIn):
"""Excel equivalent to the built-in apply().
Ranges as well as Python iterables are accepted. Ranges
are converted to lists of Python values (with Range.get()).
The value returned by `func` is then passed to xl.view"""
import __builtin__
xs = (_to_value(r) for r in rangeIn)
name = getattr(func, '__name__', "<callable>")
y = __builtin__.apply(func, xs)
r = _dest_for_source_ranges(rangeIn)
return view(y, name, to=r)
# accept excel range or value.
# if it's a range, convert to a value
def _to_value(obj):
r = _tryToRange(obj)
if r is not None:
return r.get()
if isinstance(obj, collections.Sequence):
return obj
raise ValueError("Expected range or value")
# Convert a variety of ranges to a Range object. Good for normalizing inputs.
def _tryToRange(obj):
if obj is None:
raise ValueError("range object can't be None")
if isinstance(obj, Range):
return obj
t = type(obj)
# $$$ is it an xlRange?
if t is str:
return get(obj)
return None
def _toRange(obj):
r = _tryToRange(obj)
if r is None:
raise ValueError("Unrecognized range object:%s" % str(obj))
return r
def _dest_for_source_ranges(ranges):
"""Given a set of source ranges/values (for map or apply), attempts to find a sensible target range
If a source is found that is both a range and part of a table, returns a new column range in that table
If no such range exists, None is returned"""
rs = [r for r in ranges if not r is None
if isinstance(r, Range)
if not r.containing_table is None]
if rs:
r = rs[0]
# $$$ do something about partial column selections...
dest_col = r.containing_table.append_empty_columns(1)
# map / apply respect range filtering when fetching values
# We inserted a full column, but we must return a range with indices that align visually
dest_col = dest_col.with_filter(include_hidden_cells=r.includes_hidden_cells)
return dest_col
else: return None
def join(key_range_a, key_range_b):
"""Joins the table associated with key range B to the table for key range A.
Each key in range A must have zero or one matching keys in range B (i.e. rows will not be added to table A)"""
b_headers, b_key_map = _join_map(key_range_b)
assert not b_headers is None, "Headerless tables not supported yet"
# Number of columns being added to table A
num_joined_cols = len(b_headers)
if num_joined_cols == 0:
raise ValueError("key_range_b indicates the source table; there must be at least one value column in addition to the key column")
new_rows = [ b_headers ]
for a_key in key_range_a:
v = b_key_map.get(a_key, ("",) * num_joined_cols)
assert len(v) == num_joined_cols
new_rows.append(v)
ws_a = Worksheet(key_range_a._full_xlRange.Worksheet)
tb_a = ws_a._find_table_containing_range(key_range_a)
# We may have appended a single column
joined_cols = tb_a.append_empty_columns(num_joined_cols)
# If num_joined_cols is 1, may behave as a vector or scalar. However,
# new_rows is constructed for a 2D range
joined_cols.as_matrix.set( new_rows )
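# A minimal usage sketch for join (the column names are illustrative assumptions):
#   keys_a = get("OrderId")   # key column in table A
#   keys_b = get("Id")        # matching key column in table B
#   join(keys_a, keys_b)      # appends B's non-key columns to table A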
def _join_map(r):
ws = Worksheet(r._full_xlRange.Worksheet)
tb = ws._find_table_containing_range(r)
key_col_idx = r.column - tb.rData.column
headers = None
if tb.rHeader:
assert not tb.rHeader.shape is Scalar
headers = list(tb.rHeader.get())
del headers[key_col_idx]
m = {}
for r in tb.data_rows:
assert not r[key_col_idx] in m, "Duplicate key during join"
m[r[key_col_idx]] = r[:key_col_idx] + r[key_col_idx + 1:]
return (headers, m)
# Decorators
# Used to provide metadata to Tooling about exposed function types
def tool_map(func):
return func
def tool_apply(func):
return func
def tool_workbook(func):
return func
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
# NOTE(morganfainberg): import endpoint filter to populate the SQL model
from keystone.contrib import endpoint_filter # flake8: noqa
from keystone.tests import test_v3
class TestExtensionCase(test_v3.RestfulTestCase):
EXTENSION_NAME = 'endpoint_filter'
EXTENSION_TO_ADD = 'endpoint_filter_extension'
def config_overrides(self):
super(TestExtensionCase, self).config_overrides()
self.config_fixture.config(
group='catalog',
driver='keystone.contrib.endpoint_filter.backends.catalog_sql.'
'EndpointFilterCatalog')
def setUp(self):
super(TestExtensionCase, self).setUp()
self.default_request_url = (
'/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id})
class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
"""Test OS-EP-FILTER endpoint to project associations extension."""
# endpoint-project associations crud tests
# PUT
def test_create_endpoint_project_assoc(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid endpoint and project id test case.
"""
self.put(self.default_request_url,
body='',
expected_status=204)
def test_create_endpoint_project_assoc_noproj(self):
"""PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_create_endpoint_project_assoc_noendp(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
def test_create_endpoint_project_assoc_unexpected_body(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Unexpected body in request. The body should be ignored.
"""
self.put(self.default_request_url,
body={'project_id': self.default_domain_project_id},
expected_status=204)
# HEAD
def test_check_endpoint_project_assoc(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project and endpoint id test case.
"""
self.put(self.default_request_url,
body='',
expected_status=204)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_check_endpoint_project_assoc_noproj(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_check_endpoint_project_assoc_noendp(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
# GET
def test_get_endpoint_project_assoc(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints success."""
self.put(self.default_request_url)
r = self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id})
self.assertValidEndpointListResponse(r, self.endpoint)
def test_get_endpoint_project_assoc_noproj(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints no project."""
self.put(self.default_request_url)
self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': uuid.uuid4().hex},
body='',
expected_status=404)
def test_list_projects_for_endpoint_default(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects success
Don't associate project and endpoint, then get empty list.
"""
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id},
expected_status=200)
self.assertValidProjectListResponse(r, expected_length=0)
def test_list_projects_for_endpoint_noendpoint(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Invalid endpoint id test case.
"""
self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_list_projects_for_endpoint_assoc(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects success
Associate default project and endpoint, then get it.
"""
self.put(self.default_request_url)
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id},
expected_status=200)
self.assertValidProjectListResponse(r, self.default_domain_project)
# DELETE
def test_remove_endpoint_project_assoc(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project id and endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_remove_endpoint_project_assoc_noproj(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_remove_endpoint_project_assoc_noendp(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
"""Test OS-EP-FILTER catalog filtering extension."""
def test_default_project_id_scoped_token_with_user_id_ep_filter(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_with_user_id_ep_filter(self):
# attempt to authenticate without requesting a project
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_default_project_id_scoped_token_ep_filter_no_catalog(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_ep_filter_no_catalog(self):
# attempt to authenticate without requesting a project
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_default_project_id_scoped_token_ep_filter_full_catalog(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_ep_filter_full_catalog(self):
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_implicit_project_id_scoped_token_handling_bad_reference(self):
# handle the case where the endpoint filter table references an endpoint
# that no longer exists
# add first endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# create a second temporary endpoint
self.endpoint_id2 = uuid.uuid4().hex
self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint2['id'] = self.endpoint_id2
self.catalog_api.create_endpoint(
self.endpoint_id2,
self.endpoint2.copy())
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id2},
body='',
expected_status=204)
# remove the temporary reference
# this will create inconsistency in the endpoint filter table
# which is fixed during the catalog creation for token request
self.catalog_api.delete_endpoint(self.endpoint_id2)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_disabled_endpoint(self):
"""The catalog contains only enabled endpoints."""
# Add an enabled endpoint to the default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
# Add a disabled endpoint to the default project.
# Create a disabled endpoint that's like the enabled one.
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
disabled_endpoint_ref.update({
'id': disabled_endpoint_id,
'enabled': False,
'interface': 'internal'
})
self.catalog_api.create_endpoint(disabled_endpoint_id,
disabled_endpoint_ref)
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': disabled_endpoint_id},
expected_status=204)
# Authenticate to get token with catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
endpoints = r.result['token']['catalog'][0]['endpoints']
endpoint_ids = [ep['id'] for ep in endpoints]
self.assertEqual([self.endpoint_id], endpoint_ids)
|
|
from django.contrib.auth import get_user_model, authenticate
from django.conf import settings
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import urlsafe_base64_decode as uid_decoder
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from rest_framework import serializers, exceptions
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
# Get the UserModel
UserModel = get_user_model()
class LoginSerializer(serializers.Serializer):
username = serializers.CharField(required=False, allow_blank=True)
email = serializers.EmailField(required=False, allow_blank=True)
password = serializers.CharField(style={'input_type': 'password'})
def validate(self, attrs):
username = attrs.get('username')
email = attrs.get('email')
password = attrs.get('password')
if 'allauth' in settings.INSTALLED_APPS:
from allauth.account import app_settings
# Authentication through email
if app_settings.AUTHENTICATION_METHOD == app_settings.AuthenticationMethod.EMAIL:
if email and password:
user = authenticate(email=email, password=password)
else:
msg = _('Must include "email" and "password".')
raise exceptions.ValidationError(msg)
# Authentication through username
elif app_settings.AUTHENTICATION_METHOD == app_settings.AuthenticationMethod.USERNAME:
if username and password:
user = authenticate(username=username, password=password)
else:
msg = _('Must include "username" and "password".')
raise exceptions.ValidationError(msg)
# Authentication through either username or email
else:
if email and password:
user = authenticate(email=email, password=password)
elif username and password:
user = authenticate(username=username, password=password)
else:
msg = _('Must include either "username" or "email" and "password".')
raise exceptions.ValidationError(msg)
elif username and password:
user = authenticate(username=username, password=password)
else:
msg = _('Must include "username" and "password".')
raise exceptions.ValidationError(msg)
# Did we get back an active user?
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.ValidationError(msg)
else:
msg = _('Unable to log in with provided credentials.')
raise exceptions.ValidationError(msg)
# If required, is the email verified?
if 'rest_auth.registration' in settings.INSTALLED_APPS:
from allauth.account import app_settings
if app_settings.EMAIL_VERIFICATION == app_settings.EmailVerificationMethod.MANDATORY:
email_address = user.emailaddress_set.get(email=user.email)
if not email_address.verified:
raise serializers.ValidationError('E-mail is not verified.')
attrs['user'] = user
return attrs
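# A minimal usage sketch for LoginSerializer (the credentials are placeholders):
#   serializer = LoginSerializer(data={'username': 'alice', 'password': 's3cret'})
#   serializer.is_valid(raise_exception=True)
#   user = serializer.validated_data['user']   # set by validate() above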
class TokenSerializer(serializers.ModelSerializer):
"""
Serializer for Token model.
"""
class Meta:
model = Token
fields = ('key',)
class UserDetailsSerializer(serializers.ModelSerializer):
"""
User model w/o password
"""
class Meta:
model = UserModel
fields = ('username', 'email', 'first_name', 'last_name')
read_only_fields = ('email', )
class PasswordResetSerializer(serializers.Serializer):
"""
Serializer for requesting a password reset e-mail.
"""
email = serializers.EmailField()
password_reset_form_class = PasswordResetForm
def validate_email(self, value):
# Create PasswordResetForm with the serializer
self.reset_form = self.password_reset_form_class(data=self.initial_data)
if not self.reset_form.is_valid():
raise serializers.ValidationError(_('Error'))
if not UserModel.objects.filter(email__iexact=value).exists():
raise serializers.ValidationError(_('Invalid e-mail address'))
return value
def save(self):
request = self.context.get('request')
# Set some values to trigger the send_email method.
opts = {
'use_https': request.is_secure(),
'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
'request': request,
}
self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.Serializer):
"""
Serializer for confirming a password reset request.
"""
new_password1 = serializers.CharField(max_length=128)
new_password2 = serializers.CharField(max_length=128)
uid = serializers.CharField(required=True)
token = serializers.CharField(required=True)
set_password_form_class = SetPasswordForm
def custom_validation(self, attrs):
pass
def validate(self, attrs):
self._errors = {}
# Decode the uidb64 to uid to get User object
try:
uid = force_text(uid_decoder(attrs['uid']))
self.user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
raise ValidationError({'uid': ['Invalid value']})
self.custom_validation(attrs)
# Construct SetPasswordForm instance
self.set_password_form = self.set_password_form_class(
user=self.user, data=attrs
)
if not self.set_password_form.is_valid():
raise serializers.ValidationError(self.set_password_form.errors)
if not default_token_generator.check_token(self.user, attrs['token']):
raise ValidationError({'token': ['Invalid value']})
return attrs
def save(self):
self.set_password_form.save()
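# A minimal usage sketch (uid and token come from the reset e-mail link; the
# values below are placeholders):
#   serializer = PasswordResetConfirmSerializer(data={
#       'uid': '<uidb64>', 'token': '<token>',
#       'new_password1': 'new-pass', 'new_password2': 'new-pass'})
#   serializer.is_valid(raise_exception=True)
#   serializer.save()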
class PasswordChangeSerializer(serializers.Serializer):
old_password = serializers.CharField(max_length=128)
new_password1 = serializers.CharField(max_length=128)
new_password2 = serializers.CharField(max_length=128)
set_password_form_class = SetPasswordForm
def __init__(self, *args, **kwargs):
self.old_password_field_enabled = getattr(
settings, 'OLD_PASSWORD_FIELD_ENABLED', False
)
self.logout_on_password_change = getattr(
settings, 'LOGOUT_ON_PASSWORD_CHANGE', False
)
super(PasswordChangeSerializer, self).__init__(*args, **kwargs)
if not self.old_password_field_enabled:
self.fields.pop('old_password')
self.request = self.context.get('request')
self.user = getattr(self.request, 'user', None)
def validate_old_password(self, value):
invalid_password_conditions = (
self.old_password_field_enabled,
self.user,
not self.user.check_password(value)
)
if all(invalid_password_conditions):
raise serializers.ValidationError('Invalid password')
return value
def validate(self, attrs):
self.set_password_form = self.set_password_form_class(
user=self.user, data=attrs
)
if not self.set_password_form.is_valid():
raise serializers.ValidationError(self.set_password_form.errors)
return attrs
def save(self):
self.set_password_form.save()
if not self.logout_on_password_change:
from django.contrib.auth import update_session_auth_hash
update_session_auth_hash(self.request, self.user)
|
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
tests.unit.config.schemas.test_ssh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.config.schemas import ssh as ssh_schemas
from salt.config.schemas.minion import MinionConfiguration
# Import 3rd-party libs
try:
import jsonschema
import jsonschema.exceptions
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
class RosterEntryConfigTest(TestCase):
def test_config(self):
config = ssh_schemas.RosterEntryConfig()
expected = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'Roster Entry',
'description': 'Salt SSH roster entry definition',
'type': 'object',
'properties': {
'host': {
'title': 'Host',
'description': 'The IP address or DNS name of the remote host',
'type': 'string',
'pattern': r'^((\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([A-Za-z0-9][A-Za-z0-9\.\-]{1,255}))$',
'minLength': 1
},
'port': {
'description': 'The target system\'s ssh port number',
'title': 'Port',
'default': 22,
'maximum': 65535,
'minimum': 0,
'type': 'integer'
},
'user': {
'default': 'root',
'type': 'string',
'description': 'The user to log in as. Defaults to root',
'title': 'User',
'minLength': 1
},
'passwd': {
'title': 'Password',
'type': 'string',
'description': 'The password to log in with',
'format': 'secret',
'minLength': 1
},
'priv': {
'type': 'string',
'description': 'File path to ssh private key, defaults to salt-ssh.rsa',
'title': 'Private Key',
'minLength': 1
},
'sudo': {
'default': False,
'type': 'boolean',
'description': 'run command via sudo. Defaults to False',
'title': 'Sudo'
},
'timeout': {
'type': 'integer',
'description': 'Number of seconds to wait for response when establishing an SSH connection',
'title': 'Timeout'
},
'thin_dir': {
'type': 'string',
'description': 'The target system\'s storage directory for Salt components. Defaults to /tmp/salt-<hash>.',
'title': 'Thin Directory'
},
# The actual representation of the minion options would make this HUGE!
'minion_opts': ssh_schemas.DictItem(title='Minion Options',
description='Dictionary of minion options',
properties=MinionConfiguration()).serialize(),
},
'anyOf': [
{
'required': [
'passwd'
]
},
{
'required': [
'priv'
]
}
],
'required': [
'host',
'user',
],
'x-ordering': [
'host',
'port',
'user',
'passwd',
'priv',
'sudo',
'timeout',
'thin_dir',
'minion_opts'
],
'additionalProperties': False
}
try:
self.assertDictContainsSubset(expected['properties'], config.serialize()['properties'])
self.assertDictContainsSubset(expected, config.serialize())
except AssertionError:
import json
print(json.dumps(config.serialize(), indent=4))
raise
@skipIf(HAS_JSONSCHEMA is False, 'The \'jsonschema\' library is missing')
def test_config_validate(self):
try:
jsonschema.validate(
{
'host': 'localhost',
'user': 'root',
'passwd': 'foo'
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
try:
jsonschema.validate(
{
'host': '127.0.0.1',
'user': 'root',
'passwd': 'foo'
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
try:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': 'root',
'priv': 'foo',
'passwd': 'foo'
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
try:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': 'root',
'passwd': 'foo',
'sudo': False
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
try:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': 'root',
'priv': 'foo',
'passwd': 'foo',
'thin_dir': '/foo/bar'
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
try:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': 'root',
'passwd': 'foo',
'minion_opts': {
'interface': '0.0.0.0'
}
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': '',
'passwd': 'foo',
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
self.assertIn('is too short', excinfo.exception.message)
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{
'host': '127.1.0.1',
'user': 'root',
'passwd': 'foo',
'minion_opts': {
'interface': 0
}
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker()
)
self.assertIn('is not of type', excinfo.exception.message)
class RosterItemTest(TestCase):
def test_roster_config(self):
try:
self.assertDictContainsSubset(
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Roster Configuration",
"description": "Roster entries definition",
"type": "object",
"patternProperties": {
r"^([^:]+)$": ssh_schemas.RosterEntryConfig.serialize()
},
"additionalProperties": False
},
ssh_schemas.RosterItem.serialize()
)
except AssertionError:
import json
print(json.dumps(ssh_schemas.RosterItem.serialize(), indent=4))
raise
@skipIf(HAS_JSONSCHEMA is False, 'The \'jsonschema\' library is missing')
def test_roster_config_validate(self):
try:
jsonschema.validate(
{'target-1':
{
'host': 'localhost',
'user': 'root',
'passwd': 'foo'
}
},
ssh_schemas.RosterItem.serialize(),
format_checker=jsonschema.FormatChecker()
)
except jsonschema.exceptions.ValidationError as exc:
self.fail('ValidationError raised: {0}'.format(exc))
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{'target-1:1':
{
'host': 'localhost',
'user': 'root',
'passwd': 'foo'
}
},
ssh_schemas.RosterItem.serialize(),
format_checker=jsonschema.FormatChecker()
)
self.assertIn(
'Additional properties are not allowed (\'target-1:1\' was unexpected)',
excinfo.exception.message
)
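# For reference, a roster entry satisfying the schema tested above would look
# like the following YAML (values are illustrative):
#   target-1:
#     host: 192.168.0.10
#     user: root
#     passwd: foo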
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdamOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
v_0 <- 0 (Initialize initial 2nd moment vector)
t <- 0 (Initialize timestep)
```
The update rule for `variable` with gradient `g` uses an optimization
described at the end of Section 2 of the paper:
```
t <- t + 1
lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
v_t <- beta2 * v_{t-1} + (1 - beta2) * g * g
variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
```
The default value of 1e-8 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
# Variables to accumulate the powers of the beta parameters.
# Created in _create_slots when we know the variables to optimize.
self._beta1_power = None
self._beta2_power = None
# Created in SparseApply if needed.
self._updated_lr = None
def _get_beta_accumulators(self):
return self._beta1_power, self._beta2_power
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
create_new = self._beta1_power is None
if not create_new and context.in_graph_mode():
create_new = (self._beta1_power.graph is not first_var.graph)
if create_new:
with ops.colocate_with(first_var):
self._beta1_power = variable_scope.variable(self._beta1,
name="beta1_power",
trainable=False)
self._beta2_power = variable_scope.variable(self._beta2,
name="beta2_power",
trainable=False)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.apply_adam(
var, m, v,
math_ops.cast(self._beta1_power, var.dtype.base_dtype),
math_ops.cast(self._beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad, use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return training_ops.resource_apply_adam(
var.handle, m.handle, v.handle,
math_ops.cast(self._beta1_power, grad.dtype.base_dtype),
math_ops.cast(self._beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t,
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(var,
lr * m_t / (v_sqrt + epsilon_t),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values, var, grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(
x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(
grad, var, indices, self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
with ops.colocate_with(self._beta1_power):
update_beta1 = self._beta1_power.assign(
self._beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = self._beta2_power.assign(
self._beta2_power * self._beta2_t,
use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
name=name_scope)
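# A minimal usage sketch (graph mode; `loss` is an assumed tensor, not defined
# in this module):
#   opt = AdamOptimizer(learning_rate=1e-3)
#   train_op = opt.minimize(loss)   # minimize() is inherited from optimizer.Optimizer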
|
|
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""ResNet-32x4 with rank-1 distributions."""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from experimental.rank1_bnns import resnet_cifar_model # local file import
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
from uncertainty_baselines.baselines.cifar import utils
import uncertainty_metrics as um
# ~24.4 steps per epoch for 4x4 TPU; per_core_batch_size=64; 300 epochs;
# so 2/3 of training time.
flags.DEFINE_integer('kl_annealing_steps', int(24.4 * 200),
'Number of steps over which to anneal the KL term to 1.')
flags.DEFINE_string('alpha_initializer', 'trainable_deterministic',
'Initializer name for the alpha parameters.')
flags.DEFINE_string('gamma_initializer', 'trainable_deterministic',
'Initializer name for the gamma parameters.')
flags.DEFINE_string('alpha_regularizer', None,
'Regularizer name for the alpha parameters.')
flags.DEFINE_string('gamma_regularizer', None,
'Regularizer name for the gamma parameters.')
flags.DEFINE_boolean('use_additive_perturbation', False,
'Use additive perturbations instead of multiplicative.')
flags.DEFINE_integer('num_train_samples', 1,
'Number of samples per example during training.')
flags.DEFINE_integer('num_eval_samples', 1,
'Number of samples per example during evaluation.')
# General model flags
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_bool(
'member_sampling', default=False,
help=('Whether or not to sample a single ensemble member per step with '
'which to compute the loss and derivatives.'))
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('random_sign_init', -0.5,
'Use random sign init for fast weights.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('fast_weight_lr_multiplier', 0.5,
'fast weights lr multiplier.')
flags.DEFINE_bool('version2', True, 'Use ensemble version2.')
flags.DEFINE_bool(
'expected_probs', default=False,
help=('Whether or not to compute the loss over the per-example average of '
'the predicted probabilities across the ensemble members.'))
flags.DEFINE_float('base_learning_rate', 0.1,
'Base learning rate when total training batch size is 128.')
flags.DEFINE_integer('lr_warmup_epochs', 1,
'Number of epochs for a linear warmup to the initial '
'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('lr_decay_ratio', 0.1, 'Amount to decay learning rate.')
flags.DEFINE_list('lr_decay_epochs', ['80', '160', '180'],
'Epochs to decay learning rate by.')
flags.DEFINE_float('dropout_rate', 0.,
'Dropout rate. Only used if alpha/gamma initializers are, '
'e.g., trainable normal with a fixed stddev.')
flags.DEFINE_float('l2', 2e-4, 'L2 coefficient.')
flags.DEFINE_enum('dataset', 'cifar10',
enum_values=['cifar10', 'cifar100'],
help='Dataset.')
# TODO(ghassen): consider adding CIFAR-100-C to TFDS.
flags.DEFINE_string('cifar100_c_path',
'',
'Path to the TFRecords files for CIFAR-100-C. Only valid '
'(and required) if dataset is cifar100 and corruptions are evaluated.')
flags.DEFINE_integer('corruptions_interval', 250,
'Number of epochs between evaluating on the corrupted '
'test data. Use -1 to never evaluate.')
flags.DEFINE_integer('checkpoint_interval', 25,
'Number of epochs between saving checkpoints. Use -1 to '
'never save checkpoints.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
flags.DEFINE_string('output_dir', '/tmp/cifar',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 250, 'Number of training epochs.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused arg.
tf.random.set_seed(FLAGS.seed)
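# With version2, each batch is tiled across ensemble members (and Monte Carlo
# samples) later in the training step, so the per-core batch size is divided
# down here, presumably to keep the effective per-core batch roughly constant.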
if FLAGS.version2:
per_core_bs_train = FLAGS.per_core_batch_size // (FLAGS.ensemble_size *
FLAGS.num_train_samples)
per_core_bs_eval = FLAGS.per_core_batch_size // (FLAGS.ensemble_size *
FLAGS.num_eval_samples)
else:
per_core_bs_train = FLAGS.per_core_batch_size // FLAGS.num_train_samples
per_core_bs_eval = FLAGS.per_core_batch_size // FLAGS.num_eval_samples
batch_size_train = per_core_bs_train * FLAGS.num_cores
batch_size_eval = per_core_bs_eval * FLAGS.num_cores
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
if FLAGS.dataset == 'cifar10':
dataset_builder_class = ub.datasets.Cifar10Dataset
else:
dataset_builder_class = ub.datasets.Cifar100Dataset
train_dataset_builder = dataset_builder_class(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
normalize=False)
train_dataset = train_dataset_builder.load(batch_size=batch_size_train)
train_dataset = strategy.experimental_distribute_dataset(train_dataset)
clean_test_dataset_builder = dataset_builder_class(
split=tfds.Split.TEST,
use_bfloat16=FLAGS.use_bfloat16,
normalize=False)
clean_test_dataset = clean_test_dataset_builder.load(
batch_size=batch_size_eval)
test_datasets = {
'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
}
if FLAGS.corruptions_interval > 0:
if FLAGS.dataset == 'cifar10':
load_c_dataset = utils.load_cifar10_c
else:
load_c_dataset = functools.partial(utils.load_cifar100_c,
path=FLAGS.cifar100_c_path)
corruption_types, max_intensity = utils.load_corrupted_test_info(
FLAGS.dataset)
for corruption in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset = load_c_dataset(
corruption_name=corruption,
corruption_intensity=intensity,
batch_size=batch_size_eval,
use_bfloat16=FLAGS.use_bfloat16,
normalize=False)
test_datasets['{0}_{1}'.format(corruption, intensity)] = (
strategy.experimental_distribute_dataset(dataset))
ds_info = tfds.builder(FLAGS.dataset).info
train_dataset_size = ds_info.splits['train'].num_examples
test_dataset_size = ds_info.splits['test'].num_examples
num_classes = ds_info.features['label'].num_classes
steps_per_epoch = train_dataset_size // batch_size_train
steps_per_eval = test_dataset_size // batch_size_eval
if FLAGS.use_bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building Keras ResNet-32 model')
model = resnet_cifar_model.rank1_resnet_v1(
input_shape=ds_info.features['image'].shape,
depth=32,
num_classes=num_classes,
width_multiplier=4,
alpha_initializer=FLAGS.alpha_initializer,
gamma_initializer=FLAGS.gamma_initializer,
alpha_regularizer=FLAGS.alpha_regularizer,
gamma_regularizer=FLAGS.gamma_regularizer,
use_additive_perturbation=FLAGS.use_additive_perturbation,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init,
dropout_rate=FLAGS.dropout_rate)
logging.info(model.summary())
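    # Scale the learning rate linearly with the global batch size; the flag
    # value is defined for a total batch size of 128.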
base_lr = FLAGS.base_learning_rate * batch_size_train / 128
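    # Rescale the decay epochs, which are specified relative to a 200-epoch
    # schedule, to the configured number of training epochs.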
lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
for start_epoch_str in FLAGS.lr_decay_epochs]
lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch,
base_lr,
decay_ratio=FLAGS.lr_decay_ratio,
decay_epochs=lr_decay_epochs,
warmup_epochs=FLAGS.lr_warmup_epochs)
optimizer = tf.keras.optimizers.SGD(
lr_schedule, momentum=0.9, nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/loss': tf.keras.metrics.Mean(),
'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
'test/loss': tf.keras.metrics.Mean(),
}
    corrupt_metrics = {}
    if FLAGS.corruptions_interval > 0:
for intensity in range(1, max_intensity + 1):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
if FLAGS.ensemble_size > 1:
metrics['test/diversity'] = rm.metrics.AveragePairwiseDiversity()
for i in range(FLAGS.ensemble_size):
metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
metrics['test/accuracy_member_{}'.format(i)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images, labels = inputs
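      # Tile the batch across ensemble members so each member sees the full
      # batch; labels are tiled to match unless a single member is sampled or
      # probabilities are averaged across members.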
if FLAGS.version2 and FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
if not (FLAGS.member_sampling or FLAGS.expected_probs):
labels = tf.tile(labels, [FLAGS.ensemble_size])
if FLAGS.num_train_samples > 1:
images = tf.tile(images, [FLAGS.num_train_samples, 1, 1, 1])
with tf.GradientTape() as tape:
logits = model(images, training=True)
probs = tf.nn.softmax(logits)
if FLAGS.num_train_samples > 1:
probs = tf.reshape(probs,
tf.concat([[FLAGS.num_train_samples, -1],
probs.shape[1:]], 0))
probs = tf.reduce_mean(probs, 0)
if FLAGS.member_sampling and FLAGS.version2 and FLAGS.ensemble_size > 1:
idx = tf.random.uniform([], maxval=FLAGS.ensemble_size,
dtype=tf.int64)
idx_one_hot = tf.expand_dims(tf.one_hot(idx, FLAGS.ensemble_size,
dtype=probs.dtype), 0)
probs_shape = probs.shape
probs = tf.reshape(probs, [FLAGS.ensemble_size, -1])
probs = tf.matmul(idx_one_hot, probs)
probs = tf.reshape(probs, tf.concat([[-1], probs_shape[1:]], 0))
elif FLAGS.expected_probs and FLAGS.version2 and FLAGS.ensemble_size > 1:
probs = tf.reshape(probs,
tf.concat([[FLAGS.ensemble_size, -1],
probs.shape[1:]], 0))
probs = tf.reduce_mean(probs, 0)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
filtered_variables = []
for var in model.trainable_variables:
          # Apply L2 on the slow weights and bias terms. This excludes BN
          # parameters and fast-weight approximate posterior/prior parameters,
          # but be mindful of their naming scheme.
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
kl = sum(model.losses) / train_dataset_size
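        # Anneal the KL term linearly over the first `kl_annealing_steps`
        # optimizer steps, capping the scale at 1.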
kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
kl_scale /= FLAGS.kl_annealing_steps
kl_scale = tf.minimum(1., kl_scale)
kl_loss = kl_scale * kl
        # Scale the loss, since the TPUStrategy sum-reduces gradients across replicas.
loss = negative_log_likelihood + l2_loss + kl_loss
scaled_loss = loss / strategy.num_replicas_in_sync
grads = tape.gradient(scaled_loss, model.trainable_variables)
# Separate learning rate implementation.
grad_list = []
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = list(zip(grads, model.trainable_variables))
for vec, var in grads_and_vars:
          # Apply a different learning rate to the fast-weight approximate
          # posterior/prior parameters. This excludes BN and slow weights,
          # but be mindful of the naming scheme.
if ('batch_norm' not in var.name and 'kernel' not in var.name):
grad_list.append((vec * FLAGS.fast_weight_lr_multiplier, var))
else:
grad_list.append((vec, var))
optimizer.apply_gradients(grad_list)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
metrics['train/ece'].update_state(labels, probs)
metrics['train/loss'].update_state(loss)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/accuracy'].update_state(labels, probs)
strategy.run(step_fn, args=(next(iterator),))
@tf.function
def test_step(iterator, dataset_name):
"""Evaluation StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images, labels = inputs
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
if FLAGS.num_eval_samples > 1:
images = tf.tile(images, [FLAGS.num_eval_samples, 1, 1, 1])
logits = model(images, training=False)
probs = tf.nn.softmax(logits)
if FLAGS.num_eval_samples > 1:
probs = tf.reshape(probs,
tf.concat([[FLAGS.num_eval_samples, -1],
probs.shape[1:]], 0))
probs = tf.reduce_mean(probs, 0)
if FLAGS.ensemble_size > 1:
per_probs = tf.split(probs,
num_or_size_splits=FLAGS.ensemble_size,
axis=0)
if dataset_name == 'clean':
per_probs_tensor = tf.reshape(
probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
metrics['test/diversity'].add_batch(per_probs_tensor)
for i in range(FLAGS.ensemble_size):
member_probs = per_probs[i]
member_nll = tf.keras.losses.sparse_categorical_crossentropy(
labels, member_probs)
metrics['test/nll_member_{}'.format(i)].update_state(member_nll)
metrics['test/accuracy_member_{}'.format(i)].update_state(
labels, member_probs)
probs = tf.reduce_mean(per_probs, axis=0)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
filtered_variables = []
for var in model.trainable_variables:
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
kl = sum(model.losses) / test_dataset_size
l2_loss = kl + FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
loss = negative_log_likelihood + l2_loss
if dataset_name == 'clean':
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].update_state(labels, probs)
metrics['test/loss'].update_state(loss)
else:
corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
labels, probs)
strategy.run(step_fn, args=(next(iterator),))
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
for step in range(steps_per_epoch):
train_step(train_iterator)
current_step = epoch * steps_per_epoch + (step + 1)
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
work_unit.set_notes(message)
if step % 20 == 0:
logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
test_iterator = iter(test_dataset)
logging.info('Testing on dataset %s', dataset_name)
for step in range(steps_per_eval):
if step % 20 == 0:
logging.info('Starting to run eval step %s of epoch: %s', step,
epoch)
test_step(test_iterator, dataset_name)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
corruption_types,
max_intensity)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
for i in range(FLAGS.ensemble_size):
logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
i, metrics['test/nll_member_{}'.format(i)].result(),
metrics['test/accuracy_member_{}'.format(i)].result() * 100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Results from Robustness Metrics themselves return a dict, so flatten them.
total_results = utils.flatten_dictionary(total_results)
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for name, result in total_results.items():
name = name.replace('/', '_')
if 'negative_log_likelihood' in name:
# Plots sort WIDs from high-to-low so look at maximization objectives.
name = name.replace('negative_log_likelihood', 'log_likelihood')
result = -result
objective = work_unit.get_measurement_series(name)
objective.create_measurement(result, epoch + 1)
for _, metric in metrics.items():
metric.reset_states()
for _, metric in corrupt_metrics.items():
metric.reset_states()
summary_writer.flush()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
if __name__ == '__main__':
app.run(main)
|
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections
import itertools
import sys
from paleomix.common.bamfiles import BAMRegionsIter
from paleomix.common.timer import BAMTimer
from paleomix.tools.bam_stats.common import (
collect_readgroups,
collect_references,
main_wrapper,
)
##############################################################################
##############################################################################
##
# Maximum depth to record, and hence the number of columns in output table
_MAX_DEPTH = 200
# Maximum number of count patterns (numbers of bases per library for a given
# site) to cache for bulk processing; see MappingToTotals for implementation
_MAX_CACHE_SIZE = 10000
# Header prepended to output tables
_HEADER = """# Columns:
# Contig: Contig, chromosome, or feature for which a depth histogram was
# created. Unnamed features are named after the chromosome or
# contig on which they are located, with a star appended. For
# example "chr1*". If the maximum number of contigs was exceeded,
# these are collapsed into one meta-contig named "<Genome>".
# Size: The total size of the region. Multiple features with the same
# name are combined into one row, with the size representing the
# total for these. Note that overlapping bases are counted 2 (or
# more) times.
# MaxDepth: Maximum depth to use when calling SNPs, in order to exclude
# (at least) the 0.5%% most extreme sites based on read depth,
# not including sites with depth 0.
# MD_*: Fraction of sites with a minimum depth of 1-200.
#"""
##############################################################################
##############################################################################
class MappingToTotals:
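    """Maps per-(sample, library) base counts at each position onto the shared
    depth-histogram accumulators (overall, per sample, per library, and per
    region)."""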
def __init__(self, totals, region, smlbid_to_smlb):
self._region = region
self._map_by_smlbid, self._totals_src_and_dst = self._build_mappings(
totals, region.name, smlbid_to_smlb
)
self._cache = collections.defaultdict(int)
def process_counts(self, counts, last_pos, cur_pos):
start = self._region.start
end = self._region.end
        # Pileups tend to contain identical stretches, so
        # try to avoid repeated lookups by aggregating these
repeats = 1
last_count = None
while counts and (last_pos < cur_pos):
count = counts.popleft()
if start <= last_pos < end:
if count == last_count:
repeats += 1
else:
if last_count is not None:
self._cache[tuple(last_count)] += repeats
last_count = count
repeats = 1
last_pos += 1
if last_count is not None:
self._cache[tuple(last_count)] += repeats
if len(self._cache) > _MAX_CACHE_SIZE:
self.finalize()
def finalize(self):
"""Process cached counts."""
for (count, multiplier) in self._cache.items():
self._update_totals(count, multiplier)
self._cache.clear()
    def _update_totals(self, count, multiplier=1):
        # Avoid shadowing the `count` argument while iterating its entries.
        for (smlbid, base_count) in enumerate(count):
            if base_count:
                for lst in self._map_by_smlbid[smlbid]:
                    lst[0] += base_count
for (dst_counts, src_count) in self._totals_src_and_dst:
if src_count[0]:
dst_counts[src_count[0]] += multiplier
src_count[0] = 0
@classmethod
def _build_mappings(cls, totals, name, smlbid_to_smlb):
# Accumulators mapped by sample+library IDs
totals_by_smlbid = [None] * len(smlbid_to_smlb)
# Accumulators mapped by the corresponding table keys
totals_by_table_key = {}
for (smlbid, (sm_key, lb_key)) in enumerate(smlbid_to_smlb):
keys = [
("*", "*", "*"),
(sm_key, "*", "*"),
(sm_key, "*", name),
(sm_key, lb_key, "*"),
(sm_key, lb_key, name),
]
mappings = cls._nonoverlapping_mappings(keys, totals, totals_by_table_key)
totals_by_smlbid[smlbid] = mappings
totals_src_and_dst = []
for (key, dst) in totals_by_table_key.items():
totals_src_and_dst.append((totals[key], dst))
return totals_by_smlbid, totals_src_and_dst
@classmethod
def _nonoverlapping_mappings(cls, keys, totals, totals_by_table_key):
"""Returns a tuple of accumulators for a given set of table keys. As
multiple table keys may share the same accumulator (e.g. if there is
only one sample, then sample "*" and that sample will be identical),
the tuple of accumulators may contain fewer items than keys."""
mapping = []
totals_used = set()
for key in keys:
# Check that accumulator is not already included
totals_id = id(totals[key])
if totals_id not in totals_used:
totals_used.add(totals_id)
accumulator = totals_by_table_key.setdefault(key, [0])
mapping.append(accumulator)
return tuple(mapping)
##############################################################################
##############################################################################
def calc_max_depth(counts):
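    """Return the depth cutoff that excludes (at least) the 0.5% highest-depth
    sites, ignoring sites with depth 0; returns "NA" if no sites are covered."""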
counts = dict(counts)
counts.pop(0, None)
running_total = sum(counts.values())
if not running_total:
return "NA"
total = float(running_total)
for (index, count) in sorted(counts.items()):
# Stop when less than the 0.5% most extreme values are included
if running_total / total < 0.005:
# The max is inclusive, so return the depth just before this one
return index - 1
running_total -= count
return "NA"
def print_table(handle, args, totals):
lengths = collect_references(args, handle)
if args.outfile == "-":
output_handle = sys.stdout
else:
output_handle = open(args.outfile, "w")
with output_handle:
rows = build_table(args.target_name, totals, lengths)
output_handle.write(_HEADER)
output_handle.write("\n")
for line in rows:
output_handle.write("\t".join(map(str, line)))
output_handle.write("\n")
def calculate_depth_pc(counts, length):
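    """Yield the MD_i columns: for i = 1.._MAX_DEPTH, the fraction of the
    region (of the given length) covered at a depth of at least i."""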
final_counts = [0] * (_MAX_DEPTH + 1)
for (depth, count) in counts.items():
final_counts[min(_MAX_DEPTH, depth)] += count
running_total = sum(final_counts)
total = float(length)
for count in final_counts[1:]:
yield "%.4f" % (running_total / total,)
running_total -= count
def build_table(name, totals, lengths):
header = ["Name", "Sample", "Library", "Contig", "Size", "MaxDepth"]
for index in range(1, _MAX_DEPTH + 1):
header.append("MD_%03i" % (index,))
yield header
last_sm = last_lb = None
for ((sm_key, lb_key, ct_key), counts) in sorted(totals.items()):
if (sm_key != last_sm) and (last_sm is not None):
yield "#"
yield "#"
elif (lb_key != last_lb) and (last_lb is not None):
yield "#"
last_sm, last_lb = sm_key, lb_key
if ct_key == "*":
length = sum(lengths.values())
else:
length = lengths[ct_key]
row = [name, sm_key, lb_key, ct_key, str(length), str(calc_max_depth(counts))]
row.extend(calculate_depth_pc(counts, length))
yield row
##############################################################################
##############################################################################
def build_key_struct(args, handle):
structure = collections.defaultdict(set)
for readgroup in collect_readgroups(args, handle).values():
lb_key = readgroup["LB"]
sm_key = readgroup["SM"]
structure[sm_key].add(lb_key)
return structure
def build_new_dicts(totals, dst_sm, dst_lb, references):
totals[(dst_sm, dst_lb, "*")] = collections.defaultdict(int)
for contig in references:
totals[(dst_sm, dst_lb, contig)] = collections.defaultdict(int)
def reuse_dicts(totals, dst_sm, dst_lb, src_sm, src_lb, references):
totals[(dst_sm, dst_lb, "*")] = totals[(src_sm, src_lb, "*")]
for contig in references:
totals[(dst_sm, dst_lb, contig)] = totals[(src_sm, src_lb, contig)]
def build_totals_dict(args, handle):
references = tuple(collect_references(args, handle))
structure = build_key_struct(args, handle)
totals = {}
for (sm_key, libraries) in structure.items():
for lb_key in libraries:
if len(references) == 1:
key = references[0]
counts = collections.defaultdict(int)
totals[(sm_key, lb_key, key)] = counts
totals[(sm_key, lb_key, "*")] = counts
else:
build_new_dicts(totals, sm_key, lb_key, references)
if len(libraries) == 1:
key = list(libraries)[0]
reuse_dicts(totals, sm_key, "*", sm_key, key, references)
else:
build_new_dicts(totals, sm_key, "*", references)
if len(structure) == 1:
key = list(structure)[0]
reuse_dicts(totals, "*", "*", key, "*", references)
else:
build_new_dicts(totals, "*", "*", references)
return totals
def count_bases(args, counts, record, rg_to_smlbid, template):
for _ in range(record.alen - len(counts)):
counts.append(list(template))
key = rg_to_smlbid.get(args.get_readgroup_func(record))
if key is None:
# Unknown readgroups are treated as missing readgroups
key = rg_to_smlbid[None]
index = 0
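    # CIGAR ops 0/7/8 (M/=/X) are aligned bases that add coverage; ops 2/3/6
    # (D/N/P) advance the position without adding coverage.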
for (cigar, count) in record.cigar:
if cigar in (0, 7, 8):
for counter in itertools.islice(counts, index, index + count):
counter[key] += 1
index += count
elif cigar in (2, 3, 6):
index += count
def build_rg_to_smlbid_keys(args, handle):
"""Returns a dictionary which maps a readgroup ID to an index value,
    as well as a list containing a tuple (sample, library) corresponding
to each index. Typically, this list will be shorter than the map of read-
groups, as multiple read-groups will map to the same sample / library.
"""
rg_to_lbsmid = {}
lbsm_to_lbsmid = {}
lbsmid_to_smlb = []
for (key_rg, readgroup) in collect_readgroups(args, handle).items():
key_sm = readgroup["SM"]
key_lb = readgroup["LB"]
key_lbsm = (key_sm, key_lb)
if key_lbsm not in lbsm_to_lbsmid:
lbsm_to_lbsmid[key_lbsm] = len(lbsm_to_lbsmid)
lbsmid_to_smlb.append(key_lbsm)
rg_to_lbsmid[key_rg] = lbsm_to_lbsmid[key_lbsm]
return rg_to_lbsmid, lbsmid_to_smlb
def process_file(handle, args):
timer = BAMTimer(handle)
last_tid = 0
totals = build_totals_dict(args, handle)
rg_to_smlbid, smlbid_to_smlb = build_rg_to_smlbid_keys(args, handle)
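    # Per-position counter template: one zeroed slot per (sample, library) pair.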
template = [0] * len(smlbid_to_smlb)
for region in BAMRegionsIter(handle, args.regions):
if region.name is None:
# Trailing unmapped reads
break
elif not args.regions and (handle.nreferences > args.max_contigs):
region.name = "<Genome>"
last_pos = 0
counts = collections.deque()
mapping = MappingToTotals(totals, region, smlbid_to_smlb)
for (position, records) in region:
mapping.process_counts(counts, last_pos, position)
for record in records:
timer.increment()
count_bases(args, counts, record, rg_to_smlbid, template)
if (region.tid, position) < (last_tid, last_pos):
sys.stderr.write("ERROR: Input BAM file is unsorted\n")
return 1
last_pos = position
last_tid = region.tid
# Process columns in region after last read
mapping.process_counts(counts, last_pos, float("inf"))
mapping.finalize()
timer.finalize()
if not args.ignore_readgroups:
        # Drop the "<NA>" (missing read-group) rows if no such reads were seen
for (key, _, _), value in totals.items():
if key == "<NA>" and value:
break
else:
for key in list(totals):
if key[0] == "<NA>":
totals.pop(key)
print_table(handle, args, totals)
return 0
def main(argv):
return main_wrapper(process_file, argv, ".depths")
##############################################################################
##############################################################################
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gi.repository import Gtk
import os
import webbrowser
import goslate
from DetectLanguage import get_language
from Preprocessing import Preprocess
from SentiAnalisys import senti_analisys
from ExecuteAll import ExecuteAll,profile_start,profile_stop
from XML_parser import parse_XML
def on_delete_event(widget,event):
window.show_all()
widget.destroy()
def keyPress(widget, event):
val = profile_start()
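    # 65293 is the GDK keyval for the Return/Enter key.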
if event.keyval == 65293:
sentence = widget.get_text()
sLower = sentence.lower()
ret = profile_start()
lng = get_language(sLower)
profile_stop("detect ln: ",ret)
try:
if lng != 'english':
gs = goslate.Goslate()
ret = profile_start()
translateS = gs.translate(sLower,'en')
profile_stop("translate",ret)
else:
translateS = sLower
ret = profile_start()
tokens,tokens_stemmed = Preprocess(translateS)
profile_stop("Prepocess",ret)
ret = profile_start()
sValue,moodValue = senti_analisys(tokens)
profile_stop("senti",ret)
if sValue ==1:
image.set_from_file("../images/happy.png")
elif sValue == 0:
image.set_from_file("../images/boh.png")
elif sValue == -1:
image.set_from_file("../images/sad.png")
except Exception as e:
md = Gtk.MessageDialog(None, 0,Gtk.MessageType.ERROR,Gtk.ButtonsType.OK, "No connection found!")
md.run()
md.destroy()
profile_stop("Totale",val)
def nameInserted(widget, event):
if event.keyval == 65293:
insertB = NewTestBuilder.get_object("button1")
insertB.set_sensitive(True)
insertB.connect('clicked', saveData)
def saveData(event):
    data = NewTestBuilder.get_object("textview1")
    dataName = NewTestBuilder.get_object("entry1")
    name = dataName.get_text()
    buffer = data.get_buffer()
    text = buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter(), False)
    control = True
    for line in text.split('\n'):
        p = line.split('|')
        try:
            if len(p) < 2 or int(p[1]) not in {1, -1}:
                control = False
        except ValueError:
            control = False
        if not control:
            label = NewTestBuilder.get_object("label1")
            label.set_text("ERROR, INVALID INPUT: enter each test line as sentence|1 or -1|DD/MM/YYYY")
            break
    if control:
        # Every line is valid: write the dataset once and return to the main window.
        with open("../db/" + name + ".txt", 'w') as file:
            file.write(text)
        liststore.append([name])
        windowNT = NewTestBuilder.get_object("window1")
        window.show_all()
        windowNT.destroy()
def load_new_test(event):
NewTestBuilder.add_from_file("InsertNewTest.glade")
windowNT = NewTestBuilder.get_object("window1")
windowNT.show_all()
window.hide()
windowNT.connect("delete-event", on_delete_event)
insertB = NewTestBuilder.get_object("button1")
insertB.set_sensitive(False)
dataName = NewTestBuilder.get_object("entry1")
dataName.connect('key-press-event', nameInserted)
def open_error(event):
global last_file_open
global lang
if last_file_open != None:
NewTestBuilder.add_from_file("InsertNewTest.glade")
windowError = NewTestBuilder.get_object("window2")
text = NewTestBuilder.get_object("textview2")
windowError.show_all()
window.hide()
windowError.connect("delete-event", on_delete_event)
buffer = Gtk.TextBuffer()
buffer.set_text(parse_XML(last_file_open,lang))
text.set_buffer(buffer)
def show_XML_results (event):
global last_file_open
if last_file_open != None:
webbrowser.open(last_file_open)
def exec_test(event):
ret = profile_start()
global last_file_open
global lang
model, treeiter = treeview.get_selection().get_selected()
if treeiter != None:
fin = '../db/'+ model[treeiter][0]+".txt"
fout = '../results/'+model[treeiter][0]+".xml"
last_file_open = fout
if model[treeiter][0] == "PopeTweets100" or model[treeiter][0] == "EnPopeTweets100":
resCase,prList,reList,lang = ExecuteAll(fin,fout,True)
else:
resCase,prList,reList,lang = ExecuteAll(fin,fout,False)
        buf = ''
        for i in range(len(prList)):
            buf += resCase[i] + '\n\t' + "PRECISION: " + str(prList[i]) + "%\n\tRECALL: " + str(reList[i]) + "%\n\n"
buffer = Gtk.TextBuffer()
buffer.set_text(buf)
results.set_buffer(buffer)
profile_stop("exec",ret)
def cold_start_up():
fs,fs2 = Preprocess("this prg is beautiful")
pd = senti_analisys(fs)
if __name__ == '__main__':
ret = profile_start()
NewTestBuilder = Gtk.Builder()
builder = Gtk.Builder()
builder.add_from_file("GUI.glade")
window = builder.get_object("MainWindow")
textField = builder.get_object("entry1")
textField.connect('key-press-event', keyPress)
image = builder.get_object("image1")
    # ListStore setup
treeview = builder.get_object("treeview1")
liststore = Gtk.ListStore(str)
for i in os.listdir(os.path.join("..","db")):
liststore.append([i.split(".")[0]])
treeview.set_model(model=liststore)
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Dataset", renderer, text=0)
treeview.append_column(column)
    # Button setup
execTest = builder.get_object("button4")
execTest.connect('clicked', exec_test)
loadTest = builder.get_object("button1")
loadTest.connect('clicked', load_new_test)
SentiError = builder.get_object("button2")
SentiError.connect('clicked', open_error)
showXML = builder.get_object("button3")
showXML.connect('clicked', show_XML_results)
results = builder.get_object("textview1")
last_file_open = None
lang = None
#cold_start_up()
window.show_all()
window.connect("delete-event", Gtk.main_quit)
profile_stop("Main",ret)
Gtk.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from datetime import datetime, date
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import CurrentUserDefault
from account.models import User
from account.serializers import IDUserSerializer, BasicUserSerializer
from agency.agencies import UNHCR
from agency.serializers import AgencySerializer, AgencyUserListSerializer
from common.consts import (
APPLICATION_STATUSES,
CFEI_TYPES,
CFEI_STATUSES,
DIRECT_SELECTION_SOURCE,
COMPLETED_REASON,
ALL_COMPLETED_REASONS,
OTHER_AGENCIES_DSR_COMPLETED_REASONS,
UNHCR_DSR_COMPLETED_REASONS,
)
from common.utils import get_countries_code_from_queryset, update_m2m_relation
from common.serializers import (
SimpleSpecializationSerializer,
PointSerializer,
CommonFileSerializer,
MixinPreventManyCommonFile,
)
from common.models import Point, Specialization
from notification.consts import NotificationType
from notification.helpers import user_received_notification_recently, send_notification_to_cfei_focal_points
from partner.serializers import PartnerSerializer, PartnerAdditionalSerializer, PartnerShortSerializer, \
PartnerSimpleSerializer
from partner.models import Partner
from project.identifiers import get_eoi_display_identifier
from project.models import EOI, Application, Assessment, ApplicationFeedback, EOIAttachment, \
ClarificationRequestQuestion, ClarificationRequestAnswerFile
from project.utilities import update_cfei_focal_points, update_cfei_reviewers
class EOIAttachmentSerializer(serializers.ModelSerializer):
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
file = CommonFileSerializer()
class Meta:
model = EOIAttachment
fields = (
'id',
'created_by',
'description',
'file',
)
class BaseProjectSerializer(serializers.ModelSerializer):
specializations = SimpleSpecializationSerializer(many=True)
agency = AgencySerializer()
created = serializers.SerializerMethodField()
country_code = serializers.SerializerMethodField()
focal_points = BasicUserSerializer(read_only=True, many=True)
class Meta:
model = EOI
fields = (
'id',
'displayID',
'title',
'created',
'country_code',
'specializations',
'agency',
'start_date',
'end_date',
'deadline_date',
'status',
'completed_date',
'focal_points',
)
def get_created(self, obj):
return obj.created.date()
def get_country_code(self, obj):
return get_countries_code_from_queryset(obj.locations)
class ApplicationsPartnerStatusSerializer(serializers.ModelSerializer):
legal_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'legal_name',
'partner_additional',
'application_status',
'application_status_display',
)
class DirectProjectSerializer(BaseProjectSerializer):
invited_partners = serializers.SerializerMethodField()
partner_offer_status = serializers.SerializerMethodField()
selected_source_display = serializers.CharField(source='get_selected_source_display', read_only=True)
class Meta:
model = EOI
fields = (
'id',
'title',
'created',
'country_code',
'specializations',
'agency',
'invited_partners',
'start_date',
'end_date',
'deadline_date',
'status',
'selected_source',
'selected_source_display',
'partner_offer_status',
)
def get_invited_partners(self, obj):
return obj.invited_partners.values_list('legal_name', flat=True)
def get_partner_offer_status(self, obj):
queryset = Application.objects.filter(eoi=obj)
return ApplicationsPartnerStatusSerializer(queryset, many=True).data
class CreateEOISerializer(serializers.ModelSerializer):
locations = PointSerializer(many=True)
attachments = EOIAttachmentSerializer(many=True, required=False)
def validate(self, attrs):
validated_data = super(CreateEOISerializer, self).validate(attrs)
date_field_names_that_should_be_in_this_order = [
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'start_date',
'end_date',
]
dates = []
for field_name in date_field_names_that_should_be_in_this_order:
dates.append(validated_data.get(field_name))
dates = list(filter(None, dates))
if not dates == sorted(dates):
raise serializers.ValidationError('Dates for the project are invalid.')
today = date.today()
if not all([d >= today for d in dates]):
raise serializers.ValidationError('Dates for the project cannot be set in the past.')
validated_data['displayID'] = get_eoi_display_identifier(
validated_data['agency'].name, validated_data['locations'][0]['admin_level_1']['country_code']
)
if len(validated_data.get('attachments', [])) > 5:
raise serializers.ValidationError({
'attachments': 'Maximum of 5 attachments is allowed.'
})
return validated_data
class Meta:
model = EOI
exclude = ('cn_template', )
extra_kwargs = {
'clarification_request_deadline_date': {
'required': True,
},
'deadline_date': {
'required': True,
},
'notif_results_date': {
'required': True,
},
}
class CreateDirectEOISerializer(CreateEOISerializer):
class Meta:
model = EOI
exclude = ('cn_template', 'deadline_date', 'clarification_request_deadline_date')
class CreateDirectApplicationSerializer(serializers.ModelSerializer):
class Meta:
model = Application
exclude = ("cn", "eoi", "agency", "submitter")
def validate_partner(self, partner):
if partner.is_hq:
raise ValidationError('HQs of International partners are not eligible for Direct Selections / Retention.')
if partner.is_locked:
raise ValidationError('Partner account has been locked and is no longer eligible for selection.')
if partner.has_red_flag:
raise ValidationError('Partner accounts with red flags are not eligible for selection.')
return partner
class CreateDirectApplicationNoCNSerializer(CreateDirectApplicationSerializer):
class Meta:
model = Application
exclude = ("cn", )
read_only_fields = ('submitter', 'eoi', 'agency',)
class ApplicationPartnerSerializer(serializers.ModelSerializer):
class Meta:
model = Application
fields = ('id', 'cn', 'created')
class ProposalEOIDetailsSerializer(serializers.Serializer):
specializations = serializers.SerializerMethodField()
title = serializers.CharField()
def get_specializations(self, obj):
return SimpleSpecializationSerializer(
Specialization.objects.filter(id__in=obj.get('specializations')), many=True
).data
class PartnerApplicationSerializer(MixinPreventManyCommonFile, serializers.ModelSerializer):
cn = CommonFileSerializer()
agency = AgencySerializer(read_only=True)
decision_date = serializers.DateField(source='partner_decision_date', read_only=True)
proposal_of_eoi_details = ProposalEOIDetailsSerializer(read_only=True)
locations_proposal_of_eoi = PointSerializer(many=True, read_only=True)
class Meta:
model = Application
editable_fields = (
'did_accept',
'did_decline',
'cn',
)
read_only_fields = (
'id',
'status',
'created',
'agency',
'did_win',
'did_withdraw',
'decision_date',
'is_published',
'published_timestamp',
'cfei_type',
'application_status',
'application_status_display',
'proposal_of_eoi_details',
'locations_proposal_of_eoi',
'is_unsolicited',
)
fields = editable_fields + read_only_fields
prevent_keys = ["cn"]
class ApplicationFullSerializer(serializers.ModelSerializer):
cn = CommonFileSerializer()
eoi_id = serializers.IntegerField(write_only=True)
partner = PartnerSerializer(read_only=True)
agency = AgencySerializer(read_only=True)
proposal_of_eoi_details = ProposalEOIDetailsSerializer(read_only=True)
locations_proposal_of_eoi = PointSerializer(many=True, read_only=True)
submitter = BasicUserSerializer(read_only=True, default=serializers.CurrentUserDefault())
is_direct = serializers.SerializerMethodField()
cfei_type = serializers.CharField(read_only=True)
application_status = serializers.CharField(read_only=True)
application_status_display = serializers.CharField(read_only=True)
assessments_is_completed = serializers.NullBooleanField(read_only=True)
assessments_marked_as_completed = serializers.NullBooleanField(read_only=True)
decision_date = serializers.DateField(source='partner_decision_date', read_only=True)
agency_decision_maker = BasicUserSerializer(read_only=True)
partner_decision_maker = BasicUserSerializer(read_only=True)
class Meta:
model = Application
exclude = (
'accept_notification',
)
read_only_fields = (
'cn',
'eoi',
'agency_decision_date',
'partner_decision_date',
'did_accept',
'did_decline',
)
def get_is_direct(self, obj):
return obj.eoi_converted is not None
def validate(self, data):
if isinstance(self.instance, Application):
app = self.instance
allowed_to_modify_status = list(app.eoi.focal_points.values_list('id', flat=True)) + [app.eoi.created_by_id]
if data.get("status") and self.context['request'].user.id not in allowed_to_modify_status:
raise serializers.ValidationError(
"Only Focal Point/Creator is allowed to pre-select/reject an application."
)
if data.get("status") == APPLICATION_STATUSES.rejected and \
Assessment.objects.filter(application=app).exists():
raise serializers.ValidationError("Since assessment has begun, application can't be rejected.")
if data.get("status") == APPLICATION_STATUSES.recommended:
if not app.status == APPLICATION_STATUSES.preselected:
raise serializers.ValidationError('Only Preselected applications can be recommended.')
if not app.assessments_is_completed:
raise serializers.ValidationError(
'Cannot recommend application before all assessments have been completed.'
)
if app.eoi.is_completed:
raise serializers.ValidationError("Since CFEI is completed, modification is forbidden.")
if data.get("did_win"):
if not app.eoi.review_summary_comment:
raise serializers.ValidationError({
'review_summary_comment': 'Review summary needs to be filled in before picking a winner.'
})
if not app.partner.is_verified:
raise serializers.ValidationError(
"You cannot award an application if the profile has not been verified yet."
)
if app.partner.has_red_flag:
raise serializers.ValidationError("You cannot award an application if the profile has red flag.")
if not app.assessments_is_completed:
raise serializers.ValidationError(
"You cannot award an application if all assessments have not been added for the application."
)
return super(ApplicationFullSerializer, self).validate(data)
def update(self, instance, validated_data):
if 'status' in validated_data:
with transaction.atomic():
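                # Lock the EOI row so concurrent status updates cannot clobber
                # preselected_partners.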
eoi = EOI.objects.select_for_update().get(pk=instance.eoi_id)
preselected_partners = set(eoi.preselected_partners)
if validated_data['status'] in {APPLICATION_STATUSES.preselected, APPLICATION_STATUSES.recommended}:
preselected_partners.add(instance.partner_id)
else:
preselected_partners.discard(instance.partner_id)
eoi.preselected_partners = list(preselected_partners)
eoi.save()
return super(ApplicationFullSerializer, self).update(instance, validated_data)
class ApplicationFullEOISerializer(ApplicationFullSerializer):
eoi = BaseProjectSerializer(read_only=True)
eoi_applications_count = serializers.SerializerMethodField(allow_null=True, read_only=True)
def get_eoi_applications_count(self, application):
return application.eoi and application.eoi.applications.count()
class ManageUCNSerializer(MixinPreventManyCommonFile, serializers.Serializer):
id = serializers.CharField(source="pk", read_only=True)
locations = PointSerializer(many=True, source='locations_proposal_of_eoi')
title = serializers.CharField(source='proposal_of_eoi_details.title')
agency = serializers.CharField(source='agency.id')
specializations = serializers.ListField(source='proposal_of_eoi_details.specializations')
cn = CommonFileSerializer()
prevent_keys = ["cn"]
@transaction.atomic
def create(self, validated_data):
self.prevent_many_common_file_validator(validated_data)
partner = self.context['request'].active_partner
locations = validated_data.pop('locations_proposal_of_eoi', [])
agency = validated_data.pop('agency')
app = Application.objects.create(
is_unsolicited=True,
is_published=False,
partner_id=partner.id,
eoi=None,
agency_id=agency['id'],
submitter=self.context['request'].user,
status=APPLICATION_STATUSES.pending,
proposal_of_eoi_details=validated_data['proposal_of_eoi_details'],
cn=validated_data['cn'],
)
for location in locations:
point = Point.objects.get_point(**location)
app.locations_proposal_of_eoi.add(point)
return app
@transaction.atomic
def update(self, instance, validated_data):
self.prevent_many_common_file_validator(validated_data)
instance.agency_id = validated_data.get('agency', {}).get('id') or instance.agency_id
instance.proposal_of_eoi_details = validated_data.get(
'proposal_of_eoi_details'
) or instance.proposal_of_eoi_details
instance.cn = validated_data.get('cn') or instance.cn
locations_data = self.initial_data.get('locations', [])
if locations_data:
instance.locations_proposal_of_eoi.clear()
for location_data in locations_data:
location_serializer = PointSerializer(data=location_data)
location_serializer.is_valid(raise_exception=True)
instance.locations_proposal_of_eoi.add(location_serializer.save())
instance.save()
return instance
class CreateDirectProjectSerializer(serializers.Serializer):
eoi = CreateDirectEOISerializer()
applications = CreateDirectApplicationNoCNSerializer(many=True)
def validate(self, attrs):
validated_data = super(CreateDirectProjectSerializer, self).validate(attrs)
if len(validated_data['applications']) > 1:
raise serializers.ValidationError({
'applications': 'Only one application is allowed for DSR'
})
return validated_data
@transaction.atomic
def create(self, validated_data):
locations = validated_data['eoi'].pop('locations')
specializations = validated_data['eoi'].pop('specializations')
focal_points = validated_data['eoi'].pop('focal_points')
attachments = validated_data['eoi'].pop('attachments', [])
validated_data['eoi']['display_type'] = CFEI_TYPES.direct
eoi = EOI.objects.create(**validated_data['eoi'])
for location in locations:
point = Point.objects.get_point(**location)
eoi.locations.add(point)
for specialization in specializations:
eoi.specializations.add(specialization)
for attachment_data in attachments:
attachment_data['eoi'] = eoi
EOIAttachment.objects.create(**attachment_data)
applications = []
for application_data in validated_data['applications']:
application = Application.objects.create(
partner=application_data['partner'],
eoi=eoi,
agency=eoi.agency,
submitter=validated_data['eoi']['created_by'],
status=APPLICATION_STATUSES.pending,
did_win=True,
ds_justification_select=application_data['ds_justification_select'],
justification_reason=application_data['justification_reason'],
ds_attachment=application_data.get('ds_attachment'),
)
applications.append(application)
update_cfei_focal_points(eoi, [f.id for f in focal_points])
return {
"eoi": eoi,
"applications": applications,
}
class CreateProjectSerializer(CreateEOISerializer):
class Meta(CreateEOISerializer.Meta):
model = EOI
exclude = CreateEOISerializer.Meta.exclude + (
'created_by',
)
@transaction.atomic
def create(self, validated_data):
locations = validated_data.pop('locations')
specializations = validated_data.pop('specializations')
focal_points = validated_data.pop('focal_points')
attachments = validated_data.pop('attachments', [])
validated_data['cn_template'] = validated_data['agency'].profile.eoi_template
validated_data['created_by'] = self.context['request'].user
self.instance = EOI.objects.create(**validated_data)
for location in locations:
point = Point.objects.get_point(**location)
self.instance.locations.add(point)
for specialization in specializations:
self.instance.specializations.add(specialization)
for focal_point in focal_points:
self.instance.focal_points.add(focal_point)
for attachment_data in attachments:
attachment_data['eoi'] = self.instance
EOIAttachment.objects.create(**attachment_data)
send_notification_to_cfei_focal_points(self.instance)
return self.instance
class SelectedPartnersSerializer(serializers.ModelSerializer):
partner_id = serializers.CharField(source="partner.id")
partner_name = serializers.CharField(source="partner.legal_name")
partner_is_verified = serializers.NullBooleanField(source="partner.is_verified")
application_status_display = serializers.CharField(read_only=True)
partner_profile_is_complete = serializers.BooleanField(read_only=True, source='partner.profile_is_complete')
class Meta:
model = Application
fields = (
'id',
'partner_id',
'partner_name',
'partner_is_verified',
'partner_profile_is_complete',
'application_status',
'application_status_display',
)
class SelectedPartnersJustificationSerializer(SelectedPartnersSerializer):
ds_attachment = CommonFileSerializer(read_only=True)
class Meta(SelectedPartnersSerializer.Meta):
fields = SelectedPartnersSerializer.Meta.fields + (
'ds_justification_select',
'justification_reason',
'ds_attachment',
)
class PartnerProjectSerializer(serializers.ModelSerializer):
agency = serializers.CharField(source='agency.name')
specializations = SimpleSpecializationSerializer(many=True)
locations = PointSerializer(many=True)
is_pinned = serializers.SerializerMethodField()
application = serializers.SerializerMethodField()
attachments = EOIAttachmentSerializer(many=True, read_only=True)
# TODO - cut down on some of these fields. partners should not get back this data
# Frontend currently breaks if doesn't receive all
class Meta:
model = EOI
fields = (
'id',
'displayID',
'specializations',
'locations',
'assessments_criteria',
'created',
'start_date',
'end_date',
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'justification',
'completed_reason',
'completed_date',
'is_completed',
'display_type',
'status',
'title',
'agency',
'agency_office',
'cn_template',
'description',
'goal',
'other_information',
'has_weighting',
'selected_source',
'is_pinned',
'application',
'published_timestamp',
'deadline_passed',
'clarification_request_deadline_passed',
'attachments',
'population',
)
read_only_fields = fields
def get_is_pinned(self, obj):
return obj.pins.filter(partner=self.context['request'].active_partner.id).exists()
def get_application(self, obj):
qs = obj.applications.filter(partner=self.context['request'].active_partner.id)
if qs.exists():
return ApplicationPartnerSerializer(qs.get()).data
return None
class AgencyProjectSerializer(serializers.ModelSerializer):
specializations = SimpleSpecializationSerializer(many=True, read_only=True)
locations = PointSerializer(many=True, read_only=True)
direct_selected_partners = serializers.SerializerMethodField()
focal_points_detail = BasicUserSerializer(source='focal_points', read_only=True, many=True)
reviewers_detail = BasicUserSerializer(source='reviewers', read_only=True, many=True)
invited_partners = PartnerShortSerializer(many=True, read_only=True)
applications_count = serializers.SerializerMethodField(allow_null=True, read_only=True)
attachments = EOIAttachmentSerializer(many=True, read_only=True)
current_user_finished_reviews = serializers.SerializerMethodField(allow_null=True, read_only=True)
current_user_marked_reviews_completed = serializers.SerializerMethodField(allow_null=True, read_only=True)
winning_partners = PartnerSimpleSerializer(many=True, allow_null=True)
class Meta:
model = EOI
fields = (
'id',
'displayID',
'specializations',
'invited_partners',
'locations',
'assessments_criteria',
'created',
'start_date',
'end_date',
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'justification',
'completed_reason',
'completed_reason_display',
'completed_retention',
'completed_comment',
'completed_date',
'is_completed',
'display_type',
'status',
'title',
'agency',
'created_by',
'focal_points',
'focal_points_detail',
'agency_office',
'cn_template',
'description',
'goal',
'other_information',
'has_weighting',
'reviewers',
'reviewers_detail',
'selected_source',
'direct_selected_partners',
'created',
'contains_partner_accepted',
'applications_count',
'is_published',
'deadline_passed',
'clarification_request_deadline_passed',
'published_timestamp',
'attachments',
'sent_for_decision',
'current_user_finished_reviews',
'current_user_marked_reviews_completed',
'assessments_marked_as_completed',
'contains_recommended_applications',
'winning_partners',
'population',
)
read_only_fields = (
'created',
'completed_date',
'is_published',
'published_timestamp',
'displayID',
'sent_for_decision',
)
def get_extra_kwargs(self):
extra_kwargs = super(AgencyProjectSerializer, self).get_extra_kwargs()
if self.instance and isinstance(self.instance, EOI):
if not self.instance.is_direct:
completed_reason_choices = COMPLETED_REASON
elif self.instance.agency.name == UNHCR.name:
completed_reason_choices = UNHCR_DSR_COMPLETED_REASONS
else:
completed_reason_choices = OTHER_AGENCIES_DSR_COMPLETED_REASONS
extra_kwargs['completed_reason'] = {
'choices': completed_reason_choices
}
return extra_kwargs
def get_direct_selected_partners(self, obj):
if obj.is_direct:
request = self.context.get('request')
if obj.is_completed or request and request.agency_member.office.agency == obj.agency:
serializer_class = SelectedPartnersJustificationSerializer
else:
serializer_class = SelectedPartnersSerializer
return serializer_class(obj.applications.all(), many=True).data
def get_applications_count(self, eoi):
return eoi.applications.count()
def get_current_user_finished_reviews(self, eoi):
request = self.context.get('request')
user = request and request.user
if user and eoi.reviewers.filter(id=user.id).exists():
applications = eoi.applications.filter(status=APPLICATION_STATUSES.preselected)
return applications.count() == user.assessments.filter(application__in=applications).count()
def get_current_user_marked_reviews_completed(self, eoi):
request = self.context.get('request')
user = request and request.user
if user and eoi.reviewers.filter(id=user.id).exists():
applications = eoi.applications.filter(status=APPLICATION_STATUSES.preselected)
return applications.count() == user.assessments.filter(application__in=applications, completed=True).count()
@transaction.atomic
def update(self, eoi: EOI, validated_data):
if eoi.status == CFEI_STATUSES.closed and not set(validated_data.keys()).issubset(
{'reviewers', 'focal_points', 'completed_reason', 'justification'}
):
raise serializers.ValidationError(
"Since CFEI deadline is passed, You can only modify reviewer(s) and/or focal point(s)."
)
completed_reason = validated_data.get('completed_reason')
if completed_reason:
if not validated_data.get('justification'):
raise serializers.ValidationError({
'justification': 'This field is required'
})
if completed_reason == ALL_COMPLETED_REASONS.accepted_retention and not validated_data.get(
'completed_retention'
):
raise serializers.ValidationError({
'completed_retention': 'This field is required'
})
if completed_reason in {
COMPLETED_REASON.partners,
ALL_COMPLETED_REASONS.accepted,
ALL_COMPLETED_REASONS.accepted_retention,
} and not eoi.contains_partner_accepted:
raise serializers.ValidationError({
                    'completed_reason': f"You've selected '{ALL_COMPLETED_REASONS[completed_reason]}' as "
                                        f"the finalization reason, but no partners have accepted."
})
has_just_been_completed = all([
eoi.completed_reason is None,
validated_data.get('completed_reason'),
eoi.completed_date is None,
eoi.is_completed is False
])
if has_just_been_completed:
eoi.completed_date = datetime.now()
eoi.is_completed = True
eoi = super(AgencyProjectSerializer, self).update(eoi, validated_data)
invited_partners = self.initial_data.get('invited_partners', [])
if invited_partners:
invited_partner_ids = [p['id'] for p in invited_partners]
eoi.invited_partners.through.objects.filter(eoi_id=eoi.id).exclude(
partner_id__in=invited_partner_ids
).delete()
eoi.invited_partners.add(*Partner.objects.filter(id__in=invited_partner_ids))
elif 'invited_partners' in self.initial_data:
eoi.invited_partners.clear()
specialization_ids = self.initial_data.get('specializations', [])
if specialization_ids:
eoi.specializations.through.objects.filter(eoi_id=eoi.id).exclude(
specialization_id__in=specialization_ids
).delete()
eoi.specializations.add(*Specialization.objects.filter(id__in=specialization_ids))
locations_data = self.initial_data.get('locations', [])
if locations_data:
eoi.locations.clear()
for location_data in locations_data:
location_serializer = PointSerializer(data=location_data)
location_serializer.is_valid(raise_exception=True)
eoi.locations.add(location_serializer.save())
update_cfei_reviewers(eoi, self.initial_data.get('reviewers'))
update_cfei_focal_points(eoi, self.initial_data.get('focal_points'))
update_m2m_relation(
eoi,
'attachments',
self.initial_data.get('attachments'),
EOIAttachmentSerializer,
context=self.context,
save_kwargs={
'eoi': eoi
}
)
if eoi.is_direct and self.initial_data.get('applications'):
# DSRs should only have 1 application
application_data = self.initial_data.get('applications')[0]
serializer = CreateDirectApplicationNoCNSerializer(
instance=eoi.applications.first(),
data=application_data,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return eoi
def validate(self, data):
assessments_criteria = data.get('assessments_criteria', [])
has_weighting = data.get('has_weighting', False)
        if has_weighting and not all('weight' in criterion for criterion in assessments_criteria):
            raise serializers.ValidationError(
                "Weight criteria must be provided since `has_weighting` is selected."
            )
        elif not has_weighting and any('weight' in criterion for criterion in assessments_criteria):
            raise serializers.ValidationError(
                "Weight criteria should not be provided since `has_weighting` is unselected."
            )
return super(AgencyProjectSerializer, self).validate(data)
class SimpleAssessmentSerializer(serializers.ModelSerializer):
reviewer_fullname = serializers.CharField(source='reviewer.fullname')
total_score = serializers.IntegerField()
class Meta:
model = Assessment
fields = (
'reviewer_fullname',
'note',
'total_score',
)
read_only_fields = fields
class ApplicationsListSerializer(serializers.ModelSerializer):
legal_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
type_org = serializers.CharField(source="partner.display_type")
cn = CommonFileSerializer()
your_score = serializers.SerializerMethodField()
your_score_breakdown = serializers.SerializerMethodField()
review_progress = serializers.SerializerMethodField()
assessments_completed = serializers.SerializerMethodField()
application_status_display = serializers.CharField(read_only=True)
assessments = SimpleAssessmentSerializer(many=True, read_only=True)
completed_assessments_count = serializers.SerializerMethodField()
average_scores = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'id',
'legal_name',
'partner_additional',
'type_org',
'status',
'cn',
'average_total_score',
'your_score',
'your_score_breakdown',
'review_progress',
'application_status_display',
'assessments',
'completed_assessments_count',
'average_scores',
'did_accept',
'did_decline',
'did_win',
'did_withdraw',
'assessments_completed',
)
def _get_review_reviewers_count(self, app):
return app.assessments.count(), app.eoi.reviewers.count()
def _get_my_assessment(self, obj):
assess_qs = obj.assessments.filter(reviewer=self.context['request'].user)
if assess_qs.exists():
return assess_qs.first()
return None
def get_your_score(self, obj):
my_assessment = self._get_my_assessment(obj)
return my_assessment.total_score if my_assessment else None
def get_your_score_breakdown(self, obj):
my_assessment = self._get_my_assessment(obj)
return my_assessment.get_scores_as_dict() if my_assessment else None
def get_review_progress(self, obj):
return '{}/{}'.format(*self._get_review_reviewers_count(obj))
def get_assessments_completed(self, obj):
return obj.eoi.reviewers.count() == self.get_completed_assessments_count(obj)
def get_completed_assessments_count(self, obj):
return obj.assessments.filter(completed=True).count()
def get_average_scores(self, obj):
scores = defaultdict(int)
total = 0
for assessment in obj.assessments.filter(completed=True):
for score in assessment.scores:
scores[score['selection_criteria']] += score['score']
total += 1
if not total:
return {}
return {
k: v / total for k, v in scores.items()
}
class ReviewersApplicationSerializer(serializers.ModelSerializer):
assessment = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = (
'id',
'fullname',
'assessment',
)
def get_assessment(self, obj):
application_id = self.context['request'].parser_context['kwargs']['application_id']
assessment = Assessment.objects.filter(application=application_id, reviewer=obj)
return ReviewerAssessmentsSerializer(assessment, many=True).data
class ReviewerAssessmentsSerializer(serializers.ModelSerializer):
total_score = serializers.IntegerField(read_only=True)
reviewer = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
modified_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
class Meta:
model = Assessment
fields = (
'id',
'reviewer',
'created_by',
'modified_by',
'application',
'scores',
'total_score',
'date_reviewed',
'is_a_committee_score',
'note',
'completed',
'completed_date',
)
read_only_fields = (
'created_by', 'modified_by', 'completed', 'completed_date',
)
def get_extra_kwargs(self):
extra_kwargs = super(ReviewerAssessmentsSerializer, self).get_extra_kwargs()
if self.instance:
extra_kwargs['application'] = {
'read_only': True
}
return extra_kwargs
def validate(self, data):
kwargs = self.context['request'].parser_context.get('kwargs', {})
application_id = kwargs.get(self.context['view'].application_url_kwarg)
app = get_object_or_404(Application.objects.select_related('eoi'), pk=application_id)
if app.eoi.status != CFEI_STATUSES.closed:
raise serializers.ValidationError("Assessment allowed once deadline is passed.")
if data.get('is_a_committee_score', False) and app.eoi.reviewers.count() > 1:
raise serializers.ValidationError({
'is_a_committee_score': 'Committee scores are only allowed on projects with one reviewer.'
})
scores = data.get('scores')
application = self.instance.application if self.instance else app
assessments_criteria = application.eoi.assessments_criteria
if scores and not {s['selection_criteria'] for s in scores} == {
ac['selection_criteria'] for ac in assessments_criteria
}:
raise serializers.ValidationError("You can score only selection criteria defined in CFEI.")
if scores and application.eoi.has_weighting:
for score in scores:
key = score.get('selection_criteria')
val = score.get('score')
criterion = list(filter(lambda x: x.get('selection_criteria') == key, assessments_criteria))
if len(criterion) == 1 and val > criterion[0].get('weight'):
raise serializers.ValidationError("The maximum score is equal to the value entered for the weight.")
elif len(criterion) != 1:
raise serializers.ValidationError("Selection criterion '{}' defined improper.".format(key))
return super(ReviewerAssessmentsSerializer, self).validate(data)
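# A minimal sketch of the score/criteria matching enforced above (hypothetical values):
#   eoi.assessments_criteria = [{'selection_criteria': 'cost', 'weight': 40},
#                               {'selection_criteria': 'experience', 'weight': 60}]
#   valid 'scores' payload:     [{'selection_criteria': 'cost', 'score': 35},
#                                {'selection_criteria': 'experience', 'score': 55}]
# With has_weighting enabled, each score may not exceed the weight of its criterion.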
class ApplicationPartnerOpenSerializer(serializers.ModelSerializer):
project_title = serializers.CharField(source="eoi.title")
project_displayID = serializers.CharField(source="eoi.displayID")
agency_name = serializers.CharField(source="agency.name")
country = serializers.SerializerMethodField()
specializations = serializers.SerializerMethodField()
application_date = serializers.CharField(source="created")
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'id',
'project_title',
'project_displayID',
'eoi_id',
'agency_name',
'country',
'specializations',
'application_date',
'application_status',
'application_status_display',
)
def get_country(self, obj):
return get_countries_code_from_queryset(obj.eoi.locations)
def get_specializations(self, obj):
return SimpleSpecializationSerializer(obj.eoi.specializations.all(), many=True).data
class ApplicationPartnerUnsolicitedDirectSerializer(serializers.ModelSerializer):
project_title = serializers.SerializerMethodField()
agency_name = serializers.CharField(source="agency.name")
country = serializers.SerializerMethodField()
specializations = serializers.SerializerMethodField()
submission_date = serializers.DateTimeField(source="published_timestamp")
is_direct = serializers.SerializerMethodField()
partner_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
selected_source = serializers.CharField(source="eoi.selected_source", allow_null=True)
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'id',
'project_title',
'selected_source',
'eoi_id',
'agency_name',
'country',
'specializations',
'submission_date',
'status',
'is_direct',
'partner_name',
'partner_additional',
'application_status',
'application_status_display',
)
def get_project_title(self, obj):
return obj.proposal_of_eoi_details.get('title')
def get_country(self, obj):
if obj.eoi:
# has been converted to a direct selection
country = obj.eoi.locations
else:
country = obj.locations_proposal_of_eoi
if country:
# we expect a few countries here
return get_countries_code_from_queryset(country)
return None
# TODO - make the field names consistent between here and the application details serializer
# (application details nests these under proposal_of_eoi_details)
def get_specializations(self, obj):
return SimpleSpecializationSerializer(
Specialization.objects.filter(id__in=obj.proposal_of_eoi_details.get('specializations')), many=True).data
def get_is_direct(self, obj):
return obj.eoi_converted is not None
class ApplicationPartnerDirectSerializer(ApplicationPartnerUnsolicitedDirectSerializer):
project_title = serializers.CharField(source="eoi.title")
specializations = serializers.SerializerMethodField()
def get_specializations(self, obj):
return SimpleSpecializationSerializer(obj.eoi.specializations.all(), many=True).data
class AgencyUnsolicitedApplicationSerializer(ApplicationPartnerUnsolicitedDirectSerializer):
has_yellow_flag = serializers.BooleanField(source="partner.has_yellow_flag")
has_red_flag = serializers.BooleanField(source="partner.has_red_flag")
is_ds_converted = serializers.SerializerMethodField()
class Meta:
model = Application
fields = ApplicationPartnerUnsolicitedDirectSerializer.Meta.fields + (
'has_red_flag',
'has_yellow_flag',
'partner_is_verified',
'is_ds_converted',
)
def get_is_ds_converted(self, obj):
return obj.eoi_converted is not None
class ApplicationFeedbackSerializer(serializers.ModelSerializer):
provider = AgencyUserListSerializer(read_only=True)
class Meta:
model = ApplicationFeedback
fields = ('id', 'feedback', 'provider', 'created')
class ConvertUnsolicitedSerializer(serializers.Serializer):
RESTRICTION_MSG = 'Unsolicited concept note already converted to a direct selection project.'
ds_justification_select = serializers.ListField()
justification = serializers.CharField(source="eoi.justification")
focal_points = IDUserSerializer(many=True, source="eoi.focal_points", read_only=True)
description = serializers.CharField(source="eoi.description")
other_information = serializers.CharField(
source="eoi.other_information", required=False, allow_blank=True, allow_null=True)
start_date = serializers.DateField(source="eoi.start_date")
end_date = serializers.DateField(source="eoi.end_date")
class Meta:
model = Application
def validate(self, data):
application_id = self.context['request'].parser_context.get('kwargs', {}).get('pk')
if Application.objects.get(id=application_id).eoi_converted is not None:
raise serializers.ValidationError(self.RESTRICTION_MSG)
return super(ConvertUnsolicitedSerializer, self).validate(data)
@transaction.atomic
def create(self, validated_data):
ds_justification_select = validated_data.pop('ds_justification_select')
focal_points = self.initial_data.get('focal_points', [])
submitter = self.context['request'].user
app_id = self.context['request'].parser_context['kwargs']['pk']
application: Application = get_object_or_404(
Application,
id=app_id,
is_unsolicited=True,
eoi_converted__isnull=True
)
if not application.locations_proposal_of_eoi.first():
raise serializers.ValidationError('Invalid application, no locations specified.')
eoi: EOI = EOI(**validated_data['eoi'])
eoi.displayID = get_eoi_display_identifier(
application.agency.name, application.locations_proposal_of_eoi.first().admin_level_1.country_code
)
eoi.created_by = submitter
eoi.display_type = CFEI_TYPES.direct
eoi.title = application.proposal_of_eoi_details.get('title')
eoi.agency = application.agency
# we can use .get() directly because an agency member belongs to exactly one agency office
eoi.agency_office = submitter.agency_members.get().office
eoi.selected_source = DIRECT_SELECTION_SOURCE.ucn
eoi.is_published = True
eoi.save()
for specialization in application.proposal_of_eoi_details.get('specializations', []):
eoi.specializations.add(specialization)
for location in application.locations_proposal_of_eoi.all():
eoi.locations.add(location)
application.ds_justification_select = ds_justification_select
application.eoi_converted = eoi
application.save()
ds_app = Application.objects.create(
partner=application.partner,
eoi=eoi,
agency=eoi.agency,
submitter=application.submitter,
status=APPLICATION_STATUSES.pending,
did_win=True,
did_accept=False,
ds_justification_select=ds_justification_select,
justification_reason=application.justification_reason
)
update_cfei_focal_points(eoi, focal_points)
return ds_app
class ReviewSummarySerializer(MixinPreventManyCommonFile, serializers.ModelSerializer):
review_summary_attachment = CommonFileSerializer(required=False, allow_null=True)
class Meta:
model = EOI
fields = (
'review_summary_comment',
'review_summary_attachment',
)
prevent_keys = ['review_summary_attachment']
def update(self, instance, validated_data):
self.prevent_many_common_file_validator(self.initial_data)
return super(ReviewSummarySerializer, self).update(instance, validated_data)
class EOIReviewersAssessmentsSerializer(serializers.ModelSerializer):
user_id = serializers.CharField(source='id')
user_name = serializers.CharField(source='get_fullname')
assessments = serializers.SerializerMethodField()
class Meta:
model = User
fields = (
'user_id',
'user_name',
'assessments',
)
def get_assessments(self, user):
lookup_field = self.context['view'].lookup_field
eoi_id = self.context['request'].parser_context['kwargs'][lookup_field]
eoi = get_object_or_404(EOI, id=eoi_id)
applications = eoi.applications.filter(status__in=[
APPLICATION_STATUSES.preselected, APPLICATION_STATUSES.recommended,
])
applications_count = applications.count()
assessments_count = Assessment.objects.filter(reviewer=user, application__in=applications).count()
reminder_sent_recently = user_received_notification_recently(user, eoi, NotificationType.CFEI_REVIEW_REQUIRED)
return {
'counts': "{}/{}".format(assessments_count, applications_count),
'send_reminder': not (applications_count == assessments_count) and not reminder_sent_recently,
'eoi_id': eoi_id,  # useful for the front-end to more easily construct the send-reminder URL
}
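# Illustrative return value of get_assessments above (hypothetical counts):
#   {'counts': '2/5', 'send_reminder': True, 'eoi_id': '17'}
# send_reminder is False once all assessments are in or a reminder was sent recently.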
class AwardedPartnersSerializer(serializers.ModelSerializer):
partner_id = serializers.CharField(source='partner.id')
partner_name = serializers.CharField(source='partner.legal_name')
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
application_id = serializers.CharField(source='id')
cn = CommonFileSerializer()
partner_notified = serializers.SerializerMethodField()
agency_decision_maker = BasicUserSerializer(read_only=True)
partner_decision_maker = BasicUserSerializer(read_only=True)
body = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'partner_id',
'partner_name',
'partner_additional',
'application_id',
'did_win',
'did_withdraw',
'withdraw_reason',
'did_decline',
'did_accept',
'cn',
'partner_notified',
'agency_decision_date',
'agency_decision_maker',
'partner_decision_date',
'partner_decision_maker',
'body',
)
def get_body(self, obj):
assessments_count = obj.assessments.count()
assessments = obj.assessments.all()
notes = []
for assessment in assessments:
notes.append({
'note': assessment.note,
'reviewer': assessment.reviewer.get_fullname(),
})
return {
'criteria': obj.get_scores_by_selection_criteria(),
'notes': notes,
'avg_total_score': obj.average_total_score,
'assessment_count': assessments_count,
}
def get_partner_notified(self, obj):
return obj.accept_notification and obj.accept_notification.created
class CompareSelectedSerializer(serializers.ModelSerializer):
partner_id = serializers.IntegerField(source='partner.id')
partner_name = serializers.CharField(source='partner.legal_name')
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
year_establishment = serializers.IntegerField(source='partner.profile.year_establishment')
total_assessment_score = serializers.IntegerField(source='average_total_score')
verification_status = serializers.BooleanField(source="partner.is_verified")
flagging_status = serializers.JSONField(source="partner.flagging_status")
annual_budget = serializers.SerializerMethodField()
un_exp = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'partner_id',
'partner_name',
'partner_additional',
'year_establishment',
'eoi_id',
'total_assessment_score',
'verification_status',
'flagging_status',
'un_exp',
'annual_budget',
'did_win',
'did_withdraw',
'assessments_is_completed',
'assessments_marked_as_completed',
)
def get_annual_budget(self, obj):
return obj.partner.profile.annual_budget
def get_un_exp(self, obj):
return ", ".join(obj.partner.collaborations_partnership.all().values_list('agency__name', flat=True))
class SubmittedCNSerializer(serializers.ModelSerializer):
cn_id = serializers.IntegerField(source='id')
agency_name = serializers.CharField(source="agency.name")
specializations = serializers.SerializerMethodField()
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'cn_id',
'project_title',
'cfei_type',
'agency_name',
'countries',
'specializations',
'application_status',
'application_status_display',
'eoi_id'
)
def get_specializations(self, obj):
if obj.is_unsolicited:
queryset = Specialization.objects.filter(id__in=obj.proposal_of_eoi_details.get('specializations'))
else:
queryset = obj.eoi.specializations.all()
return SimpleSpecializationSerializer(queryset, many=True).data
class PendingOffersSerializer(SubmittedCNSerializer):
class Meta:
model = Application
fields = (
'cn_id',
'project_title',
'cfei_type',
'agency_name',
'countries',
'specializations',
'eoi_id'
)
class ClarificationRequestQuestionSerializer(serializers.ModelSerializer):
created_by = BasicUserSerializer(read_only=True)
partner = PartnerSimpleSerializer(read_only=True)
class Meta:
model = ClarificationRequestQuestion
fields = (
'id',
'created',
'created_by',
'partner',
'question',
)
read_only_fields = (
'created',
)
class ClarificationRequestAnswerFileSerializer(serializers.ModelSerializer):
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
file = CommonFileSerializer()
class Meta:
model = ClarificationRequestAnswerFile
fields = (
'id',
'created_by',
'title',
'file',
)
|
|
"""Stand-alone entry point for running Pulsar without a web server.
In its simplest form, this script will check the current directory for an
app.yml and run the corresponding configuration as a standalone application.
This makes sense when ``app.yml`` contains a ``message_queue_url`` option so
Pulsar is configured to listen to a message queue and doesn't require a web
server.
The following commands can be used to bootstrap such a setup.::
mkdir pulsar-mq-config
cd pulsar-mq-config
pulsar-config --mq
pulsar-main
This script can be used in a standalone fashion, but it is generally better to
run the ``pulsar`` script with ``--mode webless`` - which will in turn
delegate to this script.
"""
import logging
from logging.config import fileConfig
import os
import functools
import time
import sys
import configparser
try:
import yaml
except ImportError:
yaml = None # type: ignore
try:
from daemonize import Daemonize
except ImportError:
Daemonize = None
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
log = logging.getLogger(__name__)
REQUIRES_DAEMONIZE_MESSAGE = "Attempted to use Pulsar in daemon mode, but daemonize is unavailable."
PULSAR_ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if "PULSAR_CONFIG_DIR" in os.environ:
PULSAR_CONFIG_DIR = os.path.abspath(os.environ["PULSAR_CONFIG_DIR"])
else:
PULSAR_CONFIG_DIR = PULSAR_ROOT_DIR
DEFAULT_INI_APP = "main"
DEFAULT_INI = "server.ini"
DEFAULT_APP_YAML = "app.yml"
DEFAULT_MANAGER = "_default_"
DEFAULT_PID = "pulsar.pid"
DEFAULT_VERBOSE = True
HELP_CONFIG_DIR = "Default directory to search for relevant Pulsar configuration files (e.g. app.yml, server.ini)."
HELP_INI_PATH = "Specify an explicit path to Pulsar's server.ini configuration file."
HELP_APP_CONF_PATH = "Specify an explicit path to Pulsar's app.yml configuration file."
HELP_APP_CONF_BASE64 = "Specify an application configuration as a base64 encoded JSON blob."
HELP_DAEMONIZE = "Daemonzie process (requires daemonize library)."
CONFIG_PREFIX = "PULSAR_CONFIG_"
LOGGING_CONFIG_DEFAULT = {
'version': 1,
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'loggers': {
'pulsar': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': 0,
'qualname': 'pulsar',
},
'galaxy': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': 0,
'qualname': 'pulsar',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stderr',
},
},
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s'
},
},
}
def load_pulsar_app(
config_builder,
config_env=False,
log=None,
**kwds
):
# Allow specification of log so daemon can reuse properly configured one.
if log is None:
log = logging.getLogger(__name__)
# If called in daemon mode, set the ROOT directory and ensure Pulsar is on
# sys.path.
if config_env:
try:
os.chdir(PULSAR_ROOT_DIR)
except Exception:
log.exception("Failed to chdir")
raise
try:
sys.path.append(PULSAR_ROOT_DIR)
except Exception:
log.exception("Failed to add Pulsar to sys.path")
raise
config_builder.setup_file_logging()
config = config_builder.load()
config.update(kwds)
import pulsar.core
pulsar_app = pulsar.core.PulsarApp(**config)
return pulsar_app
def app_loop(args, log, config_env):
pulsar_app = _app(args, log, config_env)
sleep = True
while sleep:
try:
time.sleep(5)
except KeyboardInterrupt:
sleep = False
except SystemExit:
sleep = False
except Exception:
pass
try:
pulsar_app.shutdown()
except Exception:
log.exception("Failed to shutdown Pulsar application")
raise
def _app(args, log, config_env):
try:
config_builder = PulsarConfigBuilder(args)
pulsar_app = load_pulsar_app(
config_builder,
config_env=config_env,
log=log,
)
except BaseException:
log.exception("Failed to initialize Pulsar application")
raise
return pulsar_app
def absolute_config_path(path, config_dir):
if path and not os.path.isabs(path):
path = os.path.join(config_dir, path)
return path
def _find_default_app_config(*config_dirs):
for config_dir in config_dirs:
app_config_path = os.path.join(config_dir, DEFAULT_APP_YAML)
if os.path.exists(app_config_path):
return app_config_path
return None
def apply_env_overrides_and_defaults(conf):
override_prefix = "%sOVERRIDE_" % CONFIG_PREFIX
for key in os.environ:
if key == 'PULSAR_CONFIG_DIR':
conf['config_dir'] = os.environ[key]
elif key.startswith(override_prefix):
config_key = key[len(override_prefix):].lower()
conf[config_key] = os.environ[key]
elif key.startswith(CONFIG_PREFIX):
config_key = key[len(CONFIG_PREFIX):].lower()
if config_key not in conf:
conf[config_key] = os.environ[key]
return conf
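# A small illustration (hypothetical variable names and values) of the precedence implemented above:
#   os.environ['PULSAR_CONFIG_OVERRIDE_STAGING_DIRECTORY'] = '/scratch/pulsar'  # always wins
#   os.environ['PULSAR_CONFIG_PRIVATE_TOKEN'] = 'secret'                        # only fills in missing keys
#   conf = apply_env_overrides_and_defaults({'staging_directory': '/tmp/staging'})
#   # conf['staging_directory'] == '/scratch/pulsar'
#   # conf['private_token'] == 'secret'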
def load_app_configuration(ini_path=None, app_conf_path=None, app_name=None, local_conf=None, config_dir=PULSAR_CONFIG_DIR):
"""
"""
if ini_path and local_conf is None:
from pulsar.util.pastescript.loadwsgi import ConfigLoader
local_conf = ConfigLoader(ini_path).app_context(app_name).config()
local_conf = local_conf or {}
local_conf['config_dir'] = config_dir
if app_conf_path is None and "app_config" in local_conf:
app_conf_path = absolute_config_path(local_conf["app_config"], config_dir)
if not os.path.exists(app_conf_path) and os.path.exists(app_conf_path + ".sample"):
app_conf_path = app_conf_path + ".sample"
elif ini_path:
# If no explicit app.yml was supplied, look for one next to the server.ini -
# be it in the pulsar root, some temporary staging directory, or /etc.
app_conf_path = _find_default_app_config(
os.path.dirname(ini_path),
)
if app_conf_path:
if yaml is None:
raise Exception("Cannot load configuration from file %s, pyyaml is not available." % app_conf_path)
with open(app_conf_path) as f:
app_conf = yaml.safe_load(f) or {}
local_conf.update(app_conf)
return apply_env_overrides_and_defaults(local_conf)
def find_ini(supplied_ini, config_dir):
if supplied_ini:
return supplied_ini
# If an ini wasn't explicitly supplied, check for server.ini and then
# fall back to the sample if that has not been configured.
for guess in ["server.ini", "server.ini.sample"]:
ini_path = os.path.join(config_dir, guess)
if os.path.exists(ini_path):
return ini_path
return guess
class PulsarConfigBuilder:
""" Generate paste-like configuration from supplied command-line arguments.
"""
def __init__(self, args=None, **kwds):
config_dir = kwds.get("config_dir", None) or (args and args.config_dir) or PULSAR_CONFIG_DIR
ini_path = kwds.get("ini_path", None) or (args and args.ini_path)
app_conf_path = kwds.get("app_conf_path", None) or (args and args.app_conf_path)
app_conf_base64 = args and args.app_conf_base64
if not app_conf_base64 and not app_conf_path:
# If given app_conf_path - use that - else we need to ensure we have an
# ini path.
ini_path = find_ini(ini_path, config_dir)
ini_path = absolute_config_path(ini_path, config_dir=config_dir)
self.config_dir = config_dir
self.ini_path = ini_path
self.app_conf_path = app_conf_path
self.app_conf_base64 = app_conf_base64
self.app_name = kwds.get("app") or (args and args.app) or DEFAULT_INI_APP
@classmethod
def populate_options(cls, arg_parser):
arg_parser.add_argument("-c", "--config_dir", default=None, help=HELP_CONFIG_DIR)
arg_parser.add_argument("--ini_path", default=None, help=HELP_INI_PATH)
arg_parser.add_argument("--app_conf_path", default=None, help=HELP_APP_CONF_PATH)
arg_parser.add_argument("--app_conf_base64", default=None, help=HELP_APP_CONF_BASE64)
arg_parser.add_argument("--app", default=DEFAULT_INI_APP)
# daemon related options...
arg_parser.add_argument("-d", "--daemonize", default=False, help=HELP_DAEMONIZE, action="store_true")
arg_parser.add_argument("--daemon-log-file", default=None, help="Log file for daemon, if --daemonize supplied.")
arg_parser.add_argument("--pid-file", default=DEFAULT_PID, help="Pid file for daemon, if --daemonize supplied (default is %s)." % DEFAULT_PID)
def load(self):
load_kwds = dict(
app_name=self.app_name,
config_dir=self.config_dir,
)
if self.app_conf_base64:
from pulsar.client.util import from_base64_json
local_conf = from_base64_json(self.app_conf_base64)
self.setup_dict_logging(local_conf)
load_kwds["local_conf"] = local_conf
else:
load_kwds.update(dict(
config_dir=self.config_dir,
ini_path=self.ini_path,
app_conf_path=self.app_conf_path,
))
return load_app_configuration(**load_kwds)
def setup_file_logging(self):
if self.ini_path:
raw_config = configparser.ConfigParser()
raw_config.read([self.ini_path])
# https://github.com/mozilla-services/chaussette/pull/32/files
if raw_config.has_section('loggers'):
config_file = os.path.abspath(self.ini_path)
fileConfig(
config_file,
dict(__file__=config_file, here=os.path.dirname(config_file))
)
def setup_dict_logging(self, config):
logging_conf = config.get('logging', None)
if logging_conf is None:
# no logging config supplied - fall back to the default logging configuration
logging_conf = LOGGING_CONFIG_DEFAULT
logging.config.dictConfig(logging_conf)
def to_dict(self):
return dict(
config_dir=self.config_dir,
ini_path=self.ini_path,
app_conf_path=self.app_conf_path,
app=self.app_name
)
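# Illustrative standalone use of the builder (assumes the referenced files exist):
#   builder = PulsarConfigBuilder(config_dir='/srv/pulsar', app_conf_path='/srv/pulsar/app.yml')
#   conf = builder.load()   # ini app section (if any) + app.yml + PULSAR_CONFIG_* environment overrides
#   app = load_pulsar_app(builder)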
class PulsarManagerConfigBuilder(PulsarConfigBuilder):
def __init__(self, args=None, **kwds):
super().__init__(args=args, **kwds)
self.manager = kwds.get("manager", None) or (args and args.manager) or DEFAULT_MANAGER
def to_dict(self):
as_dict = super().to_dict()
as_dict["manager"] = self.manager
return as_dict
@classmethod
def populate_options(cls, arg_parser):
PulsarConfigBuilder.populate_options(arg_parser)
arg_parser.add_argument("--manager", default=DEFAULT_MANAGER)
def main(argv=None, config_env=False):
mod_docstring = sys.modules[__name__].__doc__
arg_parser = ArgumentParser(
description=mod_docstring,
formatter_class=RawDescriptionHelpFormatter,
)
PulsarConfigBuilder.populate_options(arg_parser)
args = arg_parser.parse_args(argv)
pid_file = args.pid_file
log.setLevel(logging.DEBUG)
log.propagate = False
if args.daemonize:
if Daemonize is None:
raise ImportError(REQUIRES_DAEMONIZE_MESSAGE)
keep_fds = []
if args.daemon_log_file:
fh = logging.FileHandler(args.daemon_log_file, "w")
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
keep_fds.append(fh.stream.fileno())
else:
fh = logging.StreamHandler(sys.stderr)
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
daemon = Daemonize(
app="pulsar",
pid=pid_file,
action=functools.partial(app_loop, args, log, config_env),
verbose=DEFAULT_VERBOSE,
logger=log,
keep_fds=keep_fds,
)
daemon.start()
else:
app_loop(args, log, config_env)
if __name__ == "__main__":
main(config_env=True)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import math
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import gen_audio_ops as audio_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
tf.compat.v1.disable_eager_execution()
# If it's available, load the specialized feature generator. If this doesn't
# work, try building with bazel instead of running the Python script directly.
try:
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top
except ImportError:
frontend_op = None
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
"""Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
"""
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
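# A quick illustration (hypothetical paths): everything after '_nohash_' is ignored,
# so recordings from the same speaker always land in the same partition:
#   which_set('data/yes/bobby_nohash_0.wav', 10, 10)
#   which_set('data/yes/bobby_nohash_1.wav', 10, 10)
#   # -> both calls return the same value, e.g. 'training'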
def load_wav_file(filename):
"""Loads an audio file and returns a float PCM-encoded array of samples.
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
return sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: filename}).audio.flatten()
def save_wav_file(filename, wav_data, sample_rate):
"""Saves audio sample data to a .wav audio file.
Args:
filename: Path to save the file to.
wav_data: 2D array of float PCM-encoded audio data.
sample_rate: Samples per second to encode in the file.
"""
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
sample_rate_placeholder = tf.compat.v1.placeholder(tf.int32, [])
wav_data_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 1])
wav_encoder = tf.audio.encode_wav(wav_data_placeholder,
sample_rate_placeholder)
wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)
sess.run(
wav_saver,
feed_dict={
wav_filename_placeholder: filename,
sample_rate_placeholder: sample_rate,
wav_data_placeholder: np.reshape(wav_data, (-1, 1))
})
def get_features_range(model_settings):
"""Returns the expected min/max for generated features.
Args:
model_settings: Information about the current model being trained.
Returns:
Min/max float pair holding the range of features.
Raises:
Exception: If preprocessing mode isn't recognized.
"""
# TODO(petewarden): These values have been derived from the observed ranges
# of spectrogram and MFCC inputs. If the preprocessing pipeline changes,
# they may need to be updated.
if model_settings['preprocess'] == 'average':
features_min = 0.0
features_max = 127.5
elif model_settings['preprocess'] == 'mfcc':
features_min = -247.0
features_max = 30.0
elif model_settings['preprocess'] == 'micro':
features_min = 0.0
features_max = 26.0
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (model_settings['preprocess']))
return features_min, features_max
class AudioProcessor(object):
"""Handles loading, partitioning, and preparing audio training data."""
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
model_settings, summaries_dir):
if data_dir:
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
self.prepare_processing_graph(model_settings, summaries_dir)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
If the data set we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a
directory.
If the data_url is none, don't download anything and expect the data
directory to contain the correct files already.
Args:
data_url: Web location of the tar file containing the data set.
dest_directory: File path to extract data to.
"""
if not data_url:
return
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
try:
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
except:
tf.compat.v1.logging.error(
'Failed to download URL: %s to folder: %s', data_url, filepath)
tf.compat.v1.logging.error(
'Please make sure you have enough free space and'
' an internet connection')
raise
print()
statinfo = os.stat(filepath)
tf.compat.v1.logging.info('Successfully downloaded %s (%d bytes)',
filename, statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def prepare_data_index(self, silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage):
"""Prepares a list of the samples organized by set and label.
The training loop needs a list of all the available data, organized by
which partition it should belong to, and with ground truth labels attached.
This function analyzes the folders below the `data_dir`, figures out the right
labels for each file based on the name of the subdirectory it belongs to,
and uses a stable hash to assign it to a data set partition.
Args:
silence_percentage: How much of the resulting data should be background.
unknown_percentage: How much should be audio outside the wanted classes.
wanted_words: Labels of the classes we want to be able to recognize.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
Dictionary containing a list of file information for each set partition,
and a lookup map for each class to determine its numeric index.
Raises:
Exception: If expected files are not found.
"""
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(wanted_words):
wanted_words_index[wanted_word] = index + 2
self.data_index = {'validation': [], 'testing': [], 'training': []}
unknown_index = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Look through all the subfolders to find audio samples
search_path = os.path.join(self.data_dir, '*', '*.wav')
for wav_path in gfile.Glob(search_path):
_, word = os.path.split(os.path.dirname(wav_path))
word = word.lower()
# Treat the '_background_noise_' folder as a special case, since we expect
# it to contain long audio samples we mix in to improve training.
if word == BACKGROUND_NOISE_DIR_NAME:
continue
all_words[word] = True
set_index = which_set(wav_path, validation_percentage, testing_percentage)
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
if word in wanted_words_index:
self.data_index[set_index].append({'label': word, 'file': wav_path})
else:
unknown_index[set_index].append({'label': word, 'file': wav_path})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(wanted_words):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_index['training'][0]['file']
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_index[set_index])
silence_size = int(math.ceil(set_size * silence_percentage / 100))
for _ in range(silence_size):
self.data_index[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_index[set_index])
unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_index[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(wanted_words)
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
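# After prepare_data_index runs, the structures look roughly like this
# (hypothetical wanted_words=['yes', 'no'] and file paths):
#   self.words_list    -> ['_silence_', '_unknown_', 'yes', 'no']
#   self.word_to_index -> {'_silence_': 0, 'wow': 1, 'yes': 2, 'no': 3, ...}
#   self.data_index['training'][0] -> {'label': 'yes', 'file': '.../yes/abc_nohash_0.wav'}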
def prepare_background_data(self):
"""Searches a folder for background noise audio, and loads it into memory.
It's expected that the background audio samples will be in a subdirectory
named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
the sample rate of the training data, but can be much longer in duration.
If the '_background_noise_' folder doesn't exist at all, this isn't an
error, it's just taken to mean that no background noise augmentation should
be used. If the folder does exist, but it's empty, that's treated as an
error.
Returns:
List of raw PCM-encoded audio samples of background noise.
Raises:
Exception: If files aren't found in the folder.
"""
self.background_data = []
background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
if not os.path.exists(background_dir):
return self.background_data
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
'*.wav')
for wav_path in gfile.Glob(search_path):
wav_data = sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
self.background_data.append(wav_data)
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
def prepare_processing_graph(self, model_settings, summaries_dir):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
shifts it in time, adds in background noise, calculates a spectrogram, and
then builds an MFCC fingerprint from that.
This must be called with an active TensorFlow session running, and it
creates multiple placeholder inputs, and one output:
- wav_filename_placeholder_: Filename of the WAV to load.
- foreground_volume_placeholder_: How loud the main clip should be.
- time_shift_padding_placeholder_: Where to pad the clip.
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- output_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
summaries_dir: Path to save training summary information to.
Raises:
ValueError: If the preprocessing mode isn't recognized.
Exception: If the preprocessor wasn't compiled in.
"""
with tf.compat.v1.get_default_graph().name_scope('data'):
desired_samples = model_settings['desired_samples']
self.wav_filename_placeholder_ = tf.compat.v1.placeholder(
tf.string, [], name='wav_filename')
wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
# Allow the audio sample's volume to be adjusted.
self.foreground_volume_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [], name='foreground_volume')
scaled_foreground = tf.multiply(wav_decoder.audio,
self.foreground_volume_placeholder_)
# Shift the sample's start position, and pad any gaps with zeros.
self.time_shift_padding_placeholder_ = tf.compat.v1.placeholder(
tf.int32, [2, 2], name='time_shift_padding')
self.time_shift_offset_placeholder_ = tf.compat.v1.placeholder(
tf.int32, [2], name='time_shift_offset')
padded_foreground = tf.pad(
tensor=scaled_foreground,
paddings=self.time_shift_padding_placeholder_,
mode='CONSTANT')
sliced_foreground = tf.slice(padded_foreground,
self.time_shift_offset_placeholder_,
[desired_samples, -1])
# Mix in background noise.
self.background_data_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [desired_samples, 1], name='background_data')
self.background_volume_placeholder_ = tf.compat.v1.placeholder(
tf.float32, [], name='background_volume')
background_mul = tf.multiply(self.background_data_placeholder_,
self.background_volume_placeholder_)
background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
# Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
spectrogram = audio_ops.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
tf.compat.v1.summary.image(
'spectrogram', tf.expand_dims(spectrogram, -1), max_outputs=1)
# The number of buckets in each FFT row in the spectrogram will depend on
# how many input samples there are in each window. This can be quite
# large, with a 160 sample window producing 127 buckets for example. We
# don't need this level of detail for classification, so we often want to
# shrink them down to produce a smaller result. That's what this section
# implements. One method is to use average pooling to merge adjacent
# buckets, but a more sophisticated approach is to apply the MFCC
# algorithm to shrink the representation.
if model_settings['preprocess'] == 'average':
self.output_ = tf.nn.pool(
input=tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
tf.compat.v1.summary.image('shrunk_spectrogram',
self.output_,
max_outputs=1)
elif model_settings['preprocess'] == 'mfcc':
self.output_ = audio_ops.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
tf.compat.v1.summary.image(
'mfcc', tf.expand_dims(self.output_, -1), max_outputs=1)
elif model_settings['preprocess'] == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running'
' TensorFlow directly from Python, you need to build and run'
' through Bazel')
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(tf.multiply(background_clamp, 32768), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
self.output_ = tf.multiply(micro_frontend, (10.0 / 256.0))
tf.compat.v1.summary.image(
'micro',
tf.expand_dims(tf.expand_dims(self.output_, -1), 0),
max_outputs=1)
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc", '
' "average", or "micro")' %
(model_settings['preprocess']))
# Merge all the summaries and write them out to /tmp/retrain_logs (by
# default)
self.merged_summaries_ = tf.compat.v1.summary.merge_all(scope='data')
if summaries_dir:
self.summary_writer_ = tf.compat.v1.summary.FileWriter(
summaries_dir + '/data', tf.compat.v1.get_default_graph())
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
Args:
mode: Which partition, must be 'training', 'validation', or 'testing'.
Returns:
Number of samples in the partition.
"""
return len(self.data_index[mode])
def get_data(self, how_many, offset, model_settings, background_frequency,
background_volume_range, time_shift, mode, sess):
"""Gather samples from the data set, applying transformations as needed.
When the mode is 'training', a random selection of samples will be returned,
otherwise the first N clips in the partition will be used. This ensures that
validation always uses the same samples, reducing noise in the metrics.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
offset: Where to start when fetching deterministically.
model_settings: Information about the current model being trained.
background_frequency: How many clips will have background noise, 0.0 to
1.0.
background_volume_range: How loud the background noise will be.
time_shift: How much to randomly shift the clips by in time.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
sess: TensorFlow session that was active when processor was created.
Returns:
List of sample data for the transformed samples, and list of label indexes
Raises:
ValueError: If background samples are too short.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = max(0, min(how_many, len(candidates) - offset))
# Data and labels will be populated and returned.
data = np.zeros((sample_count, model_settings['fingerprint_size']))
labels = np.zeros(sample_count)
desired_samples = model_settings['desired_samples']
use_background = self.background_data and (mode == 'training')
pick_deterministically = (mode != 'training')
# Use the processing graph we created earlier to repeatedly generate the
# final output sample data we'll use in training.
for i in xrange(offset, offset + sample_count):
# Pick which audio sample to use.
if how_many == -1 or pick_deterministically:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# If we're time shifting, set up the offset for this sample.
if time_shift > 0:
time_shift_amount = np.random.randint(-time_shift, time_shift)
else:
time_shift_amount = 0
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
input_dict = {
self.wav_filename_placeholder_: sample['file'],
self.time_shift_padding_placeholder_: time_shift_padding,
self.time_shift_offset_placeholder_: time_shift_offset,
}
# Choose a section of background noise to mix in.
if use_background or sample['label'] == SILENCE_LABEL:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
if len(background_samples) <= model_settings['desired_samples']:
raise ValueError(
'Background sample is too short! Need more than %d'
' samples but only %d were found' %
(model_settings['desired_samples'], len(background_samples)))
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
background_offset + desired_samples)]
background_reshaped = background_clipped.reshape([desired_samples, 1])
if sample['label'] == SILENCE_LABEL:
background_volume = np.random.uniform(0, 1)
elif np.random.uniform(0, 1) < background_frequency:
background_volume = np.random.uniform(0, background_volume_range)
else:
background_volume = 0
else:
background_reshaped = np.zeros([desired_samples, 1])
background_volume = 0
input_dict[self.background_data_placeholder_] = background_reshaped
input_dict[self.background_volume_placeholder_] = background_volume
# If we want silence, mute out the main sample but leave the background.
if sample['label'] == SILENCE_LABEL:
input_dict[self.foreground_volume_placeholder_] = 0
else:
input_dict[self.foreground_volume_placeholder_] = 1
# Run the graph to produce the output audio.
summary, data_tensor = sess.run(
[self.merged_summaries_, self.output_], feed_dict=input_dict)
self.summary_writer_.add_summary(summary)
data[i - offset, :] = data_tensor.flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset] = label_index
return data, labels
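# Sketch of typical use from a training loop (model_settings is assumed to come from
# the companion models.prepare_model_settings helper; the numeric values are illustrative):
#   sess = tf.compat.v1.InteractiveSession()
#   audio_processor = AudioProcessor(data_url, data_dir, 10, 10, ['yes', 'no'],
#                                    10, 10, model_settings, '/tmp/retrain_logs')
#   train_fingerprints, train_labels = audio_processor.get_data(
#       100, 0, model_settings, 0.8, 0.1, 100, 'training', sess)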
def get_features_for_wav(self, wav_filename, model_settings, sess):
"""Applies the feature transformation process to the input_wav.
Runs the feature generation process (generally producing a spectrogram from
the input samples) on the WAV file. This can be useful for testing and
verifying implementations being run on other platforms.
Args:
wav_filename: The path to the input audio file.
model_settings: Information about the current model being trained.
sess: TensorFlow session that was active when processor was created.
Returns:
Numpy data array containing the generated features.
"""
desired_samples = model_settings['desired_samples']
input_dict = {
self.wav_filename_placeholder_: wav_filename,
self.time_shift_padding_placeholder_: [[0, 0], [0, 0]],
self.time_shift_offset_placeholder_: [0, 0],
self.background_data_placeholder_: np.zeros([desired_samples, 1]),
self.background_volume_placeholder_: 0,
self.foreground_volume_placeholder_: 1,
}
# Run the graph to produce the output audio.
data_tensor = sess.run([self.output_], feed_dict=input_dict)
return data_tensor
def get_unprocessed_data(self, how_many, model_settings, mode):
"""Retrieve sample data for the given partition, with no transformations.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
model_settings: Information about the current model being trained.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
Returns:
List of sample data for the samples, and list of labels in one-hot form.
"""
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = how_many
desired_samples = model_settings['desired_samples']
words_list = self.words_list
data = np.zeros((sample_count, desired_samples))
labels = []
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = tf.audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
foreground_volume_placeholder = tf.compat.v1.placeholder(tf.float32, [])
scaled_foreground = tf.multiply(wav_decoder.audio,
foreground_volume_placeholder)
for i in range(sample_count):
if how_many == -1:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
input_dict = {wav_filename_placeholder: sample['file']}
if sample['label'] == SILENCE_LABEL:
input_dict[foreground_volume_placeholder] = 0
else:
input_dict[foreground_volume_placeholder] = 1
data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()
label_index = self.word_to_index[sample['label']]
labels.append(words_list[label_index])
return data, labels
|
|
# 6.00x Problem Set 6
#
# Part 1 - HAIL CAESAR!
import string
import random
WORDLIST_FILENAME = "words.txt"
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
inFile = open(WORDLIST_FILENAME, 'r')
wordList = inFile.read().split()
print " ", len(wordList), "words loaded."
return wordList
def isWord(wordList, word):
"""
Determines if word is a valid word.
wordList: list of words in the dictionary.
word: a possible word.
returns True if word is in wordList.
Example:
>>> isWord(wordList, 'bat') returns
True
>>> isWord(wordList, 'asdf') returns
False
"""
word = word.lower()
word = word.strip(" !@#$%^&*()-_+={}[]|\\:;'<>?,./\"")
return word in wordList
def randomWord(wordList):
"""
Returns a random word.
wordList: list of words
returns: a word from wordList at random
"""
return random.choice(wordList)
def randomString(wordList, n):
"""
Returns a string containing n random words from wordList
wordList: list of words
returns: a string of random words separated by spaces.
"""
return " ".join([randomWord(wordList) for _ in range(n)])
def randomScrambled(wordList, n):
"""
Generates a test string by generating an n-word random string
and encrypting it with a sequence of random shifts.
wordList: list of words
n: number of random words to generate and scramble
returns: a scrambled string of n random words
NOTE:
This function will ONLY work once you have completed your
implementation of applyShifts!
"""
s = randomString(wordList, n) + " "
shifts = [(i, random.randint(0, 25)) for i in range(len(s)) if s[i-1] == ' ']
return applyShifts(s, shifts)[:-1]
def getStoryString():
"""
Returns a story in encrypted text.
"""
return open("story.txt", "r").read()
# (end of helper code)
# -----------------------------------
#
# Problem 1: Encryption
#
def buildCoder(shift):
"""
Returns a dict that can apply a Caesar cipher to a letter.
The cipher is defined by the shift value. Ignores non-letter characters
like punctuation, numbers and spaces.
shift: 0 <= int < 26
returns: dict
"""
### TODO.
assert(0 <= shift < 26)
lower = string.ascii_lowercase
upper = string.ascii_uppercase
result = {}
for c in lower:
    result[c] = lower[(lower.index(c) + shift) % 26]
for c in upper:
    result[c] = upper[(upper.index(c) + shift) % 26]
return result
def applyCoder(text, coder):
"""
Applies the coder to the text. Returns the encoded text.
text: string
coder: dict with mappings of characters to shifted characters
returns: text after mapping coder chars to original text
"""
### TODO.
result = ''
for t in text:
if t in coder:
result = result + coder[t]
else:
result = result + t
return result
def applyShift(text, shift):
"""
Given a text, returns a new text Caesar shifted by the given shift
offset. Lower case letters should remain lower case, upper case
letters should remain upper case, and all other punctuation should
stay as it is.
text: string to apply the shift to
shift: amount to shift the text (0 <= int < 26)
returns: text after being shifted by specified amount.
"""
### TODO.
### HINT: This is a wrapper function.
assert(0 <= shift < 26)
return applyCoder(text, buildCoder(shift))
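# Illustrative sketch (not part of the original problem set): a quick sanity
# check for buildCoder/applyCoder/applyShift. The expected values assume the
# standard 26-letter wrap-around described in the docstrings above.
def _demoCaesar():
    coder = buildCoder(3)
    assert coder['a'] == 'd' and coder['z'] == 'c'
    assert applyCoder('Abc!', coder) == 'Def!'
    assert applyShift('Hello, world!', 0) == 'Hello, world!'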
#
# Problem 2: Decryption
#
def findBestShift(wordList, text):
"""
Finds a shift key that can decrypt the encoded text.
text: string
returns: 0 <= int < 26
"""
### TODO
bestShift = 0
maxWords = 0
for s in range(26):
shifted = applyShift(text, s).split(' ')
numWords = 0
for w in shifted:
if isWord(wordList, w):
numWords += 1
if numWords > maxWords:
maxWords = numWords
bestShift = s
return bestShift
def decryptStory():
"""
Using the methods you created in this problem set,
decrypt the story given by the function getStoryString().
Use the functions getStoryString and loadWords to get the
raw data you need.
returns: string - story in plain text
"""
### TODO.
story = getStoryString()
bestShift = findBestShift(loadWords(), story)
return applyShift(story, bestShift)
#
# Build data structures used for entire session and run encryption
#
if __name__ == '__main__':
# To test findBestShift:
wordList = loadWords()
s = applyShift('Hello, world!', 8)
bestShift = findBestShift(wordList, s)
assert applyShift(s, bestShift) == 'Hello, world!'
# To test decryptStory, comment the above four lines and uncomment this line:
# decryptStory()
|
|
import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
from scipy.optimize import curve_fit
from scipy import stats
from SP2_particle_record_UTC import ParticleRecord
from struct import *
import hk_new
import hk_new_no_ts_LEO
from scipy import linspace, polyval, polyfit, sqrt, stats
import math
from datetime import datetime
import calendar
###Flag definitions
# 0 good
# 1 no_scattering
# 2 neg_LEO
# 3 no_convergence
# 4 flat_fit
# 5 baseline_mismatch
# 6 LEO_value_over_sat_level
#######********************************************************************************************************************************************
#event 2 20120612 to 20120613
event_name = '20120612_20120613_moteki_RI_2p26'
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/LEO fitting/LJ-EC-SP2/Measurements/LJ Event 2/'
start_time_dt=datetime.strptime('2012/06/12 20:45', '%Y/%m/%d %H:%M')
end_time_dt = datetime.strptime('2012/06/13 11:35', '%Y/%m/%d %H:%M')
start_time = calendar.timegm(start_time_dt.utctimetuple())
end_time = calendar.timegm(end_time_dt.utctimetuple())
####event 3 20120617 to 20120618
#event_name = '20120617_20120618_moteki_RI_2p26'
#data_dir = 'C:/Users/Sarah Hanna/Documents/Data/LEO fitting/LJ-EC-SP2/Measurements/LJ Event 3/'
#start_time_dt=datetime.strptime('2012/06/18 02:12', '%Y/%m/%d %H:%M')
#end_time_dt = datetime.strptime('2012/06/18 07:52', '%Y/%m/%d %H:%M')
#start_time = calendar.timegm(start_time_dt.utctimetuple())
#end_time = calendar.timegm(end_time_dt.utctimetuple())
#event_name = '2010_BB'
#data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/SP2_2010/BB period 2010/EC_SP2/20100726/'
#start_time_dt=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
#end_time_dt = datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
#start_time = calendar.timegm(start_time_dt.utctimetuple())
#end_time = calendar.timegm(end_time_dt.utctimetuple())
#*****inputs******
records_to_use = 4000 #use 'all' to use all records
show_LEO_fit = False
#######********************************************************************************************************************************************
zeroX_to_LEO_limit = 65 #65 for 5% (amp=20) of laser intensity (LI), 60 for 10% (amp=10) of LI, 52 for 20% (amp=5) of LI, 42 for 50% (amp=2) of LI, 21 for 100% (amp=1) of LI
LEO_amplification_factor = 10
#scattering peak conditions
min_peakheight = 10 #threshold for attempting a LEO fit to the scatter signal (below this threshold particles are considered to be bare BC) 5 is ~3xsd of the baseline noise
min_peakpos = 20
max_peakpos = 150
#declare the average zero-cross to peak distance (from PSL calibration)
avg_cross_to_peak = 21.91 #La Jolla = 21.91, WHI UBC 2010 = -24.258, WHI EC 2010 = 1.269 WHI
print 'avg zero-crossing to peak time: ', avg_cross_to_peak
#declare the average gauss width (from PSL calibration)
avg_gauss_width = 19.14 #La Jolla = 19.14, WHI UBC 2010 = 16.998, WHI EC 2010 = 21.57
print 'avg Gauss width: ',avg_gauss_width
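#Illustrative sketch (an assumption mirroring the LEOGauss model defined inside the fitting loop below):
#the leading-edge-only (LEO) fit is a Gaussian whose center and width are fixed from the PSL calibration,
#leaving only the amplitude and baseline free for scipy.optimize.curve_fit.
def leo_gaussian(x_vals, amplitude, baseline, center, width):
    return baseline + amplitude*np.exp((-np.power((x_vals-center), 2))/(2*np.power(width, 2)))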
#incandescence to BC mass calibration factors # for EC WHI 2010 0.156+0.000606x+ 6.3931e-7, LJ = calib1 = 0.23523 calib2 = 0.00235 calib3 = 1.4928e-8, LJ Aquadag scaled = 0.2152+0.002401x
calib1 = 0.2152
calib2 = 0.002401
calib3 = 0
BC_density = 1.8 #density of ambient BC in g/cc
BC_det_limit = 0.25 #in fg (0.23fg = 62nm VED)
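#Illustrative helper (a sketch, not part of the original script): the quadratic
#incandescence-to-BC-mass calibration below mirrors how calib1/calib2/calib3 are applied later on.
def incand_to_BC_mass_fg(incand_pk_amp):
    return calib1 + calib2*incand_pk_amp + calib3*(incand_pk_amp**2)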
parameters = {
'acq_rate': 5000000,
#file i/o
'directory':data_dir,
#date and time
'timezone':-7,
}
#********** Mie calc lookup table ********
lookup_dir = 'C:/Users/Sarah Hanna/Documents/Data/LEO fitting/LJ-EC-SP2/lookup tables/'
#lookup_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/SP2_2010/BB period 2010/EC_SP2/lookup tables/'
os.chdir(lookup_dir)
for lookup_file in os.listdir('.'):
if lookup_file.endswith('.lupckl'):
print lookup_file
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
os.chdir(data_dir)
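#Illustrative helper (a sketch based on how lookup_table is used further down: a dict keyed by
#BC core diameter, each value being a dict of calculated scattering amplitude -> coating thickness).
#Not part of the original script.
def lookup_coating_thickness(lookup_table, core_VED, scattering_amplitude):
    core_diameters = sorted(lookup_table.keys())
    core_to_use = core_diameters[0]
    for core_diameter in core_diameters:
        if core_diameter > core_VED:
            break
        core_to_use = core_diameter
    amps = sorted(lookup_table[core_to_use].keys())
    amp_to_use = amps[0]
    for amp in amps:
        if scattering_amplitude < amp:
            break
        amp_to_use = amp
    return lookup_table[core_to_use].get(amp_to_use, np.nan)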
#*********initialize variables********
LEO_data = []
total_particles = 0
particles_used = 0
LEO_fail = 0
no_scatter = 0
saturated_particles = 0
counter = 0 #counts scattering signals above the saturation check in the fitting loop
LEO_data_for_plot = [] #keeps every row for the plots below, since LEO_data is flushed to file after each sp2b file
#*************Particle analysis************
ooh_boy = 0
first_file = True
for file in os.listdir('.'):
if file.endswith('.sp2b'):
path = data_dir + str(file)
file_bytes = os.path.getsize(path) #size of entire file in bytes
record_size = 1498 #size of a single particle record in bytes(UBC_SP2 = 1498, EC_SP2 in 2009 and 2010 = 2458)
number_of_records = (file_bytes/record_size)-1
if records_to_use == 'all':
number_records_to_show = number_of_records
else:
number_records_to_show = records_to_use
f2 = open(file, 'rb')
print file
#Grab the particle records
record_index = 0
while record_index < number_records_to_show:
##Import and parse binary
record = f2.read(record_size)
particle_record = ParticleRecord(record, parameters['acq_rate'], parameters['timezone'])
event_time = particle_record.timestamp
#if the particle event time is after the period we're interested in we can skip the rest of the analysis
if event_time > end_time:
break
#check if the event_time is within the cloud event
if event_time >= start_time and event_time <= end_time:
total_particles+=1
#run the scatteringPeakInfo method to retrieve various scattering peak attributes
particle_record.scatteringPeakInfo(LEO_amplification_factor)
scattering_pk_pos = particle_record.scatteringMaxPos
#run the incandPeakInfo method to retrieve various incandescence peak attributes
particle_record.incandPeakInfo()
incand_pk_amp = particle_record.incandMax
BC_mass_fg = calib1 + calib2*incand_pk_amp + calib3*(incand_pk_amp**2)
incand_pk_pos = particle_record.incandMaxPos
lag_time = (incand_pk_pos - scattering_pk_pos)*0.2
saturated = particle_record.incandIsSat
if saturated == True:
saturated_particles += 1
#get the zero-crossing (note: a neg value indicates an exception was thrown when determining the zero-crossing)
zero_crossing_pt_LEO = particle_record.zeroCrossingPosSlope()
if zero_crossing_pt_LEO == -1:
ooh_boy +=1
#if particle record fits criteria for a good record, continue with analysis
if BC_mass_fg > BC_det_limit and saturated == False:
LEO_coating_thickness = np.nan
LEO_pt_amp = np.nan
center = np.nan
coating_volume = np.nan
scat_amp = particle_record.scatteringMax
BC_VED = (((BC_mass_fg/(10**15*BC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
#get data for BC particles with no scatter signal
if particle_record.scatteringMax < min_peakheight:
flag = 1
LEO_amp = np.NaN
LEO_coating_thickness = 0.0
#get data for BC particles with some scatter signal
if particle_record.scatteringMax >= min_peakheight:
#set up the LEO fitting
try:
LEO_max_index = int(zero_crossing_pt_LEO - zeroX_to_LEO_limit) #sets the x-limit for fitting based on the desired magnification factor particle_record.LEOMaxIndex #
except:
print 'LEO_max_index failure', zero_crossing_pt_LEO, zeroX_to_LEO_limit
#if leading edge distance that we're fitting is negative we can't do a LEO fit and we use this in our error calculation
if LEO_max_index <= 0:
flag = 2
LEO_amp = np.NaN
#only proceed if the leading edge distance that we're fitting isn't negative
if LEO_max_index > 0:
scatteringBaseline = particle_record.scatteringBaseline
LEO_min_index = 0
center = zero_crossing_pt_LEO-(avg_cross_to_peak)
width_to_use = avg_gauss_width
#set range of values to fit
x_vals_all = np.array(particle_record.getAcqPoints())
x_vals_to_use = x_vals_all[LEO_min_index:LEO_max_index]
y_vals_all = np.array(particle_record.getScatteringSignal())
y_vals_to_use = y_vals_all[LEO_min_index:LEO_max_index]
try:
if np.max(y_vals_to_use)>3600:
print np.max(y_vals_to_use)
counter+=1
except:
print 'yval err',record_index
LEO_pt_amp = y_vals_all[LEO_max_index]-scatteringBaseline
#incandescence data (for plotting if desired)
y_vals_incand = particle_record.getWidebandIncandSignal()
#split detector signal
y_vals_split = particle_record.getSplitDetectorSignal()
p_guess = (scat_amp, -2016)
def LEOGauss(x_vals, a, b):
return b+a*np.exp((-np.power((x_vals-center),2))/(2*np.power(width_to_use,2)))
try:
popt, pcov = curve_fit(LEOGauss, x_vals_to_use, y_vals_to_use, p0 = p_guess)
#if the fitting procedure fails we fold this into our error
except:
popt, pcov = None, None
flag = 3
LEO_amp = np.NaN
fit_result = []
for x in range(0,300):
fit_result.append(LEOGauss(x,scat_amp,-2016))
#if the fitting procedure succeeds we continue
if popt != None:
LEO_amp = popt[0]
LEO_baseline = popt[1]
#if the fit was good, we give a flag = 0
flag = 0
#check that the fit didn't just produce a flat line and overwrite LEO_amp as NaN if it did
if LEO_amp < 5:#min_peakheight:
flag = 4
#LEO_amp = np.NaN
i=0
#if it's not a flat line check if there is a big baseline mismatch, if there is we have a LEO failure and this goes into our error
if LEO_amp >= min_peakheight:
#get baseline and check for mismatch
#limit for allowed baseline mismatch between the LEO fit and the real baseline
max_baseline_diff = 100
baseline_diff = math.fabs(LEO_baseline-scatteringBaseline)
if baseline_diff > max_baseline_diff:
flag = 5
#LEO_amp = np.NaN
####Now we get and write the coating info to file
#get the coating thicknesses from the lookup table which is a dictionary of dictionaries, the 1st keyed with BC core size and the second being coating thicknesses keyed with calc scat amps
#get the core size first regardless of a valid LEO fit
core_diameters = sorted(lookup_table.keys())
prev_diameter = core_diameters[0]
for core_diameter in core_diameters:
if core_diameter > BC_VED:
core_dia_to_use = prev_diameter
break
prev_diameter = core_diameter
#now get the coating thickness for the scat_amp this is the coating thickness based on the raw scattering max
scattering_amps = sorted(lookup_table[core_dia_to_use].keys())
prev_amp = scattering_amps[0]
for scattering_amp in scattering_amps:
if scat_amp < scattering_amp:
scat_amp_to_use = prev_amp
break
prev_amp = scattering_amp
scat_coating_thickness = lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan) # returns value for the key, or none
#this is the coating thickness based on the LEO_amp,
#if we got a LEO fit, find the coating thickness from it
if np.isnan(LEO_amp) != True:
#get the calc coating thickness from the LEO_amp
prev_amp = scattering_amps[0]
scatter_below_saturation_level = False #initialize to false, but set to true as soon as we find a scattering match in our table; this is only an issue for the LEO scattering since we checked the raw scattering for saturation earlier
for scattering_amp in scattering_amps:
if LEO_amp < scattering_amp:
amp_to_use = prev_amp
scatter_below_saturation_level = True
break
prev_amp = scattering_amp
if scatter_below_saturation_level == False:
flag = 6
#if scatter_below_saturation_level == True:
LEO_coating_thickness = lookup_table[core_dia_to_use].get(amp_to_use, np.nan) # returns value for the key, or none
coated_VED = BC_VED + 2*LEO_coating_thickness
coating_volume = ((4.0/3)*math.pi*(coated_VED/2.0)**3) - ((4.0/3)*math.pi*(BC_VED/2.0)**3) #nm3
#***stops to show us the leo fit if we want##########################################
if show_LEO_fit == True and scat_amp >= 95 and scat_amp <=105:
print '\n'
print 'record: ',record_index
print 'core VED: ', BC_VED
print 'center', center
print 'LEO_pt_amp, LEO_amp', LEO_pt_amp, LEO_amp
print 'coating', LEO_coating_thickness
print 'flag', flag
print 'scat_amp', scat_amp
print 'scat_amp_to_use',scat_amp_to_use
fit_result = LEOGauss(x_vals_all,LEO_amp,LEO_baseline)
LEO_used = LEOGauss(x_vals_to_use,LEO_amp,LEO_baseline)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x_vals_all,y_vals_all,'o', markeredgecolor='blue',markerfacecolor='None')
ax1.plot(x_vals_all,fit_result, 'blue')
ax1.plot(x_vals_to_use,y_vals_to_use, color = 'black',linewidth=3)
ax1.plot(x_vals_all, y_vals_incand, 'o', markeredgecolor='red', markerfacecolor='None')
#ax1.plot(x_vals_all, y_vals_split, 'green')
plt.ylim(-2500,1800)
plt.axvline(x=zero_crossing_pt_LEO, ymin=0, ymax=1, color= 'red')
plt.axvline(x=center, ymin=0, ymax=1)
plt.text(0.55, 0.8,file + ' index: ' +str(record_index) + '\n' + 'core VED (nm): ' + str(round(BC_VED,2)) + '\n' + 'coating: ' +str(LEO_coating_thickness), transform = ax1.transAxes)
plt.show()
#write our data to file
newline = [LEO_amp, scattering_pk_pos, BC_VED, incand_pk_pos, lag_time, LEO_coating_thickness, event_time, flag, scat_amp, scat_coating_thickness, LEO_pt_amp, center, coating_volume]
LEO_data.append(newline)
record_index+=1
#write to file after each sp2bfile/loop to avoid bogging down from holding huge array in memory
print 'to file'
file = open(event_name +' testeroo'+'.coattxt', 'a')
if first_file == True:
file.truncate(0)
file.write('LEO_amp'+ '\t' + 'scattering_pk_pos'+ '\t' + ' BC_VED_nm'+ '\t' + ' incand_pk_pos'+ '\t' + ' lag_time_us'+ '\t' + ' LEO_coating_thickness_nm'+ '\t' + ' event_time'+ '\t' + ' flag'+ '\t' + ' scat_amp'+ '\t' + ' scat_coating_thickness_nm'+ '\t'+' LEO_pt_amp'+ '\t'+' beam_center_pos'+ '\t'+' coating_volume_nm3' + '\n')
first_file = False
for row in LEO_data:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
LEO_data_for_plot.extend(LEO_data)
LEO_data = []
f2.close()
print ooh_boy
print 'LEO fail', LEO_fail
print 'no scattering', no_scatter
print 'total particles:', total_particles
print 'particles used:', particles_used
print '# of saturated incandesence',saturated_particles
#create numpy array
LEO_data_np = np.array(LEO_data_for_plot)
#*******Plotting***********
fig = plt.figure()
ax1 = fig.add_subplot(111)
LF_scattering_amp = LEO_data_np[:,0]
scattering_pk_position = LEO_data_np[:,1]
BC_VED = LEO_data_np[:,2]
incand_pk_position = LEO_data_np[:,3]
lag = LEO_data_np[:,4]
LEO_coating_thickness = LEO_data_np[:,5]
timestamp = LEO_data_np[:,6]
###timeseries###
#plt.scatter(timestamp, BC_VED, c=LEO_coating_thickness, cmap=cm.jet)
#plt.xlabel('timestamp')
#plt.ylabel('BC_VED')
#cb = plt.colorbar()
#cb.set_label('LEO_coating_thickness')
####hexbin###
plt.hexbin(BC_VED, LEO_coating_thickness, cmap=cm.jet, gridsize = 50, bins='log')#, norm= norm) #bins='log', norm=norm
plt.xlabel('BC_VED')
plt.ylabel('LEO_coating_thickness')
#cb = plt.colorbar()
#cb.set_label('frequency')
#plt.savefig(event_name + 'CT VS BCVED', bbox_inches='tight',pad_inches=0.25)
plt.show()
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import textwrap
from paddle.utils import gast
from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import get_name_ids
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import NodeVarType
from paddle.fluid.dygraph.dygraph_to_static.utils import is_control_flow_to_transform
class TestGetNameIds(unittest.TestCase):
"""
Test for parsing the ast.Name list from the ast.Nodes
"""
def setUp(self):
self.source = """
def test_fn(x):
return x+1
"""
self.all_name_ids = {'x': [gast.Param(), gast.Load()]}
def test_get_name_ids(self):
source = textwrap.dedent(self.source)
root = gast.parse(source)
all_name_ids = get_name_ids([root])
self.assertDictEqual(
self.transfer_dict(self.all_name_ids),
self.transfer_dict(all_name_ids))
def transfer_dict(self, name_ids_dict):
new_dict = {}
for name, ctxs in name_ids_dict.items():
new_dict[name] = [type(ctx) for ctx in ctxs]
return new_dict
class TestGetNameIds2(TestGetNameIds):
def setUp(self):
self.source = """
def test_fn(x, y):
a = 1
x = y + a
if x > y:
z = x * x
z = z + a
else:
z = y * y
return z
"""
self.all_name_ids = {
'x': [
gast.Param(), gast.Store(), gast.Load(), gast.Load(),
gast.Load()
],
'a': [gast.Store(), gast.Load(), gast.Load()],
'y': [
gast.Param(),
gast.Load(),
gast.Load(),
gast.Load(),
gast.Load(),
],
'z': [
gast.Store(),
gast.Load(),
gast.Store(),
gast.Store(),
gast.Load(),
]
}
class TestGetNameIds3(TestGetNameIds):
def setUp(self):
self.source = """
def test_fn(x, y):
z = 1
if x > y:
z = x * x
z = z + y
return z
"""
self.all_name_ids = {
'x': [
gast.Param(),
gast.Load(),
gast.Load(),
gast.Load(),
],
'y': [
gast.Param(),
gast.Load(),
gast.Load(),
],
'z': [
gast.Store(),
gast.Store(),
gast.Load(),
gast.Store(),
gast.Load(),
]
}
class TestIsControlFlowIf(unittest.TestCase):
def check_false_case(self, code):
code = textwrap.dedent(code)
node = gast.parse(code)
node_test = node.body[0].value
self.assertFalse(is_control_flow_to_transform(node_test))
def test_expr(self):
# node is not ast.Compare
self.check_false_case("a+b")
def test_expr2(self):
# x is a Tensor.
node = gast.parse("a + x.numpy()")
node_test = node.body[0].value
self.assertTrue(is_control_flow_to_transform(node_test))
def test_is_None(self):
self.check_false_case("x is None")
def test_is_None2(self):
self.check_false_case("fluid.layers.sum(x) is None")
def test_is_None3(self):
self.check_false_case("fluid.layers.sum(x).numpy() != None")
def test_is_None4(self):
node = gast.parse("fluid.layers.sum(x) and 2>1")
node_test = node.body[0].value
self.assertTrue(is_control_flow_to_transform(node_test))
def test_if(self):
node = gast.parse("x.numpy()[1] > 1")
node_test = node.body[0].value
self.assertTrue(is_control_flow_to_transform(node_test))
def test_if_with_and(self):
node = gast.parse("x and 1 < x.numpy()[1]")
node_test = node.body[0].value
self.assertTrue(is_control_flow_to_transform(node_test))
def test_if_with_or(self):
node = gast.parse("1 < fluid.layers.sum(x).numpy()[2] or x+y < 0")
node_test = node.body[0].value
self.assertTrue(is_control_flow_to_transform(node_test))
def test_shape(self):
code = """
def foo(x):
batch_size = fluid.layers.shape(x)
if batch_size[0] > 16:
x = x + 1
return x
"""
code = textwrap.dedent(code)
node = gast.parse(code)
static_analysis_visitor = StaticAnalysisVisitor(node)
test_node = node.body[0].body[1].test
self.assertTrue(
is_control_flow_to_transform(test_node, static_analysis_visitor))
def test_shape_with_andOr(self):
code = """
def foo(x):
batch_size = fluid.layers.shape(x)
if x is not None and batch_size[0] > 16 or 2 > 1:
x = x + 1
return x
"""
code = textwrap.dedent(code)
node = gast.parse(code)
static_analysis_visitor = StaticAnalysisVisitor(node)
test_node = node.body[0].body[1].test
self.assertTrue(
is_control_flow_to_transform(test_node, static_analysis_visitor))
def test_paddle_api(self):
code = """
def foo(x):
if fluid.layers.shape(x)[0] > 16:
x = x + 1
return x
"""
code = textwrap.dedent(code)
node = gast.parse(code)
static_analysis_visitor = StaticAnalysisVisitor(node)
test_node = node.body[0].body[0].test
self.assertTrue(
is_control_flow_to_transform(test_node, static_analysis_visitor))
def test_paddle_api_with_andOr(self):
code_or = """
def foo(x):
if 2 > 1 and fluid.layers.shape(x)[0] > 16 or x is not None :
x = x + 1
return x
"""
code_and = """
def foo(x):
if 2 > 1 and fluid.layers.shape(x)[0] > 16 and x is not None :
x = x + 1
return x
"""
for code in [code_or, code_and]:
code = textwrap.dedent(code)
node = gast.parse(code)
static_analysis_visitor = StaticAnalysisVisitor(node)
test_node = node.body[0].body[0].test
self.assertTrue(
is_control_flow_to_transform(test_node,
static_analysis_visitor))
def test_with_node_var_type_map(self):
node = gast.parse("x > 1")
node_test = node.body[0].value
# if x is a Tensor
var_name_to_type = {"x": {NodeVarType.TENSOR}}
self.assertTrue(
is_control_flow_to_transform(
node_test, var_name_to_type=var_name_to_type))
# if x is not a Tensor
var_name_to_type = {"x": {NodeVarType.NUMPY_NDARRAY}}
self.assertFalse(
is_control_flow_to_transform(
node_test, var_name_to_type=var_name_to_type))
def test_raise_error(self):
node = "a + b"
with self.assertRaises(Exception) as e:
self.assertRaises(TypeError, is_control_flow_to_transform(node))
self.assertTrue(
"The type of input node must be gast.AST" in str(e.exception))
if __name__ == '__main__':
unittest.main()
|
|
"""Base classes for all plug-ins"""
import os
import imp
import re
from vb2py.utils import modulePath
from vb2py.config import VB2PYConfig
Config = VB2PYConfig()
from vb2py import logger
log = logger.getLogger("PlugInLoader")
def loadAllPlugins():
"""Load all plug-ins from the plug-in directory and return a list of all
the classes"""
from vb2py import plugins
mods = []
for mod in plugins.mods:
log.info("Checking '%s' for plugins" % mod)
#
filename = os.path.join(modulePath(), "plugins", "%s.py" % mod)
f = open(filename, "r")
try:
try:
m = imp.load_module(mod, f, filename, ('*.py', 'r', 1))
finally:
f.close()
except Exception, err:
log.warn("Error importing '%s' (%s). Module skipped" % (mod, err))
continue
#
for name in dir(m):
cls = getattr(m, name)
# import pdb; pdb.set_trace()
try:
is_plugin = cls.__is_plugin__
except AttributeError:
is_plugin = 0
if is_plugin:
try:
p = cls()
log.info("Added new plug-in: '%s" % p.name)
mods.append(p)
except Exception, err:
log.warn(
"Error creating plugin '%s' (%s). Class skipped" %
(cls, err))
#
# Now sort
mods.sort()
return mods
def disableLogging():
"""Disable logging in all plugins"""
#
# Disable the main logger
log.setLevel(0)
#
# Now do so for plugins
BasePlugin.logging_level = 0
class BasePlugin(object):
"""All plug-ins should inherit from this base class or define
__is_plugin__"""
__is_plugin__ = 1 # Set to true if you want to be loaded as a plug-in
system_plugin = 0 # True if you are a system plugin
__enabled = 1 # If false the plugin will not be called
order = 1000 # Determines order of execution. lower = earlier
logging_level = int(Config["General", "LoggingLevel"])
def __init__(self):
"""Initialize the plugin
This method should always be called by subclasses as it is required to
set up logging etc
"""
if not hasattr(self, "name"):
self.name = self.__class__.__name__
self.log = logger.getLogger(self.name)
self.log.setLevel(self.logging_level)
def preProcessVBText(self, text):
"""Process raw VB text prior to any conversion
This method should return a new version of the text with any changes
made to it. If there is no preprocessing required then do not define
this method.
"""
return text
def postProcessPythonText(self, text):
"""Process Python text following the conversion
This method should return a new version of the text with any changes
made to it. If there is no postprocessing required then do not define
this method.
"""
return text
def disable(self):
"""Disable the plugin"""
self.__enabled = 0
def isEnabled(self):
"""Return 1 if plugin is enabled"""
return self.__enabled
def __cmp__(self, other):
"""Used to allow plugins to be sorted to run in a certain order"""
return cmp(self.order, other.order)
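# Minimal illustrative plug-in (a sketch, not shipped with vb2py): inheriting from
# BasePlugin and overriding preProcessVBText is enough to hook into the conversion.
# The comment-stripping regex here is a naive assumption for demonstration only.
class _ExampleCommentStripper(BasePlugin):
    """Example plug-in that drops VB line comments before conversion"""
    def preProcessVBText(self, text):
        return re.sub(r"(?m)'.*$", "", text)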
class RETextMarkup(BasePlugin):
"""A utility class to apply regular expression based text markup
The plug-in allows simple re text replacements in pre- and post-conversion
passes simply by reading from lists of replacements defined as class
attributes.
Users can simply create instances of their own classes to handle whatever
markup they desire.
"""
name = "RETextMarkup"
re_flags = 0 # Put re flags here if you need them
# Define your patterns by assigning to these properties in the sub-class
pre_process_patterns = ()
post_process_patterns = ()
def preProcessVBText(self, text):
"""Process raw VB text prior to any conversion"""
if self.pre_process_patterns:
self.log.info("Processing pre patterns")
return self.processText(text, self.pre_process_patterns)
def postProcessPythonText(self, text):
"""Process Python text following the conversion"""
if self.post_process_patterns:
self.log.info("Processing post patterns")
return self.processText(text, self.post_process_patterns)
def processText(self, text, patterns):
"""Process the text and mark it up"""
for re_pattern, replace in patterns:
def doSub(match):
self.log.info("Replacing '%s' with %s, %s" %
(re_pattern, replace, match.groupdict()))
return replace % match.groupdict()
r = re.compile(re_pattern, self.re_flags)
text = r.sub(doSub, text)
return text
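# Illustrative sketch (an assumption, not part of vb2py): a markup plug-in built on
# RETextMarkup. Patterns are (regex, replacement) pairs and named groups feed the
# "%(name)s" style replacement used in processText above.
class _ExampleTodoMarkup(RETextMarkup):
    name = "ExampleTodoMarkup"
    pre_process_patterns = (
        (r"'\s*TODO:?\s*(?P<text>.*)", "' TODO -- %(text)s"),
    )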
class RenderHookPlugin(BasePlugin):
"""A utility plugin to hook a render method and apply markup after the
render
The plugin replaces the specified object's normal renderAsCode method with one
which calls the plugin's addMarkup method when it is complete.
"""
name = "RenderHookPlugin"
hooked_class_name = None # Name of class should go here
def __init__(self):
"""Initialize the plugin
This method should always be called by subclasses as it is required to
set up logging etc
"""
super(RenderHookPlugin, self).__init__()
#
# Look for class and replace its renderAsCode method
import parserclasses
self.hooked_class = getattr(parserclasses, self.hooked_class_name)
old_render_method = self.hooked_class.renderAsCode
#
def newRender(obj, indent=0):
ret = old_render_method(obj, indent)
return self.addMarkup(indent, ret)
#
self.hooked_class.renderAsCode = newRender
def addMarkup(self, indent, text):
"""Add markup to the rendered text"""
return text
class SystemPlugin(BasePlugin):
"""Special kind of plug-in which is used by the system and cannot be
disabled"""
system_plugin = 1
class SystemPluginREPlugin(RETextMarkup):
"""Special kind of plug-in which is used by the system and cannot be
disabled"""
system_plugin = 1
if __name__ == "__main__":
loadAllPlugins()
|
|
#!/usr/bin/env python
""" This script reconstructs the results presented in Tables 2.x.
"""
import warnings
warnings.filterwarnings("ignore")
import pickle as pkl
import numpy as np
import copy
import os
np.random.seed(123)
import respy
from respy.python.shared.shared_auxiliary import transform_disturbances
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.shared.shared_auxiliary import dist_model_paras
from respy.python.shared.shared_auxiliary import get_total_value
from respy.python.shared.shared_auxiliary import create_draws
from auxiliary_shared import process_command_line
from auxiliary_shared import send_notification
from auxiliary_shared import enter_results_dir
from auxiliary_correct import write_correct
from auxiliary_shared import EXACT_DIR
from auxiliary_shared import SPEC_DIR
from auxiliary_shared import cleanup
from auxiliary_shared import mkdir_p
def run(is_debug, task, num_procs):
""" Run a single request.
"""
# Distribute task
which, num_draws_emax, num_points = task
dir_ = 'data_' + which + '/' + '%03.4d' % num_draws_emax
if not num_points == 'all':
dir_ += '_' + '%03.4d' % num_points
else:
dir_ += '_all'
mkdir_p(dir_), os.chdir(dir_)
# Read the baseline specification.
respy_obj = respy.RespyCls(SPEC_DIR + '/data_' + which + '.ini')
# Ensure a speedy execution (if possible).
respy_obj.unlock()
respy_obj.set_attr('num_procs', num_procs)
respy_obj.set_attr('is_parallel', (num_procs > 1))
respy_obj.lock()
# Get the solutions to the exact solution from previous results.
fname = EXACT_DIR + '/data_' + which + '/solution.respy.pkl'
respy_obj_exact = pkl.load(open(fname, 'rb'))
selected_periods = [1, 10, 20, 30, 40]
selected_ranges = [(1, 10), (11, 35), (36, 38), (39, 39), (40, 40)]
# Solve the dynamic programming model for only a subset of states.
respy_obj.attr['num_draws_emax'] = num_draws_emax
if not num_points == 'all':
respy_obj.attr['num_points_interp'] = num_points
respy_obj.attr['is_interpolated'] = True
else:
respy_obj.attr['is_interpolated'] = False
# Debugging setup
if is_debug:
respy_obj.attr['is_interpolated'] = True
respy_obj.attr['num_draws_emax'] = 10
respy_obj.attr['num_points_interp'] = 11
# If I am running the first specification, I need to make sure that the limited interpolation
# model is used.
if which == 'one':
open('.structRecomputation.tmp', 'a').close()
respy_obj.write_out()
respy_obj = respy.simulate(respy_obj)
respy_obj_inte = copy.deepcopy(respy_obj)
# Distribute all class attributes that are independent of the solution method.
periods_payoffs_systematic, mapping_state_idx, model_paras, num_periods, num_agents_sim, \
states_all, edu_start, seed_sim, edu_max, delta = dist_class_attributes(respy_obj,
'periods_payoffs_systematic', 'mapping_state_idx', 'model_paras', 'num_periods',
'num_agents_sim', 'states_all', 'edu_start', 'seed_sim', 'edu_max', 'delta')
# Auxiliary objects
shocks_cholesky = dist_model_paras(model_paras, True)[4]
# Extract the expected future values from the exact and approximated solution.
periods_emax_exact = respy_obj_exact.get_attr('periods_emax')
periods_emax_inter = respy_obj_inte.get_attr('periods_emax')
# Draw draws for the simulation.
periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, True)
# Standard deviates transformed to the distributions relevant for the agents' actual decision
# making as they traverse the tree.
dimension = (num_periods, num_agents_sim, 4)
periods_draws_sims_transformed = np.tile(np.nan, dimension)
for period in range(num_periods):
periods_draws_sims_transformed[period, :, :] = transform_disturbances(
periods_draws_sims[period, :, :], shocks_cholesky)
# Simulate a synthetic agent population and compare the implied decisions based on the exact
# and approximate decision at each of the decision nodes.
success_indicators = np.tile(np.nan, (num_agents_sim, num_periods))
for i in range(num_agents_sim):
current_state = states_all[0, 0, :].copy()
# Iterate over each period for the agent
for period in range(num_periods):
# Distribute state space
exp_a, exp_b, edu, edu_lagged = current_state
k = mapping_state_idx[period, exp_a, exp_b, edu, edu_lagged]
# Select relevant subset
payoffs_systematic = periods_payoffs_systematic[period, k, :]
draws = periods_draws_sims_transformed[period, i, :]
# Get total value of admissible states
total_payoffs_exact = get_total_value(period, num_periods, delta, payoffs_systematic,
draws, edu_max, edu_start, mapping_state_idx, periods_emax_exact, k, states_all)
total_payoffs_inter = get_total_value(period, num_periods, delta, payoffs_systematic,
draws, edu_max, edu_start, mapping_state_idx, periods_emax_inter, k, states_all)
# Determine optimal choices and record whether the implications agree between the
# exact and approximate solutions.
max_idx_exact = np.argmax(total_payoffs_exact)
max_idx_inter = np.argmax(total_payoffs_inter)
success_indicators[i, period] = (max_idx_exact == max_idx_inter)
# Update work experiences, level of education, and lagged education according to
# exact solution.
if max_idx_exact == 0:
current_state[0] += 1
elif max_idx_exact == 1:
current_state[1] += 1
elif max_idx_exact == 2:
current_state[2] += 1
if max_idx_exact == 2:
current_state[3] = 1
else:
current_state[3] = 0
# Return to request directories.
os.chdir('../../')
# Write out results for all requests to a corresponding table.
args = tuple()
args += (num_points, num_draws_emax, success_indicators)
args += (selected_periods, which, num_agents_sim, num_periods)
args += (selected_ranges,)
write_correct(*args)
''' Execution of module as script.
'''
if __name__ == '__main__':
# Process the command line arguments.
description = 'Assess correct decisions.'
is_debug, num_procs = process_command_line(description)
# Switch to RSLT_DIR. This separates the results from the source files and eases updating
# from the compute servers.
source_dir = enter_results_dir('correct_choices')
# Create task grid for parallel processing.
data = ['one', 'two', 'three']
tasks, requests = list(), list()
requests += [(2000, 'all'), (1000, 'all'), (250, 'all'), (2000, 2000), (2000, 500)]
# Debugging setup
if is_debug:
requests = [(20, 200)]
data = ['one']
for which in data:
for request in requests:
task = (which, ) + request
tasks += [task]
# Execute the tasks in parallel
cleanup()
for task in tasks:
run(is_debug, task, num_procs)
send_notification('correct')
os.chdir(source_dir)
|
|
#!/usr/bin/python
# Copyright (C) 2014 - Evan Mjelde
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tile components -- These are the fundamental components used form a layout."""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from pytiles.errors import TilesError
class TileType(object):
"""A page component that can be evaluated to a string when ready to be displayed."""
def __init__(self, name, resolver=None, renderer=None, role=None, in_role=lambda role: True):
"""Keyword arguments:
resolver -- Function that will resolve Tile Type into the current execution state.
renderer -- Function that will render Tile Type to a string.
role -- Role which can be evaluated for rendering display permission.
in_role -- Function used to evaluate roles; it must return a bool type.
If no function is supplied, the default will always return True.
"""
self.name = name
self.resolver = resolver
self.renderer = renderer
self.role = role
# Alternative way to specify in role function.
if 'tiles_in_role' in globals():
self.in_role = globals()['tiles_in_role']
else:
self.in_role = in_role
def resolve(self, definition_context):
"""Resolve Tile Type component
Arguments:
definition_context -- Encapsulation of the current state of execution.
"""
if self.resolver is None:
definition_context.add_attribute(self)
else:
try:
self.resolver.resolve(self, definition_context)
except AttributeError:
if self.resolver is None:
raise NotImplementedError("Should have implemented a resolver for {0}".format(self.name))
else:
raise
def render(self):
"""Render Tile Type component."""
try:
if self.permit():
return self.renderer.render(self)
except AttributeError:
if self.renderer is None:
raise NotImplementedError("Should have implemented a renderer for {0}".format(self.name))
else:
raise
return ''
def permit(self):
"""Returns a boolean value for permission given to render the component."""
in_role = self.in_role(self.role)
if type(in_role) is not bool:
raise TilesError("Function '{0}' must return a boolean value.".format(self.in_role.__name__))
return in_role
def __repr__(self):
return "{0}({1!r})".format(self.__class__, self.__dict__);
def __str__(self):
return self.render()
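# Illustrative sketch (an assumption, not part of pytiles): TileType only expects its
# renderer/resolver collaborators to expose render(tile) / resolve(tile, context), so a
# minimal renderer can be a plain object such as this one.
class _EchoRenderer(object):
    """Toy renderer that just echoes the component name"""
    def render(self, tile):
        return "<!-- tile: {0} -->".format(tile.name)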
class String(TileType):
"""A very simple single value page component."""
def __init__(self, name, value, **kwargs):
self.value = value
super(String, self).__init__(name, **kwargs)
def render(self):
if self.permit():
return self.value
return ''
class List(TileType):
"""A collection of other Tile Types."""
def __init__(self, name, items=None, inherit=True, **kwargs):
"""Keyword arguments:
items -- Array of initial items.
inherit -- Lets you know if it should inherit items on merge. (Default: True)
"""
self.items = items if items is not None else []
self.inherit = inherit
super(List, self).__init__(name, **kwargs)
def add(self, item, order = 'append'):
"""Add Item to list.
Keyword arguments:
item -- If item is instance of TileType.List the list items will be
merged, otherwise items will be processed as normal.
order -- order values can be 'append' or 'prepend', which will do exactly
what you would expect (Default: append).
"""
try:
item = item.items
except AttributeError:
item = [item]
self.items = {
'prepend': item + self.items,
'append': self.items + item
}[order]
def can_inherit(self):
return self.inherit
def render(self):
"""Generate unordered list of items. (For LOLs)."""
if self.permit():
output = '<ul>'
for item in self.items:
output += "<li>{0}</li>".format(item)
return output + '</ul>'
return ''
class Page(TileType):
"""Page component support in rendering of pages, it is responsible for
holding Attributes (rendered Tile Types)
which can later be used to fill the page.
"""
def __init__(self, name, resource, view_type, **kwargs):
"""Keyword arguments:
resource -- Page resource (file path, stream, string, etc)
view_type -- View Type class instance
"""
self.resource = resource
self.view_type = view_type
self.attributes = {}
super(Page, self).__init__(name, **kwargs)
def add_attributes(self, attributes):
"""Add attributes merges with present attributes."""
self.attributes = dict(self.attributes, **attributes)
def render(self):
if self.permit():
return self.view_type.process(self.resource, self.attributes)
return ''
class Definition(TileType):
"""Encapsulates other Tile Types in for a complete template composition.
It can function as an abstract definition where no template is defined,
in which it will extend/or inherit other definitions.
"""
def __init__(self, name, extends=None, template=None, **kwargs):
"""Keyword arguments:
extends -- Definition Tile Type parent of this class
template -- Page Tile Type
resolver -- Resolver (see pytiles.resolvers.DefinitionResolver)
renderer -- Renderer (see pytiles.renderers.DefinitionRenderer)
"""
self._parent = extends
self.template = template
self.attributes = OrderedDict()
self.preparers = []
self.__resolved = False
super(Definition, self).__init__(name, **kwargs)
@property
def extends(self):
"""Parent definition."""
return self._parent
@extends.setter
def extends(self, parent):
"""Set definition to extend."""
self._parent = parent
self.__resolved = False
def add_preparer(self, view_preparer):
"""Add to View Preparers."""
self.preparers.append(view_preparer)
def add_attribute(self, attribute, key=None):
"""Add attributes, attribute can be an instance of TileType,
or some other value as long as a key is given.
Keyword arguments:
key -- String key to store the attribute into.
"""
if isinstance(attribute, TileType):
key = attribute.name if key is None else key
self.attributes[key] = attribute
else:
key = key if key is not None else str(id(attribute))
self.attributes[key] = String(key, attribute)
def override_template(self, template, merge=True):
"""Keyword arguments:
template -- Page Tile Type
merge -- If True attributes of given template will be merged
with the overridden template
"""
if merge and self.template is not None:
template.add_attributes(self.template.attributes)
self.template = template
def is_extended(self):
"""Check if definition has any parents, therefore has been extended."""
return self._parent is not None
def resolve(self, definition_context=None):
"""Passes the Definition to a resolver, the expected result should be a
definition that has been merged with its parents."""
definition_context = self if definition_context is None else definition_context
super(Definition, self).resolve(definition_context)
# All parent definitions should now be "merged" into
# this definition, parent no longer needed.
self._parent = None
self.__resolved = True
def render(self, attributes=None):
"""Render Definition, if the definition has not been resolved, the
resolver will be called before rendering.
Keyword arguments:
attributes -- Extra attributes to add to the template.
"""
if not self.__resolved:
self.resolve(self)
if attributes is not None:
for key, value in attributes.items():
self.add_attribute(value, key=key)
return super(Definition, self).render()
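# Illustrative sketch (an assumption, not part of pytiles): composing a List of String
# components and rendering it. List.render emits a simple <ul> using each item's
# string form, which for TileType subclasses goes through their render() method.
def _example_menu():
    menu = List('menu', items=[String('home', 'Home'), String('about', 'About')])
    return menu.render()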
|
|
"""Extension API for adding custom tags and behavior."""
import pprint
import re
import typing as t
from markupsafe import Markup
from . import defaults
from . import nodes
from .environment import Environment
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .runtime import concat # type: ignore
from .runtime import Context
from .runtime import Undefined
from .utils import import_string
from .utils import pass_context
if t.TYPE_CHECKING:
import typing_extensions as te
from .lexer import Token
from .lexer import TokenStream
from .parser import Parser
class _TranslationsBasic(te.Protocol):
def gettext(self, message: str) -> str:
...
def ngettext(self, singular: str, plural: str, n: int) -> str:
...
class _TranslationsContext(_TranslationsBasic):
def pgettext(self, context: str, message: str) -> str:
...
def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
...
_SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
# I18N functions available in Jinja templates. If the I18N library
# provides ugettext, it will be assigned to gettext.
GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
"_",
"gettext",
"ngettext",
"pgettext",
"npgettext",
)
_ws_re = re.compile(r"\s*\n\s*")
class Extension:
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as it includes the name of the extension (fragment cache).
"""
identifier: t.ClassVar[str]
def __init_subclass__(cls) -> None:
cls.identifier = f"{cls.__module__}.{cls.__name__}"
#: if this extension parses this is the list of tags it's listening to.
tags: t.Set[str] = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment: Environment) -> None:
self.environment = environment
def bind(self, environment: Environment) -> "Extension":
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(
self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
) -> str:
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(
self, stream: "TokenStream"
) -> t.Union["TokenStream", t.Iterable["Token"]]:
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
"""
return stream
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(
self, name: str, lineno: t.Optional[int] = None
) -> nodes.ExtensionAttribute:
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(
self,
name: str,
args: t.Optional[t.List[nodes.Expr]] = None,
kwargs: t.Optional[t.List[nodes.Keyword]] = None,
dyn_args: t.Optional[nodes.Expr] = None,
dyn_kwargs: t.Optional[nodes.Expr] = None,
lineno: t.Optional[int] = None,
) -> nodes.Call:
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(
self.attr(name, lineno=lineno),
args,
kwargs,
dyn_args,
dyn_kwargs,
lineno=lineno,
)
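# Illustrative sketch (not one of Jinja's shipped extensions): a minimal custom
# extension adding an ``{% upper %}...{% endupper %}`` block tag. All names here
# are assumptions for demonstration only.
class _UpperCaseExtension(Extension):
    tags = {"upper"}

    def parse(self, parser: "Parser") -> nodes.Node:
        lineno = next(parser.stream).lineno
        body = parser.parse_statements(("name:endupper",), drop_needle=True)
        # Wrap the block body in a call back into this extension's _upper method.
        return nodes.CallBlock(
            self.call_method("_upper"), [], [], body
        ).set_lineno(lineno)

    def _upper(self, caller: t.Callable[[], str]) -> str:
        return caller().upper()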
@pass_context
def _gettext_alias(
__context: Context, *args: t.Any, **kwargs: t.Any
) -> t.Union[t.Any, Undefined]:
return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
@pass_context
def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, even if there are no
# variables. This makes translation strings more consistent
# and predictable. This requires escaping literal percent signs as "%%".
return rv % variables # type: ignore
return gettext
def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
@pass_context
def ngettext(
__context: Context,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return ngettext
def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
@pass_context
def pgettext(
__context: Context, __string_ctx: str, __string: str, **variables: t.Any
) -> str:
variables.setdefault("context", __string_ctx)
rv = __context.call(func, __string_ctx, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return pgettext
def _make_new_npgettext(
func: t.Callable[[str, str, str, int], str]
) -> t.Callable[..., str]:
@pass_context
def npgettext(
__context: Context,
__string_ctx: str,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("context", __string_ctx)
variables.setdefault("num", __num)
rv = __context.call(func, __string_ctx, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return npgettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja."""
tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment: Environment) -> None:
super().__init__(environment)
environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False,
)
def _install(
self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
) -> None:
# ugettext and ungettext are preferred in case the I18N library
# is providing compatibility with older Python versions.
gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
pgettext = getattr(translations, "pgettext", None)
npgettext = getattr(translations, "npgettext", None)
self._install_callables(
gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
)
def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
import gettext
translations = gettext.NullTranslations()
if hasattr(translations, "pgettext"):
# Python >= 3.8 provides pgettext on NullTranslations.
pgettext = translations.pgettext # type: ignore
else:
def pgettext(c: str, s: str) -> str:
return s
if hasattr(translations, "npgettext"):
npgettext = translations.npgettext # type: ignore
else:
def npgettext(c: str, s: str, p: str, n: int) -> str:
return s if n == 1 else p
self._install_callables(
gettext=translations.gettext,
ngettext=translations.ngettext,
newstyle=newstyle,
pgettext=pgettext,
npgettext=npgettext,
)
def _install_callables(
self,
gettext: t.Callable[[str], str],
ngettext: t.Callable[[str, str, int], str],
newstyle: t.Optional[bool] = None,
pgettext: t.Optional[t.Callable[[str, str], str]] = None,
npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
) -> None:
if newstyle is not None:
self.environment.newstyle_gettext = newstyle # type: ignore
if self.environment.newstyle_gettext: # type: ignore
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
if pgettext is not None:
pgettext = _make_new_pgettext(pgettext)
if npgettext is not None:
npgettext = _make_new_npgettext(npgettext)
self.environment.globals.update(
gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
)
def _uninstall(self, translations: "_SupportedTranslations") -> None:
for key in ("gettext", "ngettext", "pgettext", "npgettext"):
self.environment.globals.pop(key, None)
def _extract(
self,
source: t.Union[str, nodes.Template],
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
if isinstance(source, str):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
context = None
context_token = parser.stream.next_if("string")
if context_token is not None:
context = context_token.value
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr: t.Optional[nodes.Expr] = None
plural_expr_assignment: t.Optional[nodes.Assign] = None
num_called_num = False
variables: t.Dict[str, nodes.Expr] = {}
trimmed = None
while parser.stream.current.type != "block_end":
if variables:
parser.stream.expect("comma")
# skip colon for python compatibility
if parser.stream.skip_if("colon"):
break
token = parser.stream.expect("name")
if token.value in variables:
parser.fail(
f"translatable variable {token.value!r} defined twice.",
token.lineno,
exc=TemplateAssertionError,
)
# expressions
if parser.stream.current.type == "assign":
next(parser.stream)
variables[token.value] = var = parser.parse_expression()
elif trimmed is None and token.value in ("trimmed", "notrimmed"):
trimmed = token.value == "trimmed"
continue
else:
variables[token.value] = var = nodes.Name(token.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name("_trans", "load")
variables[token.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name("_trans", "store"), var
)
else:
plural_expr = var
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], "load")
num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
if parser.stream.current.type != "block_end":
token = parser.stream.expect("name")
if token.value not in variables:
parser.fail(
f"unknown variable {token.value!r} for pluralization",
token.lineno,
exc=TemplateAssertionError,
)
plural_expr = variables[token.value]
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for name in referenced:
if name not in variables:
variables[name] = nodes.Name(name, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail("pluralize without variables", lineno)
if trimmed is None:
trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(
singular,
plural,
context,
variables,
plural_expr,
bool(referenced),
num_called_num and have_plural,
)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
return _ws_re.sub(" ", string.strip())
def _parse_block(
self, parser: "Parser", allow_pluralize: bool
) -> t.Tuple[t.List[str], str]:
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while True:
if parser.stream.current.type == "data":
buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
elif parser.stream.current.type == "variable_begin":
next(parser.stream)
name = parser.stream.expect("name").value
referenced.append(name)
buf.append(f"%({name})s")
parser.stream.expect("variable_end")
elif parser.stream.current.type == "block_begin":
next(parser.stream)
if parser.stream.current.test("name:endtrans"):
break
elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
parser.fail(
"a translatable section can have only one pluralize section"
)
parser.fail(
"control structures in translatable sections are not allowed"
)
elif parser.stream.eos:
parser.fail("unclosed translation block")
else:
raise RuntimeError("internal parser error")
return referenced, concat(buf)
def _make_node(
self,
singular: str,
plural: t.Optional[str],
context: t.Optional[str],
variables: t.Dict[str, nodes.Expr],
plural_expr: t.Optional[nodes.Expr],
vars_referenced: bool,
num_called_num: bool,
) -> nodes.Output:
"""Generates a useful node from the data provided."""
newstyle = self.environment.newstyle_gettext # type: ignore
node: nodes.Expr
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not newstyle:
singular = singular.replace("%%", "%")
if plural:
plural = plural.replace("%%", "%")
func_name = "gettext"
func_args: t.List[nodes.Expr] = [nodes.Const(singular)]
if context is not None:
func_args.insert(0, nodes.Const(context))
func_name = f"p{func_name}"
if plural_expr is not None:
func_name = f"n{func_name}"
func_args.extend((nodes.Const(plural), plural_expr))
node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if newstyle:
for key, value in variables.items():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(
node,
nodes.Dict(
[
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]
),
)
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
tags = {"do"}
def parse(self, parser: "Parser") -> nodes.ExprStmt:
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = {"break", "continue"}
def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
token = next(parser.stream)
if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class DebugExtension(Extension):
"""A ``{% debug %}`` tag that dumps the available variables,
filters, and tests.
.. code-block:: html+jinja
<pre>{% debug %}</pre>
.. code-block:: text
{'context': {'cycler': <class 'jinja2.utils.Cycler'>,
...,
'namespace': <class 'jinja2.utils.Namespace'>},
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
.. versionadded:: 2.11.0
"""
tags = {"debug"}
def parse(self, parser: "Parser") -> nodes.Output:
lineno = parser.stream.expect("name:debug").lineno
context = nodes.ContextReference()
result = self.call_method("_render", [context], lineno=lineno)
return nodes.Output([result], lineno=lineno)
def _render(self, context: Context) -> str:
result = {
"context": context.get_all(),
"filters": sorted(self.environment.filters.keys()),
"tests": sorted(self.environment.tests.keys()),
}
# Set the depth since the intent is to show the top few names.
return pprint.pformat(result, depth=3, compact=True)
def extract_from_ast(
ast: nodes.Template,
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
babel_style: bool = True,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string, or a tuple of strings for functions
with multiple string arguments.
    Because this extraction function operates on the AST, it is unable to
    extract any comments. For comment support you have to use the Babel
    extraction interface or extract comments yourself.
"""
out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
for node in ast.find_all(nodes.Call):
if (
not isinstance(node.node, nodes.Name)
or node.node.name not in gettext_functions
):
continue
strings: t.List[t.Optional[str]] = []
for arg in node.args:
if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
strings.append(arg.value)
else:
strings.append(None)
for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
out = tuple(x for x in strings if x is not None)
if not out:
continue
else:
if len(strings) == 1:
out = strings[0]
else:
out = tuple(strings)
yield node.lineno, node.node.name, out
class _CommentFinder:
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(
self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
) -> None:
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset: int) -> t.List[str]:
try:
for _, token_type, token_value in reversed(
self.tokens[self.offset : offset]
):
if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno: int) -> t.List[str]:
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(
fileobj: t.BinaryIO,
keywords: t.Sequence[str],
comment_tags: t.Sequence[str],
options: t.Dict[str, t.Any],
) -> t.Iterator[
t.Tuple[
int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
]
]:
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions: t.Dict[t.Type[Extension], None] = {}
for extension_name in options.get("extensions", "").split(","):
extension_name = extension_name.strip()
if not extension_name:
continue
extensions[import_string(extension_name)] = None
if InternationalizationExtension not in extensions:
extensions[InternationalizationExtension] = None
def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
silent = getbool(options, "silent", True)
environment = Environment(
options.get("block_start_string", defaults.BLOCK_START_STRING),
options.get("block_end_string", defaults.BLOCK_END_STRING),
options.get("variable_start_string", defaults.VARIABLE_START_STRING),
options.get("variable_end_string", defaults.VARIABLE_END_STRING),
options.get("comment_start_string", defaults.COMMENT_START_STRING),
options.get("comment_end_string", defaults.COMMENT_END_STRING),
options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
defaults.NEWLINE_SEQUENCE,
getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
tuple(extensions),
cache_size=0,
auto_reload=False,
)
if getbool(options, "trimmed"):
environment.policies["ext.i18n.trimmed"] = True
if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True # type: ignore
source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
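# A hypothetical Babel mapping file ("babel.cfg" is a made-up name) that would
# route template files through babel_extract above; the option names mirror
# the keys read from ``options`` in this function:
#
#   [jinja2: **/templates/**.html]
#   encoding = utf-8
#   extensions = jinja2.ext.do, jinja2.ext.loopcontrols
#   trimmed = true
#   newstyle_gettext = true
#   silent = false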
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
debug = DebugExtension
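# Minimal usage sketch (not part of the module itself): the aliases above let
# these extensions be enabled by their short import paths. The template
# content below is illustrative only.
if __name__ == "__main__":  # pragma: no cover - example only
    from jinja2 import Environment

    env = Environment(
        extensions=[
            "jinja2.ext.i18n",
            "jinja2.ext.do",
            "jinja2.ext.loopcontrols",
            "jinja2.ext.debug",
        ]
    )
    # install no-op gettext callables so {% trans %} renders without Babel
    env.install_null_translations(newstyle=True)
    tmpl = env.from_string(
        "{% set items = [] %}"
        "{% do items.append(42) %}"
        "{% for i in items %}{% if i > 100 %}{% break %}{% endif %}{{ i }}{% endfor %} "
        "{% trans count=items|length %}{{ count }} item"
        "{% pluralize %}{{ count }} items{% endtrans %}"
    )
    print(tmpl.render())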
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:24749")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:24749")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a CryptoPowers address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a CryptoPowers address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RunConfig tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.platform import test
_TEST_DIR = 'test_dir'
_MASTER = 'master_'
_NOT_SUPPORTED_REPLACE_PROPERTY_MSG = 'Replacing .*is not supported'
_SAVE_CKPT_ERR = (
'`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
_MODEL_DIR_ERR = 'model_dir should be non-empty'
_SAVE_SUMMARY_STEPS_ERR = 'save_summary_steps should be >= 0'
_SAVE_CKPT_STEPS_ERR = 'save_checkpoints_steps should be >= 0'
_SAVE_CKPT_SECS_ERR = 'save_checkpoints_secs should be >= 0'
_SESSION_CONFIG_ERR = 'session_config must be instance of ConfigProto'
_KEEP_CKPT_MAX_ERR = 'keep_checkpoint_max should be >= 0'
_KEEP_CKPT_HOURS_ERR = 'keep_checkpoint_every_n_hours should be > 0'
_TF_RANDOM_SEED_ERR = 'tf_random_seed must be integer'
_ONE_CHIEF_ERR = 'The "cluster" in TF_CONFIG must have only one "chief" node.'
_ONE_MASTER_ERR = 'The "cluster" in TF_CONFIG must have only one "master" node.'
_MISSING_CHIEF_ERR = 'If "cluster" is set .* it must have one "chief" node'
_MISSING_TASK_TYPE_ERR = 'If "cluster" is set .* task type must be set'
_MISSING_TASK_ID_ERR = 'If "cluster" is set .* task index must be set'
_INVALID_TASK_INDEX_ERR = 'is not a valid task_id'
_NEGATIVE_TASK_INDEX_ERR = 'Task index must be non-negative number.'
_INVALID_TASK_TYPE_ERR = 'is not a valid task_type'
_INVALID_TASK_TYPE_FOR_LOCAL_ERR = (
'If "cluster" is not set in TF_CONFIG, task type must be WORKER.')
_INVALID_TASK_INDEX_FOR_LOCAL_ERR = (
'If "cluster" is not set in TF_CONFIG, task index must be 0.')
_INVALID_EVALUATOR_IN_CLUSTER_WITH_MASTER_ERR = (
'If `master` node exists in `cluster`, task_type `evaluator` is not '
'supported.')
_INVALID_CHIEF_IN_CLUSTER_WITH_MASTER_ERR = (
'If `master` node exists in `cluster`, job `chief` is not supported.')
_INVALID_SERVICE_TYPE_ERR = (
'If "service" is set in TF_CONFIG, it must be a dict. Given')
def _create_run_config_with_cluster_spec(tf_config, **kwargs):
with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
return run_config_lib.RunConfig(**kwargs)
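# Illustrative only: the helper above serializes a dict like the following
# into the TF_CONFIG environment variable before constructing RunConfig
# (host addresses are made up).
#
#   TF_CONFIG = '''
#   {
#     "cluster": {
#       "chief": ["host0:2222"],
#       "ps": ["host1:2222"],
#       "worker": ["host2:2222", "host3:2222"]
#     },
#     "task": {"type": "worker", "index": 1}
#   }
#   '''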
class RunConfigTest(test.TestCase):
def test_default_property_values(self):
config = run_config_lib.RunConfig()
self.assertIsNone(config.model_dir)
self.assertIsNone(config.session_config)
self.assertIsNone(config.tf_random_seed)
self.assertEqual(100, config.save_summary_steps)
self.assertEqual(600, config.save_checkpoints_secs)
self.assertIsNone(config.save_checkpoints_steps)
self.assertEqual(5, config.keep_checkpoint_max)
self.assertEqual(10000, config.keep_checkpoint_every_n_hours)
self.assertIsNone(config.service)
def test_model_dir(self):
empty_config = run_config_lib.RunConfig()
self.assertIsNone(empty_config.model_dir)
new_config = empty_config.replace(model_dir=_TEST_DIR)
self.assertEqual(_TEST_DIR, new_config.model_dir)
def test_replace_with_allowed_properties(self):
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
config = run_config_lib.RunConfig().replace(
tf_random_seed=11,
save_summary_steps=12,
save_checkpoints_secs=14,
session_config=session_config,
keep_checkpoint_max=16,
keep_checkpoint_every_n_hours=17)
self.assertEqual(11, config.tf_random_seed)
self.assertEqual(12, config.save_summary_steps)
self.assertEqual(14, config.save_checkpoints_secs)
self.assertEqual(session_config, config.session_config)
self.assertEqual(16, config.keep_checkpoint_max)
self.assertEqual(17, config.keep_checkpoint_every_n_hours)
def test_replace_none_value(self):
config = run_config_lib.RunConfig().replace(
tf_random_seed=None,
model_dir=None,
save_summary_steps=None,
save_checkpoints_secs=None,
save_checkpoints_steps=None,
session_config=None,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=None)
self.assertIsNone(config.tf_random_seed)
self.assertIsNone(config.model_dir)
self.assertIsNone(config.save_summary_steps)
self.assertIsNone(config.save_checkpoints_secs)
self.assertIsNone(config.save_checkpoints_steps)
self.assertIsNone(config.session_config)
self.assertIsNone(config.keep_checkpoint_max)
self.assertIsNone(config.keep_checkpoint_every_n_hours)
  def test_replace_with_disallowed_properties(self):
    config = run_config_lib.RunConfig()
    with self.assertRaises(ValueError):
      # master is not allowed to be replaced.
      config.replace(master='_master')
with self.assertRaises(ValueError):
config.replace(some_undefined_property=123)
def test_replace(self):
config = run_config_lib.RunConfig()
with self.assertRaisesRegexp(
ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
# master is not allowed to be replaced.
config.replace(master=_MASTER)
with self.assertRaisesRegexp(
ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
config.replace(some_undefined_property=_MASTER)
def test_replace_invalid_values(self):
config = run_config_lib.RunConfig()
with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
config.replace(model_dir='')
with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
config.replace(save_summary_steps=-1)
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
config.replace(save_checkpoints_steps=-1)
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
config.replace(save_checkpoints_secs=-1)
with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
config.replace(session_config={})
with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
config.replace(keep_checkpoint_max=-1)
with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
config.replace(keep_checkpoint_every_n_hours=0)
with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
config.replace(tf_random_seed=1.0)
def test_init_with_allowed_properties(self):
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
config = run_config_lib.RunConfig(
tf_random_seed=11,
save_summary_steps=12,
save_checkpoints_secs=14,
session_config=session_config,
keep_checkpoint_max=16,
keep_checkpoint_every_n_hours=17)
self.assertEqual(11, config.tf_random_seed)
self.assertEqual(12, config.save_summary_steps)
self.assertEqual(14, config.save_checkpoints_secs)
self.assertEqual(session_config, config.session_config)
self.assertEqual(16, config.keep_checkpoint_max)
self.assertEqual(17, config.keep_checkpoint_every_n_hours)
def test_init_none_value(self):
config = run_config_lib.RunConfig(
tf_random_seed=None,
model_dir=None,
save_summary_steps=None,
save_checkpoints_secs=None,
save_checkpoints_steps=None,
session_config=None,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=None)
self.assertIsNone(config.tf_random_seed)
self.assertIsNone(config.model_dir)
self.assertIsNone(config.save_summary_steps)
self.assertIsNone(config.save_checkpoints_secs)
self.assertIsNone(config.save_checkpoints_steps)
self.assertIsNone(config.session_config)
self.assertIsNone(config.keep_checkpoint_max)
self.assertIsNone(config.keep_checkpoint_every_n_hours)
def test_init_invalid_values(self):
with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
run_config_lib.RunConfig(model_dir='')
with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
run_config_lib.RunConfig(save_summary_steps=-1)
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
run_config_lib.RunConfig(save_checkpoints_steps=-1)
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
run_config_lib.RunConfig(save_checkpoints_secs=-1)
with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
run_config_lib.RunConfig(session_config={})
with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
run_config_lib.RunConfig(keep_checkpoint_max=-1)
with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
run_config_lib.RunConfig(keep_checkpoint_every_n_hours=0)
with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
run_config_lib.RunConfig(tf_random_seed=1.0)
class RunConfigDistributedSettingTest(test.TestCase):
def _assert_distributed_properties(self, run_config,
expected_cluster_spec,
expected_task_type,
expected_task_id,
expected_master,
expected_evaluation_master,
expected_is_chief,
expected_num_worker_replicas,
expected_num_ps_replicas):
self.assertEqual(expected_cluster_spec, run_config.cluster_spec.as_dict())
self.assertEqual(expected_task_type, run_config.task_type)
self.assertEqual(expected_task_id, run_config.task_id)
self.assertEqual(expected_master, run_config.master)
self.assertEqual(expected_evaluation_master, run_config.evaluation_master)
self.assertEqual(expected_is_chief, run_config.is_chief)
self.assertEqual(expected_num_worker_replicas,
run_config.num_worker_replicas)
self.assertEqual(expected_num_ps_replicas, run_config.num_ps_replicas)
def test_default_values(self):
self._assert_distributed_properties(
run_config=run_config_lib.RunConfig(),
expected_cluster_spec={},
expected_task_type=run_config_lib.TaskType.WORKER,
expected_task_id=0,
expected_master='',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=1,
expected_num_ps_replicas=0)
def test_tf_config_for_local(self):
tf_config = {
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec={},
expected_task_type=run_config_lib.TaskType.WORKER,
expected_task_id=0,
expected_master='',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=1,
expected_num_ps_replicas=0)
def test_invalid_task_type_for_local(self):
tf_config = {
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_FOR_LOCAL_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_invalid_task_index_for_local(self):
tf_config = {
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_FOR_LOCAL_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_chief_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.CHIEF,
expected_task_id=0,
expected_master='grpc://host0:0',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_fail_with_multiple_chief_nodes(self):
tf_config = {
'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0', 'host6:6'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
}
with self.assertRaisesRegexp(ValueError, _ONE_CHIEF_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_missing_chief_node(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
}
with self.assertRaisesRegexp(ValueError, _MISSING_CHIEF_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_single_chief_node(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.CHIEF,
expected_task_id=0,
expected_master='grpc://host0:0',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=1,
expected_num_ps_replicas=0)
def test_fail_with_missing_task_type_for_distributed(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
}
with self.assertRaisesRegexp(ValueError, _MISSING_TASK_TYPE_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_missing_task_index_for_distributed(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
}
}
with self.assertRaisesRegexp(ValueError, _MISSING_TASK_ID_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_index_is_too_large(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 1
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_invalid_task_index(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': -1
}
}
with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_invalid_task_type(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 0
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_worker_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.WORKER,
expected_task_id=1,
expected_master='grpc://host4:4',
expected_evaluation_master='',
expected_is_chief=False,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_ps_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.PS,
expected_task_id=0,
expected_master='grpc://host1:1',
expected_evaluation_master='',
expected_is_chief=False,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_evaluator_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 12
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec={},
expected_task_type=run_config_lib.TaskType.EVALUATOR,
expected_task_id=12,
expected_master='',
expected_evaluation_master='',
expected_is_chief=False, # evaluator is never chief.
expected_num_worker_replicas=0, # evaluator is not in training cluster.
expected_num_ps_replicas=0)
def test_fail_with_invalid_task_index_for_evaluator(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': -1
}
}
with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
_create_run_config_with_cluster_spec(tf_config)
class RunConfigDistributedSettingWithMasterTest(test.TestCase):
def _assert_distributed_properties(self, run_config,
expected_cluster_spec,
expected_task_type,
expected_task_id,
expected_master,
expected_evaluation_master,
expected_is_chief,
expected_num_worker_replicas,
expected_num_ps_replicas):
self.assertEqual(expected_cluster_spec, run_config.cluster_spec.as_dict())
self.assertEqual(expected_task_type, run_config.task_type)
self.assertEqual(expected_task_id, run_config.task_id)
self.assertEqual(expected_master, run_config.master)
self.assertEqual(expected_evaluation_master, run_config.evaluation_master)
self.assertEqual(expected_is_chief, run_config.is_chief)
self.assertEqual(expected_num_worker_replicas,
run_config.num_worker_replicas)
self.assertEqual(expected_num_ps_replicas, run_config.num_ps_replicas)
def test_invalid_task_type_for_local(self):
tf_config = {
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_FOR_LOCAL_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_master_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.MASTER,
expected_task_id=0,
expected_master='grpc://host0:0',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_fail_with_multiple_master_nodes(self):
tf_config = {
'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0', 'host6:6'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
}
with self.assertRaisesRegexp(ValueError, _ONE_MASTER_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_single_master_node(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.MASTER,
expected_task_id=0,
expected_master='grpc://host0:0',
expected_evaluation_master='',
expected_is_chief=True,
expected_num_worker_replicas=1,
expected_num_ps_replicas=0)
def test_fail_with_missing_task_type_for_distributed(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host3:3']
},
}
with self.assertRaisesRegexp(ValueError, _MISSING_TASK_TYPE_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_missing_task_index_for_distributed(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
}
}
with self.assertRaisesRegexp(ValueError, _MISSING_TASK_ID_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_index_is_too_large(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 1
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_invalid_task_index(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': -1
}
}
with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_invalid_task_type(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host3:3']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 0
}
}
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_worker_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.WORKER,
expected_task_id=1,
expected_master='grpc://host4:4',
expected_evaluation_master='',
expected_is_chief=False,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_ps_tf_config(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 0
}
}
self._assert_distributed_properties(
run_config=_create_run_config_with_cluster_spec(tf_config),
expected_cluster_spec=tf_config['cluster'],
expected_task_type=run_config_lib.TaskType.PS,
expected_task_id=0,
expected_master='grpc://host1:1',
expected_evaluation_master='',
expected_is_chief=False,
expected_num_worker_replicas=4,
expected_num_ps_replicas=2)
def test_fail_with_evaluator(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 1
}
}
with self.assertRaisesRegexp(ValueError,
_INVALID_EVALUATOR_IN_CLUSTER_WITH_MASTER_ERR):
_create_run_config_with_cluster_spec(tf_config)
def test_fail_with_chief(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.CHIEF: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with self.assertRaisesRegexp(ValueError,
_INVALID_CHIEF_IN_CLUSTER_WITH_MASTER_ERR):
_create_run_config_with_cluster_spec(tf_config)
class RunConfigSaveCheckpointsTest(test.TestCase):
def test_save_checkpoint(self):
empty_config = run_config_lib.RunConfig()
self.assertEqual(600, empty_config.save_checkpoints_secs)
self.assertIsNone(empty_config.save_checkpoints_steps)
config_with_steps = empty_config.replace(save_checkpoints_steps=100)
del empty_config
self.assertEqual(100, config_with_steps.save_checkpoints_steps)
self.assertIsNone(config_with_steps.save_checkpoints_secs)
config_with_secs = config_with_steps.replace(save_checkpoints_secs=200)
del config_with_steps
self.assertEqual(200, config_with_secs.save_checkpoints_secs)
self.assertIsNone(config_with_secs.save_checkpoints_steps)
def test_save_checkpoint_both_steps_and_secs_are_not_none(self):
empty_config = run_config_lib.RunConfig()
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
empty_config.replace(save_checkpoints_steps=100,
save_checkpoints_secs=200)
with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
run_config_lib.RunConfig(save_checkpoints_steps=100,
save_checkpoints_secs=200)
def test_save_checkpoint_both_steps_and_secs_are_none(self):
config_with_secs = run_config_lib.RunConfig()
config_without_ckpt = config_with_secs.replace(
save_checkpoints_steps=None, save_checkpoints_secs=None)
self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
self.assertIsNone(config_without_ckpt.save_checkpoints_secs)
def test_save_checkpoint_flip_secs_to_none(self):
config_with_secs = run_config_lib.RunConfig()
config_without_ckpt = config_with_secs.replace(save_checkpoints_secs=None)
self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
self.assertIsNone(config_without_ckpt.save_checkpoints_secs)
def test_save_checkpoint_flip_steps_to_none(self):
config_with_steps = run_config_lib.RunConfig().replace(
save_checkpoints_steps=100)
config_without_ckpt = config_with_steps.replace(save_checkpoints_steps=None)
self.assertIsNone(config_without_ckpt.save_checkpoints_steps)
self.assertIsNone(config_without_ckpt.save_checkpoints_secs)
class RunConfigServiceKeyTest(test.TestCase):
def test_arbitrary_key_value_pairs(self):
tf_config = {
'service': {
'key1': [1, 2],
'key2': {'a': 3, 'b': 4},
'key3': 789,
},
}
run_config = _create_run_config_with_cluster_spec(tf_config)
self.assertEqual(tf_config['service'], run_config.service)
def test_missing_service_key(self):
tf_config = {
'model_dir': '/tmp/123',
}
run_config = _create_run_config_with_cluster_spec(tf_config)
self.assertIsNone(run_config.service)
def test_fail_with_non_dict(self):
tf_config = {
'service': 789,
}
with self.assertRaisesRegexp(TypeError, _INVALID_SERVICE_TYPE_ERR):
_create_run_config_with_cluster_spec(tf_config)
if __name__ == '__main__':
test.main()
|
|
import gevent
import sys
import socket
import time
import pickle
from JumpScale import j
from gevent.server import StreamServer
#from MessageRouter import MessageRouter
class TCPSession():
"""
is the baseclass to do socket handling for the worker or manhole
"""
def __init__(self, addr, port, socket):
self.addr = addr
self.port = port
self.socket = socket
        self.socket.settimeout(10)
self.active = True
self.whoami = ""
self.type = None
self.sessionnr = 0
self.dataleftover = ""
def ready(self):
print(("%s active %s : %s" % (self.type, self.whoami, self.sessionnr)))
def read(self):
print("read classic 4k block & wait")
return self.socket.recv(4096)
    def kill(self):
        # not every session sets self.fileobj, so close it only when present
        fileobj = getattr(self, "fileobj", None)
        if fileobj is not None:
            fileobj.close()
        self.socket.close()
        print("kill\n%s" % self)
def write(self, msg):
try:
self.socket.sendall(msg)
except Exception as e:
print("failed to send")
print(e)
def sendread(self, msg):
self.write(msg)
return self.read()
def __repr__(self):
s = "type:%s " % self.type
s += "nr:%s ip:%s port:%s " % (self.sessionnr, self.addr, self.port)
s += "active:%s " % self.active
s += "whoami:%s\n" % self.whoami
return s
__str__ = __repr__
class WorkerSession(TCPSession):
def __init__(self, addr, port, socket):
"""
"""
        TCPSession.__init__(self, addr, port, socket)
dtype, length, epoch, gid, nid, pid, cmd = self.read(False)
tags = j.data.tags.getObject(cmd)
self.key = tags.tagGet("key")
self.whoami = tags.tagGet("whoami")
if str(tags.tagGet("type")) == "executor":
self.executor = True # means is executing commands for us
else:
self.executor = False # means this channel will be used as client connection from worker out
self.write("OK")
if out:
pass
else:
self.run()
def run(self):
# keeps on checking for incoming messages
# try:
while True:
print("loopstart")
dtype, length, epoch, gid, nid, pid, data = self.read()
print("loopend")
j.portal.server.active.messagerouter.queue(gid, nid, pid, data)
# except Exception,e:
# print("read error in appserver6 workergreenlet %s\n" % self.sessionnr
# print(e
# gevent.sleep(1)
# self.kill()
def read(self, rpc=True):
"""
@return type,length,epoch,gid,nid,pid,data
"""
data = self.dataleftover
while len(data) < 5:
data += self.socket.recv(4096)
# length, we are ok
size = j.core.messagehandler.getMessageSize(data)
print(("rpc:%s size:%s" % (rpc, size)))
while len(data) < size:
print((1))
data += self.socket.recv(4096)
print((2))
dataOut = data[0:size]
self.dataleftover = data[size:]
if rpc:
dtype, length, epoch, gid, nid, pid, data = j.core.messagehandler.unPackMessage(dataOut)
if dtype == 11:
data = pickle.loads(data)
else:
dtype, length, epoch, gid, nid, pid, data = j.core.messagehandler.unPackMessage(data)
print(("data:%s" % data))
return dtype, length, epoch, gid, nid, pid, data
def ping(self):
        result = self.sendread("ping")  # sendread is the write-then-read helper on TCPSession
if result != "ping":
return False
else:
return True
class TCPSessionLog(TCPSession):
def __init__(self, addr, port, socket):
        TCPSession.__init__(self, addr, port, socket)
self.type = "manhole"
def run(self):
while True:
line = self.read()
def process(self, line):
from JumpScale.core.Shell import ipshell
print("DEBUG NOW logger on tcpsession")
ipshell()
class ManholeSession(TCPSession):
def __init__(self, addr, port, socket):
TCPSession.__init__(self, addr, port, socket)
self.type = "manhole"
self.cmds = j.portal.server.active.tcpservercmds
self.socket.settimeout(None)
def run(self):
while True:
lines = self.read()
if lines == "":
continue
lines = lines.split("\n")
for line in lines:
result = self.process(line)
if result != "" and result is not None:
if result[-1] != "\n":
result += "\n"
# print("***%s*END*"%result
self.write(result)
def read(self):
return self.socket.recv(4096)
def process(self, line):
# print(line
cmd = line.strip()
result = """\
commands:
- ping
- killall
- list
"""
if cmd.find(" ") != -1:
args = " ".join(cmd.split(" ")[1:])
cmd = cmd.split(" ")[0]
else:
args = ""
if cmd == "":
return ""
if cmd in self.cmds:
try:
cmdgreenlet = self.cmds[cmd](cmd, args)
except Exception as e:
return "**ERROR** %s" % (str(e).replace("\n", "--"))
cmdgreenlet.start()
cmdgreenlet.waiter.wait()
result = cmdgreenlet.result
if result is not None:
result = str(result)
return result
return ""
if cmd == "ipshell":
from JumpScale.core.Shell import ipshellDebug, ipshell
print("DEBUG NOW manhole local")
ipshell()
# if cmd.find("exec")==0:
# cmd=cmd[5:]
# self.sockets
if cmd.find("ping") == 0:
cmd = cmd[5:]
session = self.getsession(int(cmd))
if session.ping():
self.write("OK")
else:
self.write("Ping failed to %s\n" % session)
if cmd.find("killall") == 0:
return self.killallsessions()
if cmd.find("list") == 0:
return self.listsessions()
for key in list(self.cmds.keys()):
result += "- %s\n" % key
return result
def getsession(self, id):
if id not in j.portal.server.active.sessions:
self.send("Could not find session with id %s" % id)
return False
return j.portal.server.active.sessions[id]
def killallsessions(self):
result = ""
for key in list(j.portal.server.active.sessions.keys()):
session = j.portal.server.active.sessions[key]
if session.type != "manhole":
session.active = False
session.kill()
result += "killed %s\n" % session.whoami
result += "Kill DONE\n"
return result
def listsessions(self):
result = ""
for key in list(j.portal.server.active.sessions.keys()):
session = j.portal.server.active.sessions[key]
result += "%s" % session
return result
# TODO: don't understand this client, some weird test maybe, can never work
class TCPClient():
def __init__(self, addr='127.0.0.1', key="1234"):
self.addr = addr
self.key = key
self.init()
self.dataleftover = ""
    def init(self):
        for t in range(100000):
            if self._init():
                return True
        raise RuntimeError("Connection timed out to master server %s" % self.addr)
def _init(self):
print(("try to connect to %s:%s" % (self.addr, 6000)))
self.socketout = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socketin = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.sender.settimeout(2)
dataout = 'type:executor whoami:%s key:%s' % (j.application.whoAmI, self.key)
msgout = j.core.messagehandler.data2Message(20, dataout)
datain = 'type:client whoami:%s key:%s' % (j.application.whoAmI, self.key)
msgin = j.core.messagehandler.data2Message(20, datain)
try:
self.socketout.connect((self.addr, 6000))
self.socketin.connect((self.addr, 6000))
# try to init the out channel
self.socketout.sendall(msgout)
if self.socketout.recv(2) != "OK":
raise RuntimeError("bad result, could not connect the out channel")
# try to init the in channel
self.socketin.sendall(msgin)
if self.socketin.recv(2) != "OK":
raise RuntimeError("bad result")
except Exception as e:
print(e)
try:
self.socketout.close()
self.socketin.close()
except:
pass
time.sleep(5)
return False
print("connected")
return True
def sendcmd(self, appName, actorName, instance, method, params, timeout=0, sync=True):
msg = j.core.messagehandler.getRPCMessage(appName, actorName, instance, method, params, timeout, sync)
self.send(msg)
print("sent")
return self.read()
def read(self):
"""
@return type,length,epoch,gid,nid,pid,data
"""
        data = self.dataleftover
        # incoming traffic arrives on self.socketin (this class has no self.socket)
        while len(data) < 5:
            data += self.socketin.recv(4096)
        # we now have at least the length header
        size = j.core.messagehandler.getMessageSize(data)
        while len(data) < size:
            data += self.socketin.recv(4096)
        dataOut = data[0:size]
        self.dataleftover = data[size:]
        dtype, length, epoch, gid, nid, pid, data = j.core.messagehandler.unPackMessage(dataOut)
        if dtype == 11:
            data = pickle.loads(data)
        return dtype, length, epoch, gid, nid, pid, data
def process(self, line):
print(line)
    def send(self, message, maxtry=10):
        # outgoing traffic goes over self.socketout; reconnect and retry on failure
        attempt = 1
        while attempt < maxtry:
            try:
                self.socketout.sendall(message)
                return True
            except Exception:
                attempt += 1
                self._init()
        raise RuntimeError("could not send message, could not reach after %s times" % maxtry)
|
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
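    # Minimal usage sketch (illustrative): with ``xs = np.random.randn(200)``,
    # ``kde = gaussian_kde(xs)`` and ``kde([0.0, 0.5])`` return the estimated
    # density at those two points via ``evaluate`` above.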
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / norm_const / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `gaussian_kde` instance as its only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
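# A minimal usage sketch (not part of SciPy itself) exercising the public
# methods documented above on a 1-D dataset; the sample data, bounds, and
# sizes are illustrative assumptions.
if __name__ == '__main__':
    import numpy as np
    data = np.random.normal(size=200)
    kde = gaussian_kde(data)                  # Scott's rule by default
    mass = kde.integrate_box_1d(-1.0, 1.0)    # probability mass in [-1, 1]
    samples = kde.resample(size=500)          # shape (1, 500), i.e. (d, size)
    kde.set_bandwidth(bw_method='silverman')  # later evaluations use this factor
    print(mass, samples.shape, kde.factor)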
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
#   space they freed too soon (they will likely need to grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
class AllocatorMemoryException(Exception):
'''The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
'''
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
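# A sketch (not part of pyglet) of the recovery pattern described in the
# docstring above: grow the buffer to at least `requested_capacity`, then
# retry the allocation.  `resize_buffer` is a hypothetical caller-supplied
# callback that reallocates the underlying storage.
def _alloc_with_retry(allocator, size, resize_buffer):
    try:
        return allocator.alloc(size)
    except AllocatorMemoryException as e:
        # Double the capacity until the request fits, mirroring the resize
        # policy stated in the module docstring.
        new_capacity = max(allocator.capacity * 2, e.requested_capacity)
        resize_buffer(new_capacity)
        allocator.set_capacity(new_capacity)
        return allocator.alloc(size)   # guaranteed to succeed per docstring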
class Allocator(object):
'''Buffer space allocation implementation.'''
def __init__(self, capacity):
'''Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
'''
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = []
self.sizes = []
def set_capacity(self, size):
'''Resize the maximum buffer size.
The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
'''
assert size > self.capacity
self.capacity = size
def alloc(self, size):
'''Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
'''
assert size > 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i+1]
del self.sizes[i+1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
def realloc(self, start, size, new_size):
'''Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
'''
assert size > 0 and new_size > 0
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
            print(list(zip(self.starts, self.sizes)))
            print(start, size, new_size)
            print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If region is an entire block being an island in free space,
# can possibly extend in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size)
self.dealloc(start, size)
return result
def dealloc(self, start, size):
'''Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
'''
assert size > 0
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
def get_allocated_regions(self):
'''Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
'''
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
'''Returns the amount of space unused, not including the final
free block.
:rtype: int
'''
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
'''Return the amount of space unused.
:rtype: int
'''
if not self.starts:
return self.capacity
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
'''Return fraction of capacity currently allocated.
:rtype: float
'''
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
'''Return fraction of free space that is not expandable.
:rtype: float
'''
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
def _is_empty(self):
return not self.starts
def __str__(self):
        return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
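# An illustrative walk-through (not pyglet test code) of the allocator above;
# the concrete sizes are arbitrary.
if __name__ == '__main__':
    alloc = Allocator(capacity=64)
    a = alloc.alloc(16)             # -> 0
    b = alloc.alloc(16)             # -> 16; extends the same block (sizes=[32])
    alloc.dealloc(b, 16)            # truncate the end of the block back to 16
    c = alloc.alloc(8)              # -> 16; the block grows again
    a = alloc.realloc(a, 16, 8)     # shrink in place; frees 8..16, splitting the block
    print(alloc.get_allocated_regions())   # ([0, 16], [8, 8])
    print(alloc.get_free_size())           # 48, of which 8 is fragmented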
|
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of Prioritized Experience Replay (PER).
This implementation is based on the paper "Prioritized Experience Replay"
by Tom Schaul et al. (2015). Many thanks to Tom Schaul, John Quan, and Matteo
Hessel for providing useful pointers on the algorithm and its implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.replay_memory import circular_replay_buffer
from dopamine.replay_memory import sum_tree
from dopamine.replay_memory.circular_replay_buffer import ReplayElement
import numpy as np
import tensorflow as tf
import gin.tf
@gin.configurable
class OutOfGraphPrioritizedReplayBuffer(
circular_replay_buffer.OutOfGraphReplayBuffer):
"""An out-of-graph Replay Buffer for Prioritized Experience Replay.
See circular_replay_buffer.py for details.
"""
def __init__(self,
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon=1,
gamma=0.99,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes OutOfGraphPrioritizedReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
      gamma: float, the discount factor.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
"""
super(OutOfGraphPrioritizedReplayBuffer, self).__init__(
observation_shape=observation_shape,
stack_size=stack_size,
replay_capacity=replay_capacity,
batch_size=batch_size,
update_horizon=update_horizon,
gamma=gamma,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
self.sum_tree = sum_tree.SumTree(replay_capacity)
def get_add_args_signature(self):
"""The signature of the add function.
The signature is the same as the one for OutOfGraphReplayBuffer, with an
added priority.
Returns:
list of ReplayElements defining the type of the argument signature needed
by the add function.
"""
parent_add_signature = super(OutOfGraphPrioritizedReplayBuffer,
self).get_add_args_signature()
add_signature = parent_add_signature + [
ReplayElement('priority', (), np.float32)
]
return add_signature
def _add(self, *args):
"""Internal add method to add to the underlying memory arrays.
    The arguments need to match get_add_args_signature().
    If priority is None, it is set to the maximum priority ever seen.
Args:
*args: All the elements in a transition.
"""
self._check_args_length(*args)
# Use Schaul et al.'s (2015) scheme of setting the priority of new elements
# to the maximum priority so far.
# Picks out 'priority' from arguments and adds it to the sum_tree.
transition = {}
for i, element in enumerate(self.get_add_args_signature()):
if element.name == 'priority':
priority = args[i]
else:
transition[element.name] = args[i]
self.sum_tree.set(self.cursor(), priority)
super(OutOfGraphPrioritizedReplayBuffer, self)._add_transition(transition)
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices sampled as in Schaul et al. (2015).
Args:
batch_size: int, number of indices returned.
Returns:
      list of ints, a batch of valid indices sampled proportionally to their
        priorities (stratified), as in Schaul et al. (2015).
    Raises:
      RuntimeError: If the batch was not constructed after the maximum number
        of tries.
"""
# Sample stratified indices. Some of them might be invalid.
indices = self.sum_tree.stratified_sample(batch_size)
allowed_attempts = self._max_sample_attempts
for i in range(len(indices)):
if not self.is_valid_transition(indices[i]):
if allowed_attempts == 0:
raise RuntimeError(
'Max sample attempts: Tried {} times but only sampled {}'
' valid indices. Batch size is {}'.
format(self._max_sample_attempts, i, batch_size))
index = indices[i]
while not self.is_valid_transition(index) and allowed_attempts > 0:
# If index i is not valid keep sampling others. Note that this
# is not stratified.
index = self.sum_tree.sample()
allowed_attempts -= 1
indices[i] = index
return indices
def sample_transition_batch(self, batch_size=None, indices=None):
"""Returns a batch of transitions with extra storage and the priorities.
    The extra storage is defined through the extra_storage_types constructor
    argument.
    When the transition is terminal, next_state_batch has undefined contents.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
indices: None or list of ints, the indices of every transition in the
batch. If None, sample the indices uniformly.
Returns:
transition_batch: tuple of np.arrays with the shape and type as in
get_transition_elements().
"""
transition = (super(OutOfGraphPrioritizedReplayBuffer, self).
sample_transition_batch(batch_size, indices))
transition_elements = self.get_transition_elements(batch_size)
transition_names = [e.name for e in transition_elements]
probabilities_index = transition_names.index('sampling_probabilities')
indices_index = transition_names.index('indices')
indices = transition[indices_index]
# The parent returned an empty array for the probabilities. Fill it with the
# contents of the sum tree.
transition[probabilities_index][:] = self.get_priority(indices)
return transition
def set_priority(self, indices, priorities):
"""Sets the priority of the given elements according to Schaul et al.
Args:
indices: np.array with dtype int32, of indices in range
[0, replay_capacity).
priorities: float, the corresponding priorities.
"""
assert indices.dtype == np.int32, ('Indices must be integers, '
'given: {}'.format(indices.dtype))
# Convert JAX arrays to NumPy arrays first, since it is faster to iterate
# over the entirety of a NumPy array than a JAX array.
priorities = np.asarray(priorities)
for index, priority in zip(indices, priorities):
self.sum_tree.set(index, priority)
def get_priority(self, indices):
"""Fetches the priorities correspond to a batch of memory indices.
For any memory location not yet used, the corresponding priority is 0.
Args:
indices: np.array with dtype int32, of indices in range
[0, replay_capacity).
Returns:
priorities: float, the corresponding priorities.
"""
assert indices.shape, 'Indices must be an array.'
assert indices.dtype == np.int32, ('Indices must be int32s, '
'given: {}'.format(indices.dtype))
batch_size = len(indices)
priority_batch = np.empty((batch_size), dtype=np.float32)
for i, memory_index in enumerate(indices):
priority_batch[i] = self.sum_tree.get(memory_index)
return priority_batch
def get_transition_elements(self, batch_size=None):
"""Returns a 'type signature' for sample_transition_batch.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
Returns:
signature: A namedtuple describing the method's return type signature.
"""
parent_transition_type = (
super(OutOfGraphPrioritizedReplayBuffer,
self).get_transition_elements(batch_size))
    probabilities_type = [
        ReplayElement('sampling_probabilities', (batch_size,), np.float32)
    ]
    return parent_transition_type + probabilities_type
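# A hedged usage sketch (not part of Dopamine); it assumes the parent class's
# add(observation, action, reward, terminal, *extra_args) interface, extended
# here with the extra 'priority' element declared by get_add_args_signature().
# Shapes and hyperparameters below are illustrative.
def _example_out_of_graph_usage():
  obs_shape = (84, 84)
  buf = OutOfGraphPrioritizedReplayBuffer(
      observation_shape=obs_shape, stack_size=4,
      replay_capacity=10000, batch_size=32)
  obs = np.zeros(obs_shape, dtype=np.uint8)
  for _ in range(100):
    # New transitions are added with an explicit priority; agents typically
    # pass the maximum priority seen so far.
    buf.add(obs, 0, 1.0, 0, 1.0)
  batch = buf.sample_transition_batch(batch_size=8)
  # Locate the sampled indices by name, mirroring sample_transition_batch above.
  names = [e.name for e in buf.get_transition_elements(8)]
  indices = batch[names.index('indices')]
  # After computing new priorities (e.g. from TD errors), write them back.
  buf.set_priority(indices, np.full(8, 0.5, dtype=np.float32))
  return buf.get_priority(indices)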
@gin.configurable(
denylist=['observation_shape', 'stack_size', 'update_horizon', 'gamma'])
class WrappedPrioritizedReplayBuffer(
circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphPrioritizedReplayBuffer with in-graph sampling.
Usage:
* To add a transition: Call the add function.
* To sample a batch: Query any of the tensors in the transition dictionary.
Every sess.run that requires any of these tensors will
sample a new transition.
"""
def __init__(self,
observation_shape,
stack_size,
use_staging=False,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedPrioritizedReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
      use_staging: bool, when True a staging area is used to prefetch the
        next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
      gamma: float, the discount factor.
wrapped_memory: The 'inner' memory data structure. If None, use the
default prioritized replay.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
if wrapped_memory is None:
wrapped_memory = OutOfGraphPrioritizedReplayBuffer(
observation_shape, stack_size, replay_capacity, batch_size,
update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedPrioritizedReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging,
replay_capacity,
batch_size,
update_horizon,
gamma,
wrapped_memory=wrapped_memory,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
def tf_set_priority(self, indices, priorities):
"""Sets the priorities for the given indices.
Args:
indices: tf.Tensor with dtype int32 and shape [n].
priorities: tf.Tensor with dtype float and shape [n].
Returns:
A tf op setting the priorities for prioritized sampling.
"""
return tf.numpy_function(
self.memory.set_priority, [indices, priorities], [],
name='prioritized_replay_set_priority_py_func')
def tf_get_priority(self, indices):
"""Gets the priorities for the given indices.
Args:
indices: tf.Tensor with dtype int32 and shape [n].
Returns:
priorities: tf.Tensor with dtype float and shape [n], the priorities at
the indices.
"""
return tf.numpy_function(
self.memory.get_priority, [indices],
tf.float32,
name='prioritized_replay_get_priority_py_func')
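# A hedged sketch (not part of Dopamine) of the in-graph priority ops above,
# assuming TF1-style graph execution (tf.placeholder / tf.Session); shapes and
# capacities are illustrative.
def _example_wrapped_priority_ops():
  buf = WrappedPrioritizedReplayBuffer(
      observation_shape=(84, 84), stack_size=4,
      replay_capacity=1000, batch_size=32)
  indices_ph = tf.placeholder(tf.int32, [None])
  priorities_ph = tf.placeholder(tf.float32, [None])
  set_op = buf.tf_set_priority(indices_ph, priorities_ph)
  get_op = buf.tf_get_priority(indices_ph)
  with tf.Session() as sess:
    sess.run(set_op, {indices_ph: [0, 1], priorities_ph: [0.5, 2.0]})
    # Reads back the priorities that were just written.
    return sess.run(get_op, {indices_ph: [0, 1]})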
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths",
"accumulated_attention_probs"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape.dims[0].value * multiplier
if t.shape.dims[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(tiled,
array_ops.concat(
([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
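# Illustrative numpy reference (not used by TensorFlow) matching the semantics
# described in the docstring above: each batch entry is repeated `multiplier`
# times along axis 0.
def _tile_batch_numpy_reference(t, multiplier):
  """E.g. [[1, 2], [3, 4]] with multiplier=2 -> [[1, 2], [1, 2], [3, 4], [3, 4]]."""
  return np.repeat(np.asarray(t), multiplier, axis=0)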
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape.dims[0].value or array_ops.shape(parent_ids)[0]
batch_size = parent_ids.shape.dims[1].value or array_ops.shape(parent_ids)[1]
beam_width = parent_ids.shape.dims[2].value or array_ops.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = array_ops.expand_dims(
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
in_bound_steps = array_ops.transpose(
array_ops.sequence_mask(sequence_length, maxlen=max_time),
perm=[2, 0, 1])
sorted_beam_ids = array_ops.where(
in_bound_steps, x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = array_ops.tile(array_ops.reshape(
math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = array_ops.shape(gather_from)
gather_from = array_ops.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = array_ops.gather_nd(gather_from, indices)
ordered = array_ops.reshape(ordered, final_shape)
return ordered
def _check_maybe(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
"""Raises an exception if dimensions are known statically and can not be
reshaped to [batch_size, beam_size, -1].
"""
reshaped_shape = tensor_shape.TensorShape([batch_size, beam_width, None])
if (batch_size is not None and shape.dims[0].value is not None
and (shape[0] != batch_size * beam_width
or (shape.ndims >= 2 and shape.dims[1].value is not None
and (shape[0] != batch_size or shape[1] != beam_width)))):
tf_logging.warn("TensorArray reordering expects elements to be "
"reshapable to %s which is incompatible with the "
"current shape %s. Consider setting "
"reorder_tensor_arrays to False to disable TensorArray "
"reordering during the beam search."
% (reshaped_shape, shape))
return False
return True
def _check_batch_beam(t, batch_size, beam_width):
"""Returns an Assert operation checking that the elements of the stacked
TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
the TensorArray elements have a known rank of at least 1.
"""
error_message = ("TensorArray reordering expects elements to be "
"reshapable to [batch_size, beam_size, -1] which is "
"incompatible with the dynamic shape of %s elements. "
"Consider setting reorder_tensor_arrays to False to disable "
"TensorArray reordering during the beam search."
% (t.name))
rank = t.shape.ndims
shape = array_ops.shape(t)
if rank == 2:
condition = math_ops.equal(shape[1], batch_size * beam_width)
else:
condition = math_ops.logical_or(
math_ops.equal(shape[1], batch_size * beam_width),
math_ops.logical_and(
math_ops.equal(shape[1], batch_size),
math_ops.equal(shape[2], beam_width)))
return control_flow_ops.Assert(condition, [error_message])
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
  Meanwhile, when using `AttentionWrapper`, a coverage penalty is suggested
  when computing scores (https://arxiv.org/pdf/1609.08144.pdf); it encourages
  the translation to cover all inputs.
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
coverage_penalty_weight=0.0,
reorder_tensor_arrays=True):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell) # pylint: disable=protected-access
if (output_layer is not None and
not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
self._reorder_tensor_arrays = reorder_tensor_arrays
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._coverage_penalty_weight = coverage_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=ops.convert_to_tensor(0.0, dtype=dtype),
off_value=ops.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
init_attention_probs = get_attention_probs(
self._initial_cell_state, self._coverage_penalty_weight)
if init_attention_probs is None:
init_attention_probs = ()
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64),
accumulated_attention_probs=init_attention_probs)
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(final_state.lengths, axis=1))
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
final_state = final_state._replace(cell_state=nest.map_structure(
lambda t: self._maybe_sort_array_beams(
t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size * self._beam_width], t_shape[2:]),
0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size, self._beam_width], t_shape[1:]),
0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
"""Maybe sorts beams within a `TensorArray`.
Args:
t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
`[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
`s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
it is not a `TensorArray` or does not meet shape requirements.
"""
if not isinstance(t, tensor_array_ops.TensorArray):
return t
# pylint: disable=protected-access
if (not t._infer_shape or not t._element_shape
or t._element_shape[0].ndims is None
or t._element_shape[0].ndims < 1):
shape = (
t._element_shape[0] if t._infer_shape and t._element_shape
else tensor_shape.TensorShape(None))
tf_logging.warn("The TensorArray %s in the cell state is not amenable to "
"sorting based on the beam search result. For a "
"TensorArray to be sorted, its elements shape must be "
"defined and have at least a rank of 1, but saw shape: %s"
% (t.handle.name, shape))
return t
shape = t._element_shape[0]
# pylint: enable=protected-access
if not _check_static_batch_beam_maybe(
shape, tensor_util.constant_value(self._batch_size), self._beam_width):
return t
t = t.stack()
with ops.control_dependencies(
[_check_batch_beam(t, self._batch_size, self._beam_width)]):
return gather_tree_from_array(t, parent_ids, sequence_length)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
coverage_penalty_weight = self._coverage_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
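# A hedged end-to-end sketch (not from the TensorFlow docs) wiring the decoder
# above into dynamic_decode.  `encoder_state` must already match the cell's
# state structure; the vocab size, GO/EOS token ids, and all sizes below are
# illustrative placeholders.
def _example_beam_search_decode(encoder_state, batch_size_value,
                                vocab_size=1000, beam_width=5,
                                embedding_dim=64, num_units=128):
  import tensorflow as tf  # public API; this module only imports internals
  embedding = tf.get_variable("embedding", [vocab_size, embedding_dim])
  cell = tf.nn.rnn_cell.LSTMCell(num_units)
  # Tile the encoder state to beam_width, as required by the class docstring.
  tiled_state = tf.contrib.seq2seq.tile_batch(encoder_state,
                                              multiplier=beam_width)
  bsd = BeamSearchDecoder(
      cell=cell,
      embedding=embedding,
      start_tokens=tf.fill([batch_size_value], 1),  # hypothetical GO id
      end_token=2,                                  # hypothetical EOS id
      initial_state=tiled_state,
      beam_width=beam_width,
      output_layer=tf.layers.Dense(vocab_size, use_bias=False))
  outputs, final_state, lengths = tf.contrib.seq2seq.dynamic_decode(
      bsd, maximum_iterations=50)
  return outputs.predicted_ids  # [batch_size, T, beam_width]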
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight,
coverage_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
    A (BeamSearchDecoderOutput, BeamSearchDecoderState) pair: the beam search
    output for this step and the new state of the beam search.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
not_finished = math_ops.logical_not(previously_finished)
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape.dims[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = math_ops.to_int64(not_finished)
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the accumulated attention probabilities if coverage penalty is
# enabled.
accumulated_attention_probs = None
attention_probs = get_attention_probs(
next_cell_state, coverage_penalty_weight)
if attention_probs is not None:
attention_probs *= array_ops.expand_dims(math_ops.to_float(not_finished), 2)
accumulated_attention_probs = (
beam_state.accumulated_attention_probs + attention_probs)
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
finished=previously_finished,
accumulated_attention_probs=accumulated_attention_probs)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.to_int32(raw_next_word_ids)
next_beam_ids = math_ops.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
next_accumulated_attention_probs = ()
if accumulated_attention_probs is not None:
next_accumulated_attention_probs = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=accumulated_attention_probs,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1],
name="next_accumulated_attention_probs")
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished,
accumulated_attention_probs=next_accumulated_attention_probs)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
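# --- Editor's illustrative sketch; not part of the original module. ----------
# The core selection in `_beam_search_step` is a single top-k over the
# flattened `[batch_size, beam_width * vocab_size]` score matrix; the parent
# beam and the emitted token are then recovered by dividing by `vocab_size`.
# A minimal NumPy version of that selection, assuming `scores` are already
# length/coverage normalized:
def _flat_topk_sketch(scores, beam_width):
  """scores: float array of shape [batch_size, beam_width, vocab_size]."""
  batch_size, _, vocab_size = scores.shape
  flat = scores.reshape(batch_size, -1)
  # Indices of the `beam_width` best continuations per batch entry,
  # ordered best to worst.
  top = np.argsort(-flat, axis=1)[:, :beam_width]
  next_beam_ids = top // vocab_size   # parent beam of each winner
  next_word_ids = top % vocab_size    # token emitted by each winner
  return next_beam_ids, next_word_ids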
def get_attention_probs(next_cell_state, coverage_penalty_weight):
"""Get attention probabilities from the cell state.
Args:
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
The attention probabilities with shape `[batch_size, beam_width, max_time]`
if coverage penalty is enabled. Otherwise, returns None.
Raises:
ValueError: If no cell is attentional but coverage penalty is enabled.
"""
if coverage_penalty_weight == 0.0:
return None
# Attention probabilities of each attention layer. Each with shape
# `[batch_size, beam_width, max_time]`.
probs_per_attn_layer = []
if isinstance(next_cell_state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer = [attention_probs_from_attn_state(next_cell_state)]
elif isinstance(next_cell_state, tuple):
for state in next_cell_state:
if isinstance(state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer.append(attention_probs_from_attn_state(state))
if not probs_per_attn_layer:
raise ValueError(
"coverage_penalty_weight must be 0.0 if no cell is attentional.")
if len(probs_per_attn_layer) == 1:
attention_probs = probs_per_attn_layer[0]
else:
# Calculate the average attention probabilities from all attention layers.
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in probs_per_attn_layer]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
coverage_penalty_weight, finished, accumulated_attention_probs):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
accumulated_attention_probs: Accumulated attention probabilities up to the
current time step, with shape `[batch_size, beam_width, max_time]` if
coverage_penalty_weight is not 0.0.
Returns:
The scores normalized by the length_penalty and coverage_penalty.
Raises:
ValueError: accumulated_attention_probs is None when coverage penalty is
enabled.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
scores = log_probs / length_penalty_
coverage_penalty_weight = ops.convert_to_tensor(
coverage_penalty_weight, name="coverage_penalty_weight")
if coverage_penalty_weight.shape.ndims != 0:
raise ValueError("coverage_penalty_weight should be a scalar, "
"but saw shape: %s" % coverage_penalty_weight.shape)
if tensor_util.constant_value(coverage_penalty_weight) == 0.0:
return scores
if accumulated_attention_probs is None:
raise ValueError(
"accumulated_attention_probs can be None only if coverage penalty is "
"disabled.")
# Add source sequence length mask before computing coverage penalty.
accumulated_attention_probs = array_ops.where(
math_ops.equal(accumulated_attention_probs, 0.0),
array_ops.ones_like(accumulated_attention_probs),
accumulated_attention_probs)
# coverage penalty =
# sum over `max_time` {log(min(accumulated_attention_probs, 1.0))}
coverage_penalty = math_ops.reduce_sum(
math_ops.log(math_ops.minimum(accumulated_attention_probs, 1.0)), 2)
# Apply coverage penalty to finished predictions.
coverage_penalty *= math_ops.to_float(finished)
weighted_coverage_penalty = coverage_penalty * coverage_penalty_weight
# Reshape from [batch_size, beam_width] to [batch_size, beam_width, 1]
weighted_coverage_penalty = array_ops.expand_dims(
weighted_coverage_penalty, 2)
return scores + weighted_coverage_penalty
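# --- Editor's illustrative sketch; not part of the original module. ----------
# `_get_scores` combines two normalizations: log probabilities are divided by
# the length penalty, and finished hypotheses additionally receive a coverage
# penalty `beta * sum_t log(min(accumulated_attention_t, 1.0))`. A small NumPy
# rendering of the coverage term for a single hypothesis:
def _coverage_penalty_sketch(accumulated_attention_probs, beta):
  """accumulated_attention_probs: float array of shape [max_time]."""
  # Source positions that were never attended to are masked to 1.0 so they
  # contribute log(1.0) = 0 instead of log(0) = -inf, mirroring the
  # array_ops.where masking above.
  acc = np.where(accumulated_attention_probs == 0.0,
                 np.ones_like(accumulated_attention_probs),
                 accumulated_attention_probs)
  return beta * np.sum(np.log(np.minimum(acc, 1.0)))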
def attention_probs_from_attn_state(attention_state):
"""Calculates the average attention probabilities.
Args:
attention_state: An instance of `AttentionWrapperState`.
Returns:
The attention probabilities in the given AttentionWrapperState.
    If there are multiple attention mechanisms, returns the average value
    across all of them.
"""
# Attention probabilities over time steps, with shape
# `[batch_size, beam_width, max_time]`.
attention_probs = attention_state.alignments
if isinstance(attention_probs, tuple):
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in attention_probs]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
    sequence_lengths: `Tensor`, the sequence lengths of each hypothesis.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
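# --- Editor's illustrative sketch; not part of the original module. ----------
# The division above implements the GNMT length penalty
# ((5 + length) / 6) ** penalty_factor, which `_get_scores` divides into the
# summed log probabilities. A plain-Python reference for a single hypothesis:
def _length_penalty_sketch(sequence_length, penalty_factor):
  """Matches ((5. + length) ** alpha) / (6. ** alpha) as computed above."""
  if penalty_factor == 0:
    return 1.0
  return ((5.0 + float(sequence_length)) ** penalty_factor /
          (5.0 + 1.0) ** penalty_factor)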
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
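# --- Editor's illustrative sketch; not part of the original module. ----------
# `_mask_probs` freezes finished beams: their whole log-probability row is
# replaced so that only the EOS token remains selectable, while unfinished
# beams pass through untouched. The same idea in NumPy for a single beam:
def _mask_probs_sketch(log_probs, eos_token, finished):
  """log_probs: [vocab_size] float array; finished: Python bool."""
  if not finished:
    return log_probs
  masked = np.full_like(log_probs, np.finfo(log_probs.dtype).min)
  masked[eos_token] = 0.0  # log(1.0): all remaining mass on EOS
  return masked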
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the gather_from dims is at least as
big as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tensor_array_ops.TensorArray):
return gather_from
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
    name: The name for this set of operations. By default this is
      'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
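# --- Editor's illustrative sketch; not part of the original module. ----------
# The gather trick above flattens the leading [batch_size, range_size] block
# and offsets each row's indices by `batch_index * range_size`, so a single
# gather call reorders every batch entry at once. A NumPy analogue of the
# common `[batch_size * beam_width, -1]` case:
def _tensor_gather_sketch(gather_indices, gather_from):
  """gather_indices: [batch, k] ints; gather_from: [batch, range_size, ...]."""
  batch_size, range_size = gather_from.shape[0], gather_from.shape[1]
  offsets = np.arange(batch_size).reshape(-1, 1) * range_size
  flat_indices = (gather_indices + offsets).reshape(-1)
  flat_source = gather_from.reshape((batch_size * range_size,) +
                                    gather_from.shape[2:])
  gathered = flat_source[flat_indices]
  return gathered.reshape((batch_size, gather_indices.shape[1]) +
                          gather_from.shape[2:])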
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""I totally stole most of this from melange, thx guys!!!"""
import re
from oslo_concurrency import processutils
from oslo_log import log as logging
from trove.common import base_exception as openstack_exception
from trove.common.i18n import _
ClientConnectionError = openstack_exception.ClientConnectionError
ProcessExecutionError = processutils.ProcessExecutionError
DatabaseMigrationError = openstack_exception.DatabaseMigrationError
LOG = logging.getLogger(__name__)
wrap_exception = openstack_exception.wrap_exception
def safe_fmt_string(text):
return re.sub(r'%([0-9]+)', r'\1', text)
class TroveError(openstack_exception.OpenstackException):
"""Base exception that all custom trove app exceptions inherit from."""
internal_message = None
def __init__(self, message=None, **kwargs):
if message is not None:
self.message = message
if self.internal_message is not None:
try:
LOG.error(safe_fmt_string(self.internal_message) % kwargs)
except Exception:
LOG.error(self.internal_message)
self.message = safe_fmt_string(self.message)
super(TroveError, self).__init__(**kwargs)
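def _trove_error_formatting_sketch():
    """Editor's illustrative sketch; not part of the original module.

    Subclasses below only declare a `message` template; keyword arguments
    passed at raise time are interpolated into it by the OpenstackException
    base class (assumed behaviour: `message % kwargs`). The subclass defined
    here is hypothetical and exists only for this demonstration.
    """
    class HypotheticalQuotaError(TroveError):
        message = _("Quota exceeded for tenant %(tenant_id)s.")

    try:
        raise HypotheticalQuotaError(tenant_id="abc123")
    except HypotheticalQuotaError as exc:
        return str(exc)  # expected: "Quota exceeded for tenant abc123."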
class DBConstraintError(TroveError):
message = _("Failed to save %(model_name)s because: %(error)s.")
class InvalidRPCConnectionReuse(TroveError):
message = _("Invalid RPC Connection Reuse.")
class NotFound(TroveError):
message = _("Resource %(uuid)s cannot be found.")
class CapabilityNotFound(NotFound):
message = _("Capability '%(capability)s' cannot be found.")
class CapabilityDisabled(TroveError):
message = _("Capability '%(capability)s' is disabled.")
class FlavorNotFound(TroveError):
message = _("Resource %(uuid)s cannot be found.")
class UserNotFound(NotFound):
message = _("User %(uuid)s cannot be found on the instance.")
class DatabaseNotFound(NotFound):
message = _("Database %(uuid)s cannot be found on the instance.")
class ComputeInstanceNotFound(NotFound):
internal_message = _("Cannot find compute instance %(server_id)s for "
"instance %(instance_id)s.")
message = _("Resource %(instance_id)s can not be retrieved.")
class DnsRecordNotFound(NotFound):
message = _("DnsRecord with name= %(name)s not found.")
class DatastoreNotFound(NotFound):
message = _("Datastore '%(datastore)s' cannot be found.")
class DatastoreVersionNotFound(NotFound):
message = _("Datastore version '%(version)s' cannot be found.")
class DatastoresNotFound(NotFound):
message = _("Datastores cannot be found.")
class DatastoreFlavorAssociationNotFound(NotFound):
message = _("Flavor %(flavor_id)s is not supported for datastore "
"%(datastore)s version %(datastore_version)s")
class DatastoreFlavorAssociationAlreadyExists(TroveError):
message = _("Flavor %(flavor_id)s is already associated with "
"datastore %(datastore)s version %(datastore_version)s")
class DatastoreNoVersion(TroveError):
message = _("Datastore '%(datastore)s' has no version '%(version)s'.")
class DatastoreVersionInactive(TroveError):
message = _("Datastore version '%(version)s' is not active.")
class DatastoreDefaultDatastoreNotFound(TroveError):
message = _("Please specify datastore. Default datastore "
"'%(datastore)s' cannot be found.")
class DatastoreDefaultDatastoreNotDefined(TroveError):
message = _("Please specify datastore. No default datastore "
"is defined.")
class DatastoreDefaultVersionNotFound(TroveError):
message = _("Default version for datastore '%(datastore)s' not found.")
class InvalidDatastoreManager(TroveError):
message = _("Datastore manager %(datastore_manager)s cannot be found.")
class DatastoreOperationNotSupported(TroveError):
message = _("The '%(operation)s' operation is not supported for "
"the '%(datastore)s' datastore.")
class NoUniqueMatch(TroveError):
message = _("Multiple matches found for '%(name)s', "
"use an UUID to be more specific.")
class OverLimit(TroveError):
internal_message = _("The server rejected the request due to its size or "
"rate.")
class QuotaExceeded(TroveError):
message = _("Quota exceeded for resources: %(overs)s.")
class VolumeQuotaExceeded(QuotaExceeded):
message = _("Instance volume quota exceeded.")
class GuestError(TroveError):
message = _("An error occurred communicating with the guest: "
"%(original_message)s.")
class GuestTimeout(TroveError):
message = _("Timeout trying to connect to the Guest Agent.")
class BadRequest(TroveError):
message = _("The server could not comply with the request since it is "
"either malformed or otherwise incorrect.")
class MissingKey(BadRequest):
message = _("Required element/key - %(key)s was not specified.")
class DatabaseAlreadyExists(BadRequest):
message = _('A database with the name "%(name)s" already exists.')
class UserAlreadyExists(BadRequest):
message = _('A user with the name "%(name)s" already exists.')
class InstanceAssignedToConfiguration(BadRequest):
message = _('A configuration group cannot be deleted if it is '
'associated with one or more non-terminated instances. '
'Detach the configuration group from all non-terminated '
'instances and please try again.')
class UnprocessableEntity(TroveError):
message = _("Unable to process the contained request.")
class ConfigurationNotSupported(UnprocessableEntity):
message = _("Configuration groups not supported by the datastore.")
class CannotResizeToSameSize(TroveError):
message = _("No change was requested in the size of the instance.")
class VolumeAttachmentsNotFound(NotFound):
message = _("Cannot find the volumes attached to compute "
"instance %(server_id)s.")
class VolumeCreationFailure(TroveError):
message = _("Failed to create a volume in Nova.")
class VolumeSizeNotSpecified(BadRequest):
message = _("Volume size was not specified.")
class LocalStorageNotSpecified(BadRequest):
message = _("Local storage not specified in flavor ID: %(flavor)s.")
class LocalStorageNotSupported(TroveError):
message = _("Local storage support is not enabled.")
class VolumeNotSupported(TroveError):
message = _("Volume support is not enabled.")
class ReplicationNotSupported(TroveError):
message = _("Replication is not supported for "
"the '%(datastore)s' datastore.")
class ReplicationSlaveAttachError(TroveError):
message = _("Exception encountered attaching slave to new replica source.")
class TaskManagerError(TroveError):
message = _("An error occurred communicating with the task manager: "
"%(original_message)s.")
class BadValue(TroveError):
message = _("Value could not be converted: %(msg)s.")
class PollTimeOut(TroveError):
message = _("Polling request timed out.")
class Forbidden(TroveError):
message = _("User does not have admin privileges.")
class PolicyNotAuthorized(Forbidden):
message = _("Policy doesn't allow %(action)s to be performed.")
class InvalidModelError(TroveError):
message = _("The following values are invalid: %(errors)s.")
class ModelNotFoundError(NotFound):
message = _("Not Found.")
class UpdateGuestError(TroveError):
message = _("Failed to update instances.")
class ConfigNotFound(NotFound):
message = _("Config file not found.")
class PasteAppNotFound(NotFound):
message = _("Paste app not found.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found.")
class TenantQuotaNotFound(QuotaNotFound):
message = _("Quota for tenant %(tenant_id)s could not be found.")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class BackupUploadError(TroveError):
message = _("Unable to upload Backup to swift.")
class BackupDownloadError(TroveError):
message = _("Unable to download Backup from swift")
class BackupCreationError(TroveError):
message = _("Unable to create Backup.")
class BackupUpdateError(TroveError):
message = _("Unable to update Backup table in database.")
class SecurityGroupCreationError(TroveError):
message = _("Failed to create Security Group.")
class SecurityGroupDeletionError(TroveError):
message = _("Failed to delete Security Group.")
class SecurityGroupRuleCreationError(TroveError):
message = _("Failed to create Security Group Rule.")
class SecurityGroupRuleDeletionError(TroveError):
message = _("Failed to delete Security Group Rule.")
class MalformedSecurityGroupRuleError(TroveError):
message = _("Error creating security group rules."
" Malformed port(s). Port must be an integer."
" FromPort = %(from)s greater than ToPort = %(to)s.")
class BackupNotCompleteError(TroveError):
message = _("Unable to create instance because backup %(backup_id)s is "
"not completed. Actual state: %(state)s.")
class BackupFileNotFound(NotFound):
message = _("Backup file in %(location)s was not found in the object "
"storage.")
class BackupDatastoreMismatchError(TroveError):
message = _("The datastore from which the backup was taken, "
"%(datastore1)s, does not match the destination"
" datastore of %(datastore2)s.")
class SwiftAuthError(TroveError):
message = _("Swift account not accessible for tenant %(tenant_id)s.")
class SwiftNotFound(TroveError):
message = _("Swift is disabled for tenant %(tenant_id)s.")
class DatabaseForUserNotInDatabaseListError(TroveError):
message = _("The request indicates that user %(user)s should have access "
"to database %(database)s, but database %(database)s is not "
"included in the initial databases list.")
class DatabaseInitialDatabaseDuplicateError(TroveError):
message = _("Two or more databases share the same name in the initial "
"databases list. Please correct the names or remove the "
"duplicate entries.")
class DatabaseInitialUserDuplicateError(TroveError):
message = _("Two or more users share the same name and host in the "
"initial users list. Please correct the names or remove the "
"duplicate entries.")
class RestoreBackupIntegrityError(TroveError):
message = _("Current Swift object checksum does not match original "
"checksum for backup %(backup_id)s.")
class ConfigKeyNotFound(NotFound):
message = _("%(key)s is not a supported configuration parameter.")
class NoConfigParserFound(NotFound):
message = _("No configuration parser found for datastore "
"%(datastore_manager)s.")
class ConfigurationDatastoreNotMatchInstance(TroveError):
message = _("Datastore Version on Configuration "
"%(config_datastore_version)s does not "
"match the Datastore Version on the instance "
"%(instance_datastore_version)s.")
class ConfigurationParameterDeleted(TroveError):
message = _("%(parameter_name)s parameter can no longer be "
"set as of %(parameter_deleted_at)s.")
class ConfigurationParameterAlreadyExists(TroveError):
message = _("%(parameter_name)s parameter already exists "
"for datastore version %(datastore_version)s.")
class ConfigurationAlreadyAttached(TroveError):
message = _("Instance %(instance_id)s already has a "
"Configuration Group attached: %(configuration_id)s.")
class InvalidInstanceState(TroveError):
message = _("The operation you have requested cannot be executed because "
"the instance status is currently: %(status)s.")
class NoServiceEndpoint(TroveError):
"""Could not find requested endpoint in Service Catalog."""
message = _("Endpoint not found for service_type=%(service_type)s, "
"endpoint_type=%(endpoint_type)s, "
"endpoint_region=%(endpoint_region)s.")
class EmptyCatalog(NoServiceEndpoint):
"""The service catalog is empty."""
message = _("Empty catalog.")
class IncompatibleReplicationStrategy(TroveError):
message = _("Instance with replication strategy %(guest_strategy)s "
"cannot replicate from instance with replication strategy "
"%(replication_strategy)s.")
class InsufficientSpaceForReplica(TroveError):
message = _("The target instance has only %(slave_volume_size)sG free, "
"but the replication snapshot contains %(dataset_size)sG "
"of data.")
class InsufficientSpaceForBackup(TroveError):
message = _("The instance has only %(free)sG free while the estimated "
"backup size is %(backup_size)sG.")
class ReplicaSourceDeleteForbidden(Forbidden):
message = _("The replica source cannot be deleted without detaching the "
"replicas.")
class ModuleTypeNotFound(NotFound):
message = _("Module type '%(module_type)s' was not found.")
class ModuleAppliedToInstance(BadRequest):
message = _("A module cannot be deleted or its contents modified if it "
"has been applied to a non-terminated instance, unless the "
"module has been marked as 'live_update.' "
"Please remove the module from all non-terminated "
"instances and try again.")
class ModuleAlreadyExists(BadRequest):
message = _("A module with the name '%(name)s' already exists for "
"datastore '%(datastore)s' and datastore version "
"'%(ds_version)s'")
class ModuleAccessForbidden(Forbidden):
message = _("You must be admin to %(action)s a module with these "
"options. %(options)s")
class ModuleInvalid(Forbidden):
message = _("The module is invalid: %(reason)s")
class InstanceNotFound(NotFound):
message = _("Instance '%(instance)s' cannot be found.")
class ClusterNotFound(NotFound):
message = _("Cluster '%(cluster)s' cannot be found.")
class ClusterFlavorsNotEqual(TroveError):
message = _("The flavor for each instance in a cluster must be the same.")
class ClusterNetworksNotEqual(TroveError):
message = _("The network for each instance in a cluster must be the same.")
class NetworkNotFound(TroveError):
message = _("Network Resource %(uuid)s cannot be found.")
class ClusterVolumeSizeRequired(TroveError):
message = _("A volume size is required for each instance in the cluster.")
class ClusterVolumeSizesNotEqual(TroveError):
message = _("The volume size for each instance in a cluster must be "
"the same.")
class ClusterNumInstancesNotSupported(TroveError):
message = _("The number of instances for your initial cluster must "
"be %(num_instances)s.")
class ClusterNumInstancesNotLargeEnough(TroveError):
message = _("The number of instances for your initial cluster must "
"be at least %(num_instances)s.")
class ClusterNumInstancesBelowSafetyThreshold(TroveError):
message = _("The number of instances in your cluster cannot "
"safely be lowered below the current level based"
"on your current fault-tolerance settings.")
class ClusterShrinkMustNotLeaveClusterEmpty(TroveError):
message = _("Must leave at least one instance in the cluster when "
"shrinking.")
class ClusterShrinkInstanceInUse(TroveError):
message = _("Instance(s) %(id)s currently in use and cannot be deleted. "
"Details: %(reason)s")
class ClusterInstanceOperationNotSupported(TroveError):
message = _("Operation not supported for instances that are part of a "
"cluster.")
class ClusterOperationNotSupported(TroveError):
message = _("The '%(operation)s' operation is not supported for cluster.")
class TroveOperationAuthError(TroveError):
message = _("Operation not allowed for tenant %(tenant_id)s.")
class ClusterDatastoreNotSupported(TroveError):
message = _("Clusters not supported for "
"%(datastore)s-%(datastore_version)s.")
class BackupTooLarge(TroveError):
message = _("Backup is too large for given flavor or volume. "
"Backup size: %(backup_size)s GBs. "
"Available size: %(disk_size)s GBs.")
class ImageNotFound(NotFound):
message = _("Image %(uuid)s cannot be found.")
class DatastoreVersionAlreadyExists(BadRequest):
message = _("A datastore version with the name '%(name)s' already exists.")
class LogAccessForbidden(Forbidden):
message = _("You must be admin to %(action)s log '%(log)s'.")
|
|
import unittest
import sys
from fatool import *
import os
class TestFa(unittest.TestCase):
def setUp(self):
with open('test.fa', 'w') as f:
f.write('>name3\nCTNACtacgatNNNNNNN\n>name4\nCTNAC\n>name5\nNNNNNACTGNNNN\n>name\nACTGactg\n>name7\nNNNACTGN\n>name8\nCTNACtacgatNNNNNNN\n>name2\nNNNNNNNNNACTGNNNN\n>name6\nCTNACtatNNN\n')
with open('f2.fa', 'w') as f:
f.write('')
pass
def test_setUpFa(self):
cl = []
cl.append(Sequence('>name', 'ACTGactg'))
cl.append(Sequence('>name2', 'CCCTAGACTG'))
cl.append(Sequence('>name3', 'CTNNNNNNACtacgat'))
f = Fa(cl, 'test-fa')
self.assertEqual(cl, f.contigs)
self.assertEqual('test-fa', f.name)
self.assertEqual({'>name':0, '>name2':1, '>name3':2}, f.contigs_idx)
cl.append('something')
with self.assertRaises(TypeError):
Fa(cl, 'name4')
def test_str(self):
cl = []
cl.append(Sequence('>name', 'ACTGactg'))
cl.append(Sequence('>name2', 'CCCTAGACTG'))
cl.append(Sequence('>name3', 'CTNNNNNNACtacgat'))
f = Fa(cl, 'test-fa')
self.assertEqual('>name\nACTGactg\n>name2\nCCCTAGACTG\n>name3\nCTNNNNNNACtacgat\n', str(f))
def test_add_contig(self):
cl = []
cl.append(Sequence('>name', 'ACTGactg'))
f = Fa(cl, 'test-fa')
self.assertEqual(cl, f.contigs)
f.add_contig(Sequence('>name2', 'CCCTAGACTG'))
cl.append(Sequence('>name2', 'CCCTAGACTG'))
self.assertEqual(cl, f.contigs)
f.add_contig(Sequence('>name2', 'ACTGaaaaaaa') )
self.assertEqual(cl, f.contigs)
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'ACTGaaaaaaa')]
f.add_contig(Sequence('>name2', 'ACTGaaaaaaa'), 1)
self.assertEqual(cl, f.contigs)
def test_add_contigs(self):
cl = [Sequence('>name', 'ACTGactg')]
f = Fa(cl, 'test-fa')
self.assertEqual(cl, f.contigs)
cl.append(Sequence('>name2', 'CCCTAGACTG'))
cl.append(Sequence('>name3', 'CTNNNNNNACtacgat'))
f.add_contigs([Sequence('>name2', 'CCCTAGACTG'), Sequence('>name3', 'CTNNNNNNACtacgat')])
self.assertEqual(cl, f.contigs)
f.add_contigs([Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')])
self.assertEqual(cl, f.contigs)
f.add_contigs([Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')], 1)
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
self.assertEqual(cl, f.contigs)
#self.assertEqual(cl, f.contigs)
def test_show_names(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
f = Fa(cl, 'test-fa')
self.assertEqual(['>name','>name2','>name3'], f.show_names())
f.add_contig(Sequence('>name2', 'ACTGaaaaaaa'), 1)
self.assertEqual(['>name','>name3','>name2'], f.show_names())
f.add_contig(Sequence('>name7', 'ACTGaaaaaaa'), 1)
self.assertEqual(['>name','>name3','>name2','>name7'], f.show_names())
def test_extract(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
f = Fa(cl, 'test-fa')
self.assertEqual(cl, f.contigs)
cl2 = [Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
self.assertEqual(cl2, f.extract(['>name2', '>name3']).contigs)
        print('printing contigs')
        for c in f.extract(['name2', 'name3']).contigs:
            print(c)
self.assertEqual('>extr_test-fa', f.extract(['>name2', '>name3']).name)
self.assertEqual(cl2, f.extract(['>name2', '>name3', '>name321']).contigs)
def test_remove(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
f = Fa(cl, 'test-fa')
self.assertEqual([Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')], f.remove(['>name']).contigs)
self.assertEqual([Sequence('>name', 'ACTGactg')], f.remove(['>name2','>name3']).contigs)
self.assertEqual([Sequence('>name', 'ACTGactg')], f.remove(['>name2','>name3','>name234']).contigs)
self.assertEqual([Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')], f.remove(['>name']).contigs)
def test_statistics(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC')]
f = Fa(cl, 'test-fa')
stat = {
'A': 7, 'C': 8, 'T': 7, 'G': 4, 'N': 22, 'L': 48,
'nbp1000': 0, 'nbp5000': 0, 'nbp10000': 0, 'nbp25000': 0, 'nbp50000': 0,
'lbp1000': 0, 'lbp5000': 0, 'lbp10000': 0, 'lbp25000': 0, 'lbp50000': 0,
'totalc':4, 'N50':17, 'L50':2, 'N75':8, 'L75':3, 'N90':8, 'L90':3,
'longest':18
}
self.assertEqual(stat, f.statistics())
def test_sort(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC')]
f = Fa(cl, 'test-fa')
cl = [Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name', 'ACTGactg'), Sequence('>name4', 'CTNAC')]
#for r in f.sort(1).contigs:
# print r
#for r in cl.reverse():
# print r
self.assertEqual(cl, f.sort(-1).contigs)
cl = [Sequence('>name4', 'CTNAC'), Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')]
self.assertEqual(cl, f.sort(1).contigs)
def test_join(self):
cl = [Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC')]
f = Fa(cl, 'test-fa')
cl2 = [Sequence('>name', 'NNNNNNNN'), Sequence('>name5', 'NNNNNNNNNACTGNNNN'), Sequence('>name6', 'CTNACtacgatNNNNNNN')]
f2 = Fa(cl2, 'test2-fa')
f.join([f2])
cl.append(Sequence('>name5', 'NNNNNNNNNACTGNNNN'))
cl.append(Sequence('>name6', 'CTNACtacgatNNNNNNN'))
self.assertEqual(cl, f.contigs)
cl = [
Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC'),
Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN'), Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN')
]
f = Fa([Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN')], 'fa1')
f2 = Fa([Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC')], 'fa2')
f3 = Fa([Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN')], 'fa3')
f4 = Fa([Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN')], 'fa4')
f.join([f2,f3,f4])
self.assertEqual(cl, f.contigs)
f = Fa([Sequence('>name', 'ACTGactg'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name3', 'CTNACtacgatNNNNNNN')], 'fa1')
f2 = Fa([Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC'), Sequence('>name', 'AnnnnnCTGactg')], 'fa2')
f3 = Fa([Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN'), Sequence('>name4', 'annaCTNAC'), Sequence('>name', 'AaaCTnnaGactg')], 'fa3')
f4 = Fa([Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN'), Sequence('>name3', 'CTNaaaACtacgatNNNNNNN'), Sequence('>name', 'AnnnCTGactg')], 'fa4')
f.join([f2,f3,f4])
self.assertEqual(cl, f.contigs)
cl = [
Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC'), Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name', 'ACTGactg'),
Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN')
]
f = Fa([Sequence('>name', 'NNN'), Sequence('>name2', 'ACTGNNNN'), Sequence('>name3', 'NNNNNNN')], 'fa1')
f2 = Fa([Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC')], 'fa2')
f3 = Fa([Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name6', 'CTNNN'), Sequence('>name', 'ACTGactg')], 'fa3')
f4 = Fa([Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN'),Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN') ], 'fa4')
f.join([f2,f3,f4], 1)
self.assertEqual(cl, f.contigs)
def test_load_from_file(self):
cl = [
Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC'), Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name', 'ACTGactg'),
Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN')
]
with open('test.fa') as f:
fob = Fa.load_from_file(f)
self.assertEqual('test.fa', fob.name)
self.assertEqual(cl, fob.contigs)
f2 = Fa.load_from_file('test.fa')
self.assertEqual('test.fa', f2.name)
self.assertEqual(cl, f2.contigs)
def test_write(self):
cl = [
Sequence('>name3', 'CTNACtacgatNNNNNNN'), Sequence('>name4', 'CTNAC'), Sequence('>name5', 'NNNNNACTGNNNN'), Sequence('>name', 'ACTGactg'),
Sequence('>name7', 'NNNACTGN'), Sequence('>name8', 'CTNACtacgatNNNNNNN'), Sequence('>name2', 'NNNNNNNNNACTGNNNN'), Sequence('>name6', 'CTNACtatNNN')
]
f = Fa(cl, 'fa1')
f.write('f2.fa')
with open('test.fa') as f1, open('f2.fa') as f2:
f1_content = f1.read()
f2_content = f2.read()
self.assertEqual(f1_content, f2_content)
def tearDown(self):
os.remove('f2.fa')
os.remove('test.fa')
pass
def test_conv_to_fq(self):
cl = []
test = 'ATGGAATCGGCTTTTAATACTGCAGGGGCGTTAAGTTGGCATGAACTCACAACCAATAATACCGAAGAGGCCATGCGCTTCTATGCTGAGATTTTTGGCTGGCACTTTAAAACCGTCAAAATGCCCCACGGTCACTATCACATTATTGAAAACGAGGGGATCAGCATTGGCGGAATTACCGACAGTTTAATCCCCACCCTTCCCTCACATTGGACTGGCTATATTACCGTTAACGATGTGGATCAAGTGGCTATCAGTGCTAAAAAACTCGGCGGTGACATTCTGTTTGGCCCTGAAGACATTCCAGAGGTGGGCCGTTTTTGTTGGATAAAAGACCCACAGGGCGCCATTATTGCGGCCATTAGCTATTTAAAACGTTGATGTAA'
cl.append(Sequence('>test', test))
cl.append(Sequence('>test2','ATGGAATCGGCTTTTAATACTGCAGGGGCGTTAAGTTGGCATGAACTCACAACCAATAATACCGAAGAGGCCATGCGCTTCTATGCTGAGATTTTTGGCTGGCACTTTAAAACCGTCAAAATGCCCCACGGTCACTNNNNNN'))
f = Fa(cl,'fa_test')
fq = f.convert_to_fq(40)
        print(fq)
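# --- Editor's illustrative sketch; not part of fatool. ------------------------
# The expected dict in test_statistics encodes the usual assembly metrics: Nxx
# is the length of the contig at which the cumulative length (contigs sorted
# longest first) reaches xx% of the total, and Lxx is how many contigs that
# takes. A reference computation, assuming the threshold is truncated to an
# integer number of bases, which reproduces N90=8/L90=3 for the 48 bp example
# above:
def _reference_nxx(lengths, fraction):
    threshold = int(sum(lengths) * fraction)
    cumulative = 0
    for count, length in enumerate(sorted(lengths, reverse=True), start=1):
        cumulative += length
        if cumulative >= threshold:
            return length, count  # (Nxx, Lxx)
    return 0, 0
# For lengths [8, 17, 18, 5]: _reference_nxx(lengths, 0.5) == (17, 2),
# _reference_nxx(lengths, 0.75) == (8, 3), _reference_nxx(lengths, 0.9) == (8, 3).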
if __name__ == "__main__":
unittest.main()
|
|
#
# ISA_kca_plugin.py - Kernel config options analyzer plugin, part of ISA FW
#
# Copyright (c) 2015 - 2016, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import importlib
KCAnalyzer = None
class ISA_KernelChecker():
initialized = False
def __init__(self, ISA_config):
self.logfile = ISA_config.logdir + "/isafw_kcalog"
self.full_report_name = ISA_config.reportdir + "/kca_full_report_" + \
ISA_config.machine + "_" + ISA_config.timestamp
self.problems_report_name = ISA_config.reportdir + \
"/kca_problems_report_" + ISA_config.machine + "_" + ISA_config.timestamp
self.full_reports = ISA_config.full_reports
self.initialized = True
self.arch = ISA_config.arch
with open(self.logfile, 'w') as flog:
flog.write("\nPlugin ISA_KernelChecker initialized!\n")
def append_recommendation(self, report, key, value):
report.write("Recommended value:\n")
report.write(key + ' : ' + str(value) + '\n')
comment = self.comments.get(key, '')
if comment != '':
report.write("Comment:\n")
report.write(comment + '\n')
def process_kernel(self, ISA_kernel):
if (self.initialized):
if (ISA_kernel.img_name and ISA_kernel.path_to_config):
# Merging common and arch configs
common_config_module = importlib.import_module('isafw.isaplugins.configs.kca.{}'.format('common'))
arch_config_module = importlib.import_module('isafw.isaplugins.configs.kca.{}'.format(self.arch))
for c in ["hardening_kco", "keys_kco", "security_kco", "integrity_kco",
"hardening_kco_ref", "keys_kco_ref", "security_kco_ref", "integrity_kco_ref",
"comments"]:
setattr(self, c, merge_config(getattr(arch_config_module, c), getattr(common_config_module, c)))
with open(self.logfile, 'a') as flog:
flog.write("Analyzing kernel config file at: " + ISA_kernel.path_to_config +
" for the image: " + ISA_kernel.img_name + "\n")
with open(ISA_kernel.path_to_config, 'r') as fkernel_conf:
for line in fkernel_conf:
line = line.strip('\n')
for key in self.hardening_kco:
if key + '=' in line:
self.hardening_kco[key] = line.split('=')[1]
for key in self.keys_kco:
if key + '=' in line:
self.keys_kco[key] = line.split('=')[1]
for key in self.security_kco:
if key + '=' in line:
self.security_kco[key] = line.split('=')[1]
for key in self.integrity_kco:
if key + '=' in line:
self.integrity_kco[key] = line.split('=')[1]
with open(self.logfile, 'a') as flog:
flog.write("\n\nhardening_kco values: " +
str(self.hardening_kco))
flog.write("\n\nkeys_kco values: " + str(self.keys_kco))
flog.write("\n\nsecurity_kco values: " +
str(self.security_kco))
flog.write("\n\nintegrity_kco values: " +
str(self.integrity_kco))
self.write_full_report(ISA_kernel)
self.write_problems_report(ISA_kernel)
else:
with open(self.logfile, 'a') as flog:
flog.write(
"Mandatory arguments such as image name and path to config are not provided!\n")
flog.write("Not performing the call.\n")
else:
with open(self.logfile, 'a') as flog:
flog.write(
"Plugin hasn't initialized! Not performing the call!\n")
def write_full_report(self, ISA_kernel):
if self.full_reports:
with open(self.full_report_name + "_" + ISA_kernel.img_name, 'w') as freport:
freport.write("Report for image: " +
ISA_kernel.img_name + '\n')
freport.write("With the kernel conf at: " +
ISA_kernel.path_to_config + '\n\n')
freport.write("Hardening options:\n")
for key in sorted(self.hardening_kco):
freport.write(
key + ' : ' + str(self.hardening_kco[key]) + '\n')
freport.write("\nKey-related options:\n")
for key in sorted(self.keys_kco):
freport.write(key + ' : ' + str(self.keys_kco[key]) + '\n')
freport.write("\nSecurity options:\n")
for key in sorted(self.security_kco):
freport.write(
key + ' : ' + str(self.security_kco[key]) + '\n')
freport.write("\nIntegrity options:\n")
for key in sorted(self.integrity_kco):
freport.write(
key + ' : ' + str(self.integrity_kco[key]) + '\n')
def write_problems_report(self, ISA_kernel):
self.write_text_problems_report(ISA_kernel)
self.write_xml_problems_report(ISA_kernel)
def write_text_problems_report(self, ISA_kernel):
with open(self.problems_report_name + "_" + ISA_kernel.img_name, 'w') as freport:
freport.write("Report for image: " + ISA_kernel.img_name + '\n')
freport.write("With the kernel conf at: " +
ISA_kernel.path_to_config + '\n\n')
freport.write("Hardening options that need improvement:\n")
for key in sorted(self.hardening_kco):
if (self.hardening_kco[key] != self.hardening_kco_ref[key]):
valid = False
if (key == "CONFIG_CMDLINE"):
if (len(self.hardening_kco['CONFIG_CMDLINE']) > 0):
valid = True
if (key == "CONFIG_DEBUG_STRICT_USER_COPY_CHECKS"):
if (self.hardening_kco['CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS'] == 'y'):
valid = True
if (key == "CONFIG_RANDOMIZE_BASE_MAX_OFFSET"):
options = self.hardening_kco_ref[key].split(',')
for option in options:
if (option == self.hardening_kco[key]):
valid = True
break
if not valid:
freport.write("\nActual value:\n")
freport.write(
key + ' : ' + str(self.hardening_kco[key]) + '\n')
self.append_recommendation(freport, key, self.hardening_kco_ref[key])
freport.write("\nKey-related options that need improvement:\n")
for key in sorted(self.keys_kco):
if (self.keys_kco[key] != self.keys_kco_ref[key]):
freport.write("\nActual value:\n")
freport.write(key + ' : ' + str(self.keys_kco[key]) + '\n')
self.append_recommendation(freport, key, self.keys_kco_ref[key])
freport.write("\nSecurity options that need improvement:\n")
for key in sorted(self.security_kco):
if (self.security_kco[key] != self.security_kco_ref[key]):
valid = False
if (key == "CONFIG_DEFAULT_SECURITY"):
options = self.security_kco_ref[key].split(',')
for option in options:
if (option == self.security_kco[key]):
valid = True
break
if ((key == "CONFIG_SECURITY_SELINUX") or
(key == "CONFIG_SECURITY_SMACK") or
(key == "CONFIG_SECURITY_APPARMOR") or
(key == "CONFIG_SECURITY_TOMOYO")):
if ((self.security_kco['CONFIG_SECURITY_SELINUX'] == 'y') or
(self.security_kco['CONFIG_SECURITY_SMACK'] == 'y') or
(self.security_kco['CONFIG_SECURITY_APPARMOR'] == 'y') or
(self.security_kco['CONFIG_SECURITY_TOMOYO'] == 'y')):
valid = True
if not valid:
freport.write("\nActual value:\n")
freport.write(
key + ' : ' + str(self.security_kco[key]) + '\n')
self.append_recommendation(freport, key, self.security_kco_ref[key])
freport.write("\nIntegrity options that need improvement:\n")
for key in sorted(self.integrity_kco):
if (self.integrity_kco[key] != self.integrity_kco_ref[key]):
valid = False
if ((key == "CONFIG_IMA_DEFAULT_HASH_SHA1") or
(key == "CONFIG_IMA_DEFAULT_HASH_SHA256") or
(key == "CONFIG_IMA_DEFAULT_HASH_SHA512") or
(key == "CONFIG_IMA_DEFAULT_HASH_WP512")):
if ((self.integrity_kco['CONFIG_IMA_DEFAULT_HASH_SHA256'] == 'y') or
(self.integrity_kco['CONFIG_IMA_DEFAULT_HASH_SHA512'] == 'y')):
valid = True
if not valid:
freport.write("\nActual value:\n")
freport.write(
key + ' : ' + str(self.integrity_kco[key]) + '\n')
self.append_recommendation(freport, key, self.integrity_kco_ref[key])
def write_xml_problems_report(self, ISA_kernel):
# write_problems_report_xml
num_tests = len(self.hardening_kco) + len(self.keys_kco) + \
len(self.security_kco) + len(self.integrity_kco)
root = etree.Element(
'testsuite', name='KCA_Plugin', tests=str(num_tests))
for key in sorted(self.hardening_kco):
tcase1 = etree.SubElement(
root, 'testcase', classname='Hardening options', name=key)
if (self.hardening_kco[key] != self.hardening_kco_ref[key]):
valid = False
if (key == "CONFIG_CMDLINE"):
if (len(self.hardening_kco['CONFIG_CMDLINE']) > 0):
valid = True
if (key == "CONFIG_DEBUG_STRICT_USER_COPY_CHECKS"):
if (self.hardening_kco['CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS'] == 'y'):
valid = True
if (key == "CONFIG_RANDOMIZE_BASE_MAX_OFFSET"):
options = self.hardening_kco_ref[key].split(',')
for option in options:
if (option == self.hardening_kco[key]):
valid = True
break
if not valid:
msg1 = 'current=' + key + ' is ' + \
str(self.hardening_kco[
key]) + ', recommended=' + key + ' is ' + str(self.hardening_kco_ref[key])
etree.SubElement(
tcase1, 'failure', message=msg1, type='violation')
for key in sorted(self.keys_kco):
tcase2 = etree.SubElement(
root, 'testcase', classname='Key-related options', name=key)
if (self.keys_kco[key] != self.keys_kco_ref[key]):
msg2 = 'current=' + key + ' is ' + \
str(self.keys_kco[key] + ', recommended=' +
key + ' is ' + str(self.keys_kco_ref[key]))
etree.SubElement(
tcase2, 'failure', message=msg2, type='violation')
for key in sorted(self.security_kco):
tcase3 = etree.SubElement(
root, 'testcase', classname='Security options', name=key)
if (self.security_kco[key] != self.security_kco_ref[key]):
valid = False
if (key == "CONFIG_DEFAULT_SECURITY"):
options = self.security_kco_ref[key].split(',')
for option in options:
if (option == self.security_kco[key]):
valid = True
break
if ((key == "CONFIG_SECURITY_SELINUX") or
(key == "CONFIG_SECURITY_SMACK") or
(key == "CONFIG_SECURITY_APPARMOR") or
(key == "CONFIG_SECURITY_TOMOYO")):
if ((self.security_kco['CONFIG_SECURITY_SELINUX'] == 'y') or
(self.security_kco['CONFIG_SECURITY_SMACK'] == 'y') or
(self.security_kco['CONFIG_SECURITY_APPARMOR'] == 'y') or
(self.security_kco['CONFIG_SECURITY_TOMOYO'] == 'y')):
valid = True
if not valid:
msg3 = 'current=' + key + ' is ' + \
str(self.security_kco[key]) + ', recommended=' + \
key + ' is ' + str(self.security_kco_ref[key])
etree.SubElement(
tcase3, 'failure', message=msg3, type='violation')
for key in sorted(self.integrity_kco):
tcase4 = etree.SubElement(
root, 'testcase', classname='Integrity options', name=key)
if (self.integrity_kco[key] != self.integrity_kco_ref[key]):
valid = False
if ((key == "CONFIG_IMA_DEFAULT_HASH_SHA1") or
(key == "CONFIG_IMA_DEFAULT_HASH_SHA256") or
(key == "CONFIG_IMA_DEFAULT_HASH_SHA512") or
(key == "CONFIG_IMA_DEFAULT_HASH_WP512")):
if ((self.integrity_kco['CONFIG_IMA_DEFAULT_HASH_SHA256'] == 'y') or
(self.integrity_kco['CONFIG_IMA_DEFAULT_HASH_SHA512'] == 'y')):
valid = True
if not valid:
msg4 = 'current=' + key + ' is ' + \
str(self.integrity_kco[
key]) + ', recommended=' + key + ' is ' + str(self.integrity_kco_ref[key])
etree.SubElement(
tcase4, 'failure', message=msg4, type='violation')
tree = etree.ElementTree(root)
output = self.problems_report_name + "_" + ISA_kernel.img_name + '.xml'
try:
tree.write(output, encoding='UTF-8',
pretty_print=True, xml_declaration=True)
except TypeError:
tree.write(output, encoding='UTF-8', xml_declaration=True)
def merge_config(arch_kco, common_kco):
merged = arch_kco.copy()
merged.update(common_kco)
return merged
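# --- Editor's illustrative sketch; not part of the original plugin. ----------
# merge_config() gives the common options precedence: keys present in both
# dicts take their value from common_kco, while arch-only keys are kept.
def _merge_config_sketch():
    arch = {"CONFIG_A": "y", "CONFIG_B": "n"}
    common = {"CONFIG_B": "y", "CONFIG_C": "not set"}
    merged = merge_config(arch, common)
    assert merged == {"CONFIG_A": "y", "CONFIG_B": "y", "CONFIG_C": "not set"}
    return merged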
# ======== supported callbacks from ISA ============= #
def init(ISA_config):
global KCAnalyzer
KCAnalyzer = ISA_KernelChecker(ISA_config)
def getPluginName():
return "ISA_KernelChecker"
def process_kernel(ISA_kernel):
global KCAnalyzer
return KCAnalyzer.process_kernel(ISA_kernel)
# ==================================================== #
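# --- Editor's illustrative sketch; not part of the original plugin. ----------
# A hypothetical driver showing how the callbacks above are meant to be used:
# the framework first calls init() with a configuration object, then hands
# each kernel image to process_kernel(). The attribute names mirror the ones
# this plugin reads (logdir, reportdir, machine, timestamp, full_reports,
# arch, img_name, path_to_config); the concrete values are invented, and the
# per-arch config modules under isafw.isaplugins.configs.kca must be
# importable for the call to succeed.
def _kca_plugin_usage_sketch():
    from types import SimpleNamespace
    isa_config = SimpleNamespace(
        logdir="/tmp/isafw/logs",
        reportdir="/tmp/isafw/reports",
        machine="qemux86-64",
        timestamp="20160101120000",
        full_reports=True,
        arch="x86_64",
    )
    init(isa_config)
    isa_kernel = SimpleNamespace(
        img_name="core-image-minimal",
        path_to_config="/tmp/isafw/kernel.config",
    )
    process_kernel(isa_kernel)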
|
|
from codecs import Codec, CodecInfo, register as lookup_function
from typing import Union, Tuple
from warnings import warn
from iota.exceptions import with_context
__all__ = [
'AsciiTrytesCodec',
'TrytesDecodeError',
]
class TrytesDecodeError(ValueError):
"""
Indicates that a tryte string could not be decoded to bytes.
"""
pass
class AsciiTrytesCodec(Codec):
"""
Legacy codec for converting byte strings into trytes, and vice
versa.
This method encodes each pair of trytes as an ASCII code point (and
vice versa when decoding).
The end result requires more space than if the trytes were converted
mathematically, but because the result is ASCII, it's easier to work
with.
Think of this kind of like Base 64 for balanced ternary (:
"""
name = 'trytes_ascii'
compat_name = 'trytes'
"""
Old name for this codec.
Note: Will be removed in PyOTA v2.1!
"""
# :bc: Without the bytearray cast, Python 2 will populate the dict
# with characters instead of integers.
alphabet = dict(enumerate(bytearray(b'9ABCDEFGHIJKLMNOPQRSTUVWXYZ')))
"""
Used to encode bytes into trytes.
"""
index = dict(zip(alphabet.values(), alphabet.keys()))
"""
Used to decode trytes into bytes.
"""
@classmethod
def get_codec_info(cls) -> CodecInfo:
"""
Returns information used by the codecs library to configure the
codec for use.
"""
codec = cls()
codec_info = {
'encode': codec.encode,
'decode': codec.decode,
# In Python 2, all codecs are made equal.
# In Python 3, some codecs are more equal than others.
'_is_text_encoding': False
}
return CodecInfo(**codec_info)
def encode(self,
input: Union[memoryview, bytes, bytearray],
errors: str = 'strict') -> Tuple[bytes, int]:
"""
Encodes a byte string into trytes.
"""
if isinstance(input, memoryview):
input = input.tobytes()
if not isinstance(input, (bytes, bytearray)):
raise with_context(
exc=TypeError(
"Can't encode {type}; byte string expected.".format(
type=type(input).__name__,
)),
context={
'input': input,
},
)
# :bc: In Python 2, iterating over a byte string yields
# characters instead of integers.
if not isinstance(input, bytearray):
input = bytearray(input)
trytes = bytearray()
for c in input:
second, first = divmod(c, len(self.alphabet))
trytes.append(self.alphabet[first])
trytes.append(self.alphabet[second])
return bytes(trytes), len(input)
def decode(self,
input: Union[memoryview, bytes, bytearray],
errors: str = 'strict') -> Tuple[bytes, int]:
"""
Decodes a tryte string into bytes.
"""
if isinstance(input, memoryview):
input = input.tobytes()
if not isinstance(input, (bytes, bytearray)):
raise with_context(
exc=TypeError(
"Can't decode {type}; byte string expected.".format(
type=type(input).__name__,
)),
context={
'input': input,
},
)
# :bc: In Python 2, iterating over a byte string yields
# characters instead of integers.
if not isinstance(input, bytearray):
input = bytearray(input)
bytes_ = bytearray()
for i in range(0, len(input), 2):
try:
first, second = input[i:i + 2]
except ValueError:
if errors == 'strict':
raise with_context(
exc=TrytesDecodeError(
"'{name}' codec can't decode value; "
"tryte sequence has odd length.".format(
name=self.name,
),
),
context={
'input': input,
},
)
elif errors == 'replace':
bytes_ += b'?'
continue
try:
bytes_.append(
self.index[first]
+ (self.index[second] * len(self.index))
)
except ValueError:
# This combination of trytes yields a value > 255 when
# decoded.
# Naturally, we can't represent this using ASCII.
if errors == 'strict':
raise with_context(
exc=TrytesDecodeError(
"'{name}' codec can't decode trytes {pair} "
"at position {i}-{j}: "
"ordinal not in range(255)".format(
name=self.name,
pair=chr(first) + chr(second),
i=i,
j=i + 1,
),
),
context={
'input': input,
}
)
elif errors == 'replace':
bytes_ += b'?'
return bytes(bytes_), len(input)
@lookup_function
def check_trytes_codec(encoding):
"""
Determines which codec to use for the specified encoding.
References:
- https://docs.python.org/3/library/codecs.html#codecs.register
"""
if encoding == AsciiTrytesCodec.name:
return AsciiTrytesCodec.get_codec_info()
elif encoding == AsciiTrytesCodec.compat_name:
warn(
'"{old_codec}" codec will be removed in PyOTA v2.1. '
'Use "{new_codec}" instead.'.format(
new_codec=AsciiTrytesCodec.name,
old_codec=AsciiTrytesCodec.compat_name,
),
DeprecationWarning,
)
return AsciiTrytesCodec.get_codec_info()
return None
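# --- Editor's illustrative sketch; not part of the original module. ----------
# Importing this module runs codecs.register() via the decorator above, so the
# codec can be used through the standard codecs machinery. Each byte maps to
# two trytes via divmod(byte, 27), and decode() reverses the pairing:
def _ascii_trytes_roundtrip_sketch():
    import codecs

    original = b'Hello, world!'
    trytes = codecs.encode(original, AsciiTrytesCodec.name)
    restored = codecs.decode(trytes, AsciiTrytesCodec.name)

    assert len(trytes) == 2 * len(original)
    assert restored == original
    return trytes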
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from pycuda import driver, compiler, gpuarray
import time
# -- initialize the device
import pycuda.autoinit
USE_SIMPLE_KERNEL = 0
USE_TILED_KERNEL = 1
kernel_source_code = """
__global__ void MatrixMulKernel(float *a, float *b, float *c)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int ty = threadIdx.y + blockIdx.y * blockDim.y;
float Pvalue = 0;
int full_size= %(MATRIX_SIZE)s*%(MATRIX_SIZE)s;
if (ty < %(MATRIX_SIZE)s && tx < %(MATRIX_SIZE)s){
// Each thread loads one row of M and one column of N,
// to produce one element of P.
for (int k = 0; k < %(MATRIX_SIZE)s; ++k) {
if(ty * %(MATRIX_SIZE)s + k < full_size && k * %(MATRIX_SIZE)s + tx < full_size){
float Aelement = a[ty * %(MATRIX_SIZE)s + k];
float Belement = b[k * %(MATRIX_SIZE)s + tx];
Pvalue += Aelement * Belement;
}
}
// Write the matrix to device memory;
// each thread writes one element
c[ty * %(MATRIX_SIZE)s + tx] = Pvalue;
}
}
__global__ void MatrixMulKernelTiled(float *A, float *B, float *C)
{
const uint wA = %(MATRIX_SIZE)s;
const uint wB = %(MATRIX_SIZE)s;
// Block index
const uint bx = blockIdx.x;
const uint by = blockIdx.y;
// Thread index
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
    const uint xIndex = tx + bx * %(BLOCK_SIZE)s;
//if (xIndex < wA){
// Index of the first sub-matrix of A processed by the block
const uint aBegin = wA * %(BLOCK_SIZE)s * by;
// Index of the last sub-matrix of A processed by the block
const uint aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
const uint aStep = %(BLOCK_SIZE)s;
// Index of the first sub-matrix of B processed by the block
const uint bBegin = %(BLOCK_SIZE)s * bx;
// Step size used to iterate through the sub-matrices of B
const uint bStep = %(BLOCK_SIZE)s * wB;
// The element of the block sub-matrix that is computed
// by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B required to
// compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Shared memory for the sub-matrix of A
__shared__ float As[%(BLOCK_SIZE)s][%(BLOCK_SIZE)s];
// Shared memory for the sub-matrix of B
__shared__ float Bs[%(BLOCK_SIZE)s][%(BLOCK_SIZE)s];
// Load the matrices from global memory to shared memory
// each thread loads one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < %(BLOCK_SIZE)s; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
const uint c = wB * %(BLOCK_SIZE)s * by + %(BLOCK_SIZE)s * bx;
C[c + wB * ty + tx] = Csub;
//}
}
"""
def cpu_operation(matrix_a, matrix_b):
return np.dot(matrix_a, matrix_b)
def gpu_operation(matrix_a, matrix_b, results_gpu, kernel_binary, grid, blocks):
kernel_binary(
# inputs
matrix_a, matrix_b,
# output
results_gpu,
block=blocks,
grid=grid
)
return results_gpu
def print_device_properties(dev):
    # Use named device_attribute enums rather than positional indexing into the
    # attribute dict; the inline values are typical for recent GPUs.
    MAX_BLOCK_DIM_X = dev.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_X)  # 1024
    MAX_BLOCK_DIM_Y = dev.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_Y)  # 1024
    MAX_BLOCK_DIM_Z = dev.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_Z)  # 64
    MAX_GRID_DIM_X = dev.get_attribute(driver.device_attribute.MAX_GRID_DIM_X)  # 2147483647
    MAX_GRID_DIM_Y = dev.get_attribute(driver.device_attribute.MAX_GRID_DIM_Y)  # 65535
    MAX_GRID_DIM_Z = dev.get_attribute(driver.device_attribute.MAX_GRID_DIM_Z)  # 65535
    MAX_THREAD_PER_BLOCK = dev.get_attribute(driver.device_attribute.MAX_THREADS_PER_BLOCK)  # 1024
print('Device attributes: *******************************')
print('MAX_BLOCK_DIM_X=', MAX_BLOCK_DIM_X)
print('MAX_BLOCK_DIM_Y=', MAX_BLOCK_DIM_Y)
print('MAX_BLOCK_DIM_Z=', MAX_BLOCK_DIM_Z)
print('MAX_GRID_DIM_X=', MAX_GRID_DIM_X)
print('MAX_GRID_DIM_Y=', MAX_GRID_DIM_Y)
print('MAX_GRID_DIM_Z=', MAX_GRID_DIM_Z)
print('MAX_THREAD_PER_BLOCK=', MAX_THREAD_PER_BLOCK)
print('*' * 50)
def gpu_compile_kernel(kernel_type, matrix_size):
driver.init()
dev = driver.Device(0)
# print_device_properties(dev)
    MAX_THREAD_PER_BLOCK = dev.get_attribute(driver.device_attribute.MAX_THREADS_PER_BLOCK)  # typically 1024
threads_per_block = int(np.sqrt(MAX_THREAD_PER_BLOCK))
number_of_blocks = int(matrix_size / threads_per_block)
    # add an extra block when matrix_size is not an exact multiple of threads_per_block
if (number_of_blocks * threads_per_block) < matrix_size:
number_of_blocks = number_of_blocks + 1
print('## Kernel variables: ******************************')
    print('matrix size = ', matrix_size)
print('threads per block = ', threads_per_block)
print('number of blocks = ', number_of_blocks)
print('*' * 50)
    grid = (number_of_blocks, number_of_blocks, 1)
    blocks = (threads_per_block, threads_per_block, 1)
kernel_code = kernel_source_code % {'MATRIX_SIZE': matrix_size, 'BLOCK_SIZE': threads_per_block}
# get the kernel function from the compiled module
# compile the kernel code
compiled_kernel = compiler.SourceModule(kernel_code)
binary_gpu = None
if kernel_type == USE_TILED_KERNEL:
binary_gpu = compiled_kernel.get_function("MatrixMulKernelTiled")
if kernel_type == USE_SIMPLE_KERNEL:
binary_gpu = compiled_kernel.get_function("MatrixMulKernel")
return binary_gpu, grid, blocks
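# The block-count logic in gpu_compile_kernel is a ceiling division; a hypothetical
# stand-alone helper (not used elsewhere in this script) expressing the same idea:
def ceil_div(numerator, denominator):
    """Return the smallest integer >= numerator / denominator."""
    return (numerator + denominator - 1) // denominator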
def compare_results(time_cpu, time_gpu, c_cpu, c_gpu, blocks, grid):
print('## Results: **************************************')
print('Time CPU %10.8f' % time_cpu)
print('Time GPU %10.8f' % time_gpu)
print("Speedup: %5.4f" % (time_cpu / time_gpu))
# check errors
    error = np.amax(np.abs(c_cpu - c_gpu.get()))
if error < ERROR_THRESHOLD:
print('SIZE:', matrix_size, 'SUCCESS - max difference: ', error)
else:
print('SIZE:', matrix_size, '* ERROR above threshold * - max difference: ', error)
print("Blocks: ", blocks)
print("Grid: ", grid)
def compare_matrix_operations(matrix_size):
# create two random square matrices
a_cpu = np.random.randn(matrix_size, matrix_size).astype(np.float32)
b_cpu = np.random.randn(matrix_size, matrix_size).astype(np.float32)
c_cpu = np.empty((matrix_size, matrix_size), np.float32)
# operation using the CPU
tic = time.time()
c_cpu = cpu_operation(a_cpu, b_cpu)
time_cpu = time.time() - tic
# transfer host (CPU) memory to device (GPU) memory
a_gpu = gpuarray.to_gpu(a_cpu)
b_gpu = gpuarray.to_gpu(b_cpu)
# create empty gpu array for the result (C = A * B)
c_gpu = gpuarray.empty((matrix_size, matrix_size), np.float32)
# compile kernel
print("## Simple kernel GPU operation ########################################")
kernel_binary, grid, blocks = gpu_compile_kernel(USE_SIMPLE_KERNEL, matrix_size)
# operation using the GPU
tic = time.time()
# call the kernel on the card
    c_gpu = gpu_operation(a_gpu, b_gpu, c_gpu, kernel_binary, grid, blocks)
time_gpu = time.time() - tic # time measure
compare_results(time_cpu, time_gpu, c_cpu, c_gpu, blocks, grid)
# create empty gpu array for the result (C = A * B)
c_gpu = gpuarray.empty((matrix_size, matrix_size), np.float32)
# compile kernel
print("## Tiled kernel GPU operation ########################################")
kernel_binary, grid, blocks = gpu_compile_kernel(USE_TILED_KERNEL, matrix_size)
# operation using the GPU
tic = time.time()
# call the kernel on the card
    c_gpu = gpu_operation(a_gpu, b_gpu, c_gpu, kernel_binary, grid, blocks)
time_gpu = time.time() - tic # time measure
compare_results(time_cpu, time_gpu, c_cpu, c_gpu, blocks, grid)
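# Hedged note: PyCUDA kernel launches are asynchronous, so the time.time() deltas
# above may under-report the true kernel runtime (the later c_gpu.get() forces the
# synchronization).  A sketch of a stricter timing wrapper using the already
# imported driver module:
def timed_gpu_call(fn, *args, **kwargs):
    tic = time.time()
    result = fn(*args, **kwargs)
    driver.Context.synchronize()  # wait for the queued kernel to finish
    return result, time.time() - tic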
if __name__ == "__main__":
ERROR_THRESHOLD = 0.001
MAX_MATRIX_SIZE = 3000
matrix_size = 4
while matrix_size <= MAX_MATRIX_SIZE:
compare_matrix_operations(matrix_size)
matrix_size = matrix_size * 2
|
|
# coding: utf-8
# <img style='float: left' width="150px" src="http://bostonlightswim.org/wp/wp-content/uploads/2011/08/BLS-front_4-color.jpg">
# <br><br>
#
# ## [The Boston Light Swim](http://bostonlightswim.org/)
#
# ### Fetch Sea Surface Temperature time-series data
# In[1]:
import time
start_time = time.time()
# ### Save configuration
# In[2]:
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import iris
from datetime import datetime, timedelta
from utilities import CF_names, start_log
# Today +- 4 days
today = datetime.utcnow()
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
start = today - timedelta(days=4)
stop = today + timedelta(days=4)
# Boston harbor.
spacing = 0.25
bbox = [-71.05-spacing, 42.28-spacing,
-70.82+spacing, 42.38+spacing]
# CF-names.
sos_name = 'sea_water_temperature'
name_list = CF_names[sos_name]
# Units.
units = iris.unit.Unit('celsius')
# Logging.
run_name = '{:%Y-%m-%d}'.format(stop)
log = start_log(start, stop, bbox)
# Config.
fname = os.path.join(run_name, 'config.pkl')
config = dict(start=start,
stop=stop,
bbox=bbox,
name_list=name_list,
units=units,
run_name=run_name)
with open(fname, 'wb') as f:
pickle.dump(config, f)
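# (Illustrative, hedged sketch: a downstream notebook would presumably reload this
# configuration along these lines.)
# with open(os.path.join(run_name, 'config.pkl'), 'rb') as f:
#     config = pickle.load(f)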
# ### Create the data filter
# In[3]:
from owslib import fes
from utilities import fes_date_filter
kw = dict(wildCard='*',
escapeChar='\\',
singleChar='?',
propertyname='apiso:AnyText')
or_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw)
for val in name_list])
# Exclude ROMS Averages and History files.
not_filt = fes.Not([fes.PropertyIsLike(literal='*Averages*', **kw)])
begin, end = fes_date_filter(start, stop)
filter_list = [fes.And([fes.BBox(bbox), begin, end, or_filt, not_filt])]
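# (Descriptive note: the composed filter asks the catalogue for records whose
# AnyText field matches any of the CF standard names, restricted to the bounding
# box and time window, while excluding records matching '*Averages*'.)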
# In[4]:
from owslib.csw import CatalogueServiceWeb
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'
csw = CatalogueServiceWeb(endpoint, timeout=60)
csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')
fmt = '{:*^64}'.format
log.info(fmt(' Catalog information '))
log.info("URL: {}".format(endpoint))
log.info("CSW version: {}".format(csw.version))
log.info("Number of datasets available: {}".format(len(csw.records.keys())))
# In[5]:
from utilities import service_urls
dap_urls = service_urls(csw.records, service='odp:url')
sos_urls = service_urls(csw.records, service='sos:url')
log.info(fmt(' CSW '))
for rec, item in csw.records.items():
log.info('{}'.format(item.title))
log.info(fmt(' SOS '))
for url in sos_urls:
log.info('{}'.format(url))
log.info(fmt(' DAP '))
for url in dap_urls:
log.info('{}.html'.format(url))
# In[6]:
from utilities import is_station
# Filter out some station endpoints.
non_stations = []
for url in dap_urls:
try:
if not is_station(url):
non_stations.append(url)
except RuntimeError as e:
log.warn("Could not access URL {}. {!r}".format(url, e))
dap_urls = non_stations
log.info(fmt(' Filtered DAP '))
for url in dap_urls:
log.info('{}.html'.format(url))
# ### NdbcSos
# In[7]:
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
collector_ndbc = NdbcSos()
collector_ndbc.set_bbox(bbox)
collector_ndbc.end_time = stop
collector_ndbc.start_time = start
collector_ndbc.variables = [sos_name]
ofrs = collector_ndbc.server.offerings
title = collector_ndbc.server.identification.title
log.info(fmt(' NDBC Collector offerings '))
log.info('{}: {} offerings'.format(title, len(ofrs)))
# In[8]:
from utilities import collector2table, to_html, get_ndbc_longname
ndbc = collector2table(collector=collector_ndbc)
names = []
for s in ndbc['station']:
try:
name = get_ndbc_longname(s)
except ValueError:
name = s
names.append(name)
ndbc['name'] = names
ndbc.set_index('name', inplace=True)
to_html(ndbc.head())
# ### CoopsSoS
# In[9]:
from pyoos.collectors.coops.coops_sos import CoopsSos
collector_coops = CoopsSos()
collector_coops.set_bbox(bbox)
collector_coops.end_time = stop
collector_coops.start_time = start
collector_coops.variables = [sos_name]
ofrs = collector_coops.server.offerings
title = collector_coops.server.identification.title
log.info(fmt(' Collector offerings '))
log.info('{}: {} offerings'.format(title, len(ofrs)))
# In[10]:
from utilities import get_coops_metadata
coops = collector2table(collector=collector_coops)
names = []
for s in coops['station']:
try:
name = get_coops_metadata(s)[0]
except ValueError:
name = s
names.append(name)
coops['name'] = names
coops.set_index('name', inplace=True)
to_html(coops.head())
# ### Join CoopsSoS and NdbcSos
# In[11]:
from pandas import concat
all_obs = concat([coops, ndbc])
to_html(all_obs.head())
# In[12]:
fname = '{}-all_obs.csv'.format(run_name)
fname = os.path.join(run_name, fname)
all_obs.to_csv(fname)
# ### Download the observed data series
# In[13]:
from pandas import DataFrame
from owslib.ows import ExceptionReport
from utilities import pyoos2df, save_timeseries
iris.FUTURE.netcdf_promote = True
log.info(fmt(' Observations '))
outfile = '{:%Y-%m-%d}-OBS_DATA.nc'.format(stop)
outfile = os.path.join(run_name, outfile)
log.info(fmt(' Downloading to file {} '.format(outfile)))
data = dict()
col = 'sea_water_temperature (C)'
for station in all_obs.index:
try:
idx = all_obs['station'][station]
df = pyoos2df(collector_ndbc, idx, df_name=station)
if df.empty:
df = pyoos2df(collector_coops, idx, df_name=station)
data.update({idx: df[col]})
except ExceptionReport as e:
log.warning("[{}] {}:\n{}".format(idx, station, e))
# ### Uniform 1-hour time base for model/data comparison
# In[14]:
from pandas import date_range
index = date_range(start=start, end=stop, freq='1H')
for k, v in data.items():
data[k] = v.reindex(index=index, limit=1, method='nearest')
obs_data = DataFrame.from_dict(data)
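# (Hedged illustration, not part of the original workflow: a toy example of how
# reindex(..., method='nearest') snaps irregular observations onto the hourly grid;
# Series and to_datetime would need to be imported from pandas to run it.)
# s = Series([10.0, 11.5], index=to_datetime(['2015-08-01 00:07', '2015-08-01 01:58']))
# s.reindex(date_range('2015-08-01', periods=3, freq='1H'), method='nearest')
# # -> 00:00 10.0, 01:00 10.0, 02:00 11.5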
# In[15]:
comment = "Several stations from http://opendap.co-ops.nos.noaa.gov"
kw = dict(longitude=all_obs.lon,
latitude=all_obs.lat,
station_attr=dict(cf_role="timeseries_id"),
cube_attr=dict(featureType='timeSeries',
Conventions='CF-1.6',
standard_name_vocabulary='CF-1.6',
cdm_data_type="Station",
comment=comment,
url=url))
save_timeseries(obs_data, outfile=outfile,
standard_name=sos_name, **kw)
to_html(obs_data.head())
# ### Loop discovered models and save the nearest time-series
# In[16]:
import warnings
from iris.exceptions import (CoordinateNotFoundError, ConstraintMismatchError,
MergeError)
from utilities import (TimeoutException, quick_load_cubes, proc_cube,
time_limit, is_model, get_model_name, get_surface)
log.info(fmt(' Models '))
cubes = dict()
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Suppress iris warnings.
for k, url in enumerate(dap_urls):
log.info('\n[Reading url {}/{}]: {}'.format(k+1, len(dap_urls), url))
try:
with time_limit(60*5):
cube = quick_load_cubes(url, name_list,
callback=None, strict=True)
if is_model(cube):
cube = proc_cube(cube, bbox=bbox,
time=(start, stop), units=units)
else:
log.warning("[Not model data]: {}".format(url))
continue
cube = get_surface(cube)
mod_name, model_full_name = get_model_name(cube, url)
cubes.update({mod_name: cube})
except (TimeoutException, RuntimeError, ValueError,
ConstraintMismatchError, CoordinateNotFoundError,
IndexError, AttributeError) as e:
log.warning('Cannot get cube for: {}\n{}'.format(url, e))
# In[17]:
from iris.pandas import as_series
from utilities import (make_tree, get_nearest_water,
add_station, ensure_timeseries, remove_ssh)
for mod_name, cube in cubes.items():
fname = '{:%Y-%m-%d}-{}.nc'.format(stop, mod_name)
fname = os.path.join(run_name, fname)
log.info(fmt(' Downloading to file {} '.format(fname)))
try:
tree, lon, lat = make_tree(cube)
except CoordinateNotFoundError as e:
log.warning('Cannot make KDTree for: {}'.format(mod_name))
continue
# Get model series at observed locations.
raw_series = dict()
for station, obs in all_obs.iterrows():
try:
kw = dict(k=10, max_dist=0.08, min_var=0.01)
args = cube, tree, obs.lon, obs.lat
series, dist, idx = get_nearest_water(*args, **kw)
except ValueError as e:
status = "No Data"
log.info('[{}] {}'.format(status, obs.name))
continue
if not series:
status = "Land "
else:
raw_series.update({obs['station']: series})
series = as_series(series)
status = "Water "
log.info('[{}] {}'.format(status, obs.name))
if raw_series: # Save cube.
for station, cube in raw_series.items():
cube = add_station(cube, station)
cube = remove_ssh(cube)
try:
cube = iris.cube.CubeList(raw_series.values()).merge_cube()
except MergeError as e:
log.warning(e)
ensure_timeseries(cube)
iris.save(cube, fname)
del cube
log.info('Finished processing [{}]'.format(mod_name))
# In[18]:
elapsed = time.time() - start_time
log.info('{:.2f} minutes'.format(elapsed/60.))
log.info('EOF')
with open('{}/log.txt'.format(run_name)) as f:
print(f.read())
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
import binascii
import json
import StringIO
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
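# (Hedged example: deser_uint256 reassembles a 256-bit little-endian integer from
# eight 32-bit words, e.g. deser_uint256(StringIO.StringIO(b'\x01' + b'\x00' * 31)) == 1.)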
#allows simple http get calls with a request body
def http_get_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
######################################
        # GETUTXOS: query an unspent outpoint #
######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
################################################
        # GETUTXOS: now query an already spent outpoint #
################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += binascii.unhexlify(txid)
binaryRequest += pack("i", n);
binaryRequest += binascii.unhexlify(vintx);
binaryRequest += pack("i", 0);
bin_response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = StringIO.StringIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no utxo should be returned: the tx is still unconfirmed and we did not ask to check the mempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be one utxo because the tx was just added to the mempool and /checkmempool was used
#do some invalid requests
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we sent an invalid json request
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we sent an invalid bin request
response = http_get_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 500) #must be a 500 because we sent an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/");
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because we stay within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(response_header_str.encode("hex")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", "", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read()
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", "", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read()
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Integration tests against zimbraAdmin SOAP webservice
It has to be tested against a zimbra server (see README.md)
"""
import unittest
import random
from zimsoap.exceptions import (
ZimbraSoapServerError, DomainHasNoPreAuthKey)
from zimsoap.client.account import ZimbraAccountClient
from zimsoap.client.admin import ZimbraAdminClient
from zimsoap.client import ZimbraAPISession
from zimsoap.zobjects.admin import (
Account, CalendarResource, COS, DistributionList, Domain, Server, Mailbox)
try:
from urllib2 import URLError
except ImportError:
from urllib.request import URLError
from six import text_type, binary_type, assertRegex
import tests
TEST_CONF = tests.get_config()
class ZimbraAdminClientTests(unittest.TestCase):
def setUp(self):
self.TEST_SERVER = TEST_CONF['host']
self.TEST_LOGIN = TEST_CONF['admin_login']
self.TEST_PASSWORD = TEST_CONF['admin_password']
self.TEST_ADMIN_PORT = TEST_CONF['admin_port']
self.LAMBDA_USER = TEST_CONF['lambda_user']
self.SERVER_NAME = TEST_CONF['server_name']
def testLogin(self):
zc = ZimbraAdminClient(self.TEST_SERVER, self.TEST_ADMIN_PORT)
zc.login(self.TEST_LOGIN, self.TEST_PASSWORD)
self.assertTrue(zc._session.is_logged_in())
def testBadLoginFailure(self):
with self.assertRaises(ZimbraSoapServerError) as cm:
zc = ZimbraAdminClient(self.TEST_SERVER, self.TEST_ADMIN_PORT)
zc.login('[email protected]', self.TEST_PASSWORD)
self.assertIn('authentication failed', cm.exception.msg)
def testBadPasswordFailure(self):
with self.assertRaises(ZimbraSoapServerError) as cm:
zc = ZimbraAdminClient(self.TEST_SERVER, self.TEST_ADMIN_PORT)
zc.login(self.TEST_LOGIN, 'badpassword')
self.assertIn('authentication failed', cm.exception.msg)
def testBadHostFailure(self):
with self.assertRaises(URLError):
zc = ZimbraAdminClient('nonexistanthost.example.com',
self.TEST_ADMIN_PORT)
zc.login(self.TEST_LOGIN, self.TEST_PASSWORD)
def testBadPortFailure(self):
with self.assertRaises(URLError):
zc = ZimbraAdminClient(self.TEST_SERVER, 9999)
zc.login(self.TEST_LOGIN, self.TEST_PASSWORD)
class ZimbraAdminClientRequests(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Login/connection is done at class initialization to reduce tests time
cls.zc = ZimbraAdminClient(TEST_CONF['host'], TEST_CONF['admin_port'])
cls.zc.login(TEST_CONF['admin_login'], TEST_CONF['admin_password'])
def setUp(self):
self.EXISTANT_DOMAIN = TEST_CONF['domain_1']
self.EXISTANT_MBOX_ID = "d78fd9c9-f000-440b-bce6-ea938d40fa2d"
# Should not exist before the tests
self.TEST_DL_NAME = 'unittest-test-list-1@%s' % self.EXISTANT_DOMAIN
def tearDown(self):
        # Try to delete a leftover test distribution list (if any)
try:
resp = self.zc.request('GetDistributionList', {
'dl': {'by': 'name', '_content': self.TEST_DL_NAME}
})
dl_id = resp['dl']['id']
self.zc.request('DeleteDistributionList', {'id': dl_id})
except ZimbraSoapServerError:
pass
def testGetAllAccountsReturnsSomething(self):
resp = self.zc.request('GetAllAccounts')
        self.assertIn('account', resp)
self.assertIsInstance(resp['account'], list)
def testGetAlllCalendarResourcesReturnsSomething(self):
resp = self.zc.request_list('GetAllCalendarResources')
self.assertIsInstance(resp, list)
def testGetAllDomainsReturnsSomething(self):
resp = self.zc.request('GetAllDomains')
        self.assertIn('domain', resp)
self.assertIsInstance(resp['domain'], list)
def testGetDomainReturnsDomain(self):
resp = self.zc.request('GetDomain', {'domain': {
'by': 'name',
'_content': self.EXISTANT_DOMAIN
}})
self.assertIsInstance(resp, dict)
self.assertTrue('domain' in resp)
self.assertIsInstance(resp['domain'], dict)
def testGetMailboxStatsReturnsSomething(self):
resp = self.zc.request('GetMailboxStats')
self.assertTrue('stats' in resp)
self.assertIsInstance(resp['stats'], dict)
def testCountAccountReturnsSomething(self):
"""Count accounts on the first of domains"""
resp = self.zc.request_list(
'CountAccount',
{'domain': {'by': 'name', '_content': self.EXISTANT_DOMAIN}}
)
first_cos = resp[0]
self.assertTrue('id' in first_cos)
# will fail if not convertible to int
self.assertIsInstance(int(first_cos['_content']), int)
def testGetMailboxRequest(self):
try:
EXISTANT_MBOX_ID = self.testGetAllMailboxes()[0]['accountId']
except Exception as e:
            self.fail('failed in self.testGetAllMailboxes(): {}'.format(e))
resp = self.zc.request(
'GetMailbox', {'mbox': {'id': EXISTANT_MBOX_ID}})
self.assertIsInstance(resp['mbox'], dict)
self.assertTrue('mbxid' in resp['mbox'])
def testGetAllMailboxes(self):
resp = self.zc.request('GetAllMailboxes')
mailboxes = resp['mbox']
self.assertIsInstance(resp['mbox'], list)
return mailboxes
def testCreateGetDeleteDistributionList(self):
""" As Getting and deleting a list requires it to exist
a list to exist, we group the 3 tests together.
"""
def createDistributionList(name):
resp = self.zc.request('CreateDistributionList', {'name': name})
self.assertIsInstance(resp['dl'], dict)
def getDistributionList(name):
resp = self.zc.request('GetDistributionList',
{'dl': {'by': 'name', '_content': name}})
self.assertIsInstance(resp['dl'], dict)
self.assertIsInstance(resp['dl']['id'], text_type)
return resp['dl']['id']
def deleteDistributionList(dl_id):
self.zc.request('DeleteDistributionList', {'id': dl_id})
# Should not exist
with self.assertRaises(ZimbraSoapServerError):
getDistributionList(self.TEST_DL_NAME)
createDistributionList(self.TEST_DL_NAME)
# It should now exist
list_id = getDistributionList(self.TEST_DL_NAME)
deleteDistributionList(list_id)
        # Should no longer exist
with self.assertRaises(ZimbraSoapServerError):
getDistributionList(self.TEST_DL_NAME)
def testGetAccount(self):
account = {'by': 'name', '_content': TEST_CONF['lambda_user']}
resp = self.zc.request('GetAccount', {'account': account})
self.assertIsInstance(resp['account'], dict)
def testGetAccountInfo(self):
account = {'by': 'name', '_content': TEST_CONF['lambda_user']}
resp = self.zc.request('GetAccountInfo', {'account': account})
self.assertIsInstance(resp['cos']['id'], (text_type, binary_type))
def testSearchDirectory(self):
resp = self.zc.search_directory(
query='mail=%s' % TEST_CONF['lambda_user'])
self.assertEqual(resp['account'].name, TEST_CONF['lambda_user'])
resp = self.zc.search_directory(
query='dc=zimbratest', types='domains')
self.assertEqual(resp['domain'].name, 'zimbratest.example.com')
class PythonicAdminAPITests(unittest.TestCase):
""" Tests the pythonic API, the one that should be accessed by someone using
the library, zimbraAdmin features.
"""
@classmethod
def setUpClass(cls):
# Login/connection is done at class initialization to reduce tests time
cls.zc = ZimbraAdminClient(TEST_CONF['host'],
TEST_CONF['admin_port'])
cls.zc.login(TEST_CONF['admin_login'], TEST_CONF['admin_password'])
def setUp(self):
self.HOST = TEST_CONF['host']
self.ADMIN_PASSWORD = TEST_CONF['admin_password']
self.ADMIN_PORT = TEST_CONF['admin_port']
self.ADMIN_LOGIN = TEST_CONF['admin_login']
self.LAMBDA_USER = TEST_CONF['lambda_user']
self.DOMAIN1 = TEST_CONF['domain_1']
self.DOMAIN2 = TEST_CONF['domain_2']
self.TMP_DOMAIN = 'oazimtools.test'
self.SERVER_NAME = TEST_CONF['server_name']
self.EXISTANT_MBOX_ID = "d78fd9c9-f000-440b-bce6-ea938d40fa2d"
# Should not exist before the tests
self.TEST_DL_NAME = 'unittest-test-list-1@%s' % self.DOMAIN1
def tearDown(self):
try:
self.zc.delete_distribution_list(
DistributionList(name=self.TEST_DL_NAME))
except (ZimbraSoapServerError, KeyError):
pass
def test_get_all_domains(self):
doms = self.zc.get_all_domains()
self.assertIsInstance(doms, list)
self.assertIsInstance(doms[0], Domain)
# Look for client1.unbound.example.com
found = False
for i in doms:
if i.name == self.DOMAIN1:
found = True
self.assertTrue(found)
def test_create_delete_domain(self):
# CREATE
self.zc.create_domain(self.TMP_DOMAIN)
dom = self.zc.get_domain(Domain(name=self.TMP_DOMAIN))
self.assertIsInstance(dom, Domain)
self.assertEqual(dom.name, self.TMP_DOMAIN)
# DELETE
self.zc.delete_domain(dom)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_domain(dom)
def test_create_delete_forced_domain(self):
account_mail = 'test_user@' + self.TMP_DOMAIN
cal_res_mail = 'test_res@' + self.TMP_DOMAIN
alias_name = self.LAMBDA_USER.split('@')[0] + '@' + self.TMP_DOMAIN
dl_mail = 'test_dl@' + self.TMP_DOMAIN
# CREATE
self.zc.create_domain(self.TMP_DOMAIN)
dom = self.zc.get_domain(Domain(name=self.TMP_DOMAIN))
self.assertIsInstance(dom, Domain)
self.assertEqual(dom.name, self.TMP_DOMAIN)
self.zc.create_account(account_mail, 'pass1234')
self.zc.create_calendar_resource(cal_res_mail, attrs={
'displayName': 'test display name',
'zimbraCalResType': CalendarResource.EQUIPMENT_TYPE
})
self.zc.add_account_alias(Account(name=self.LAMBDA_USER), alias_name)
self.zc.create_distribution_list(dl_mail)
# DELETE
self.zc.delete_domain_forced(dom)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_domain(dom)
def test_get_domain(self):
dom = self.zc.get_domain(Domain(name=self.DOMAIN1))
self.assertIsInstance(dom, Domain)
self.assertEqual(dom.name, self.DOMAIN1)
def test_modify_domain(self):
        rand_str = str(random.randint(0, 10**9))
dom = self.zc.get_domain(Domain(name=self.DOMAIN1))
a = {'zimbraAutoProvNotificationBody': rand_str}
self.zc.modify_domain(dom, a)
dom = self.zc.get_domain(Domain(name=self.DOMAIN1))
self.assertEqual(dom['zimbraAutoProvNotificationBody'], rand_str)
def test_get_all_accounts(self):
accounts = self.zc.get_all_accounts()
self.assertIsInstance(accounts[0], Account)
self.assertEqual(len(accounts), 17)
def test_get_all_accounts_by_single_server(self):
test_server = Server(name=self.SERVER_NAME)
accounts = self.zc.get_all_accounts(server=test_server)
self.assertIsInstance(accounts[0], Account)
self.assertEqual(len(accounts), 17)
def test_get_all_accounts_by_single_domain(self):
test_domain = Domain(name=self.DOMAIN2)
accounts = self.zc.get_all_accounts(domain=test_domain)
self.assertIsInstance(accounts[0], Account)
self.assertEqual(len(accounts), 5)
def test_get_all_accounts_by_single_domain_and_server(self):
test_domain = Domain(name=self.DOMAIN2)
test_server = Server(name=self.SERVER_NAME)
accounts = self.zc.get_all_accounts(domain=test_domain,
server=test_server)
self.assertIsInstance(accounts[0], Account)
self.assertEqual(len(accounts), 5)
def test_get_all_accounts_exclusion_filters(self):
# The self.DOMAIN1 contains 5 user accounts, 1 system and 1 admin
test_domain = Domain(name=self.DOMAIN1)
accounts = self.zc.get_all_accounts(
domain=test_domain,
include_system_accounts=True, include_admin_accounts=True)
self.assertEqual(len(accounts), 7)
accounts_no_admin = self.zc.get_all_accounts(
domain=test_domain,
include_system_accounts=True, include_admin_accounts=False)
self.assertEqual(len(accounts_no_admin), 6)
accounts_no_system = self.zc.get_all_accounts(
domain=test_domain,
include_system_accounts=False, include_admin_accounts=True)
self.assertEqual(len(accounts_no_system), 6)
accounts_no_admin_no_system = self.zc.get_all_accounts(
domain=test_domain,
include_admin_accounts=False, include_system_accounts=False)
self.assertEqual(len(accounts_no_admin_no_system), 5)
def test_get_all_calendar_resources(self):
resources = self.zc.get_all_calendar_resources()
self.assertIsInstance(resources[0], CalendarResource)
self.assertEqual(len(resources), 2)
def test_get_all_calendar_resources_by_single_server(self):
test_server = Server(name=self.SERVER_NAME)
resources = self.zc.get_all_calendar_resources(server=test_server)
self.assertIsInstance(resources[0], CalendarResource)
self.assertEqual(len(resources), 2)
def test_get_all_calendar_resources_by_single_domain(self):
test_domain = Domain(name=self.DOMAIN2)
resources = self.zc.get_all_calendar_resources(domain=test_domain)
self.assertEqual(len(resources), 1)
def test_get_calendar_resource(self):
calendar_resource = self.zc.get_calendar_resource(
CalendarResource(name=TEST_CONF['calres1']))
self.assertIsInstance(calendar_resource, CalendarResource)
self.assertEqual(calendar_resource.name, TEST_CONF['calres1'])
# Now grab it by ID
calendar_resource_by_id = self.zc.get_calendar_resource(
CalendarResource(id=calendar_resource.id))
self.assertIsInstance(calendar_resource_by_id, CalendarResource)
self.assertEqual(calendar_resource_by_id.name, TEST_CONF['calres1'])
self.assertEqual(calendar_resource_by_id.id, calendar_resource.id)
def test_get_quota_usage(self):
resp = self.zc.get_quota_usage('zimbratest3.example.com')
for account in resp:
if account['name'] == '[email protected]':
quota_user = account
self.assertIsInstance(resp, list)
self.assertEqual(quota_user['used'], '0')
self.assertEqual(quota_user['limit'], '0')
def test_create_get_update_delete_calendar_resource(self):
name = 'test-{}@zimbratest.example.com'.format(
random.randint(0, 10**9))
new_name = 'new-{}@zimbratest.example.com'.format(
random.randint(0, 10**9))
res_req = CalendarResource(name=name)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_calendar_resource(res_req)
# CREATE
res = self.zc.create_calendar_resource(name, attrs={
'displayName': 'test display name',
'zimbraCalResType': CalendarResource.EQUIPMENT_TYPE
})
self.assertIsInstance(res, CalendarResource)
self.assertEqual(res.name, name)
# GET
res_got = self.zc.get_calendar_resource(res_req)
self.assertIsInstance(res_got, CalendarResource)
self.assertEqual(res.name, name)
# UPDATE
random_name_1 = 'test-{}'.format(random.randint(0, 10**9))
self.zc.modify_calendar_resource(res_got,
{'displayName': random_name_1})
res_got = self.zc.get_calendar_resource(res_req)
self.assertEqual(res_got['displayName'], random_name_1)
# RENAME
new_r = self.zc.rename_calendar_resource(res_got, new_name)
self.assertEqual(new_r.name, new_name)
# DELETE
self.zc.delete_calendar_resource(res_got)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_calendar_resource(res)
def test_create_get_update_rename_delete_account(self):
name = 'test-{}@zimbratest.example.com'.format(
random.randint(0, 10**9))
password = 'pass124'
ac_req = Account(name=name)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_account(ac_req)
# CREATE
ac = self.zc.create_account(name, password)
self.assertIsInstance(ac, Account)
self.assertEqual(ac.name, name)
# GET
ac_got = self.zc.get_account(ac_req)
self.assertIsInstance(ac_got, Account)
self.assertEqual(ac_got.name, name)
# UPDATE
random_name_1 = 'test-{}'.format(random.randint(0, 10**9))
self.zc.modify_account(ac_got, {'displayName': random_name_1})
ac_got = self.zc.get_account(ac_req)
self.assertEqual(ac_got['displayName'], random_name_1)
# MODIFY PASSWORD
new_password = 'new_pass1234'
self.zc.set_password(ac, new_password)
act_zc = ZimbraAccountClient(TEST_CONF['host'],
TEST_CONF['https_port'])
try:
act_zc.login(ac.name, new_password)
except ZimbraSoapServerError:
self.fail('self.zc.set_password has failed to change password')
# RENAME
self.zc.rename_account(
ac_got, '[email protected]')
renamed_ac_got = self.zc.get_account(
Account(name='[email protected]'))
self.assertEqual(renamed_ac_got['mail'],
'[email protected]')
# DELETE
self.zc.delete_account(renamed_ac_got)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_account(ac)
def test_create_delete_account_alias(self):
# prepare account
ac_name = 'test-{}@zimbratest.example.com'.format(
random.randint(0, 10**9))
ac = self.zc.create_account(ac_name, 'pass1234')
alias_name = 'test-{}@zimbratest.example.com'.format(
random.randint(0, 10**9))
# CREATE
retval = self.zc.add_account_alias(Account(name=ac_name), alias_name)
self.assertEqual(retval, None)
# GET
ac_got = self.zc.get_account(Account(name=ac_name))
self.assertIn(alias_name, ac_got['mail'])
# DELETE
self.zc.remove_account_alias(ac, alias_name)
# GET
ac_got = self.zc.get_account(Account(name=ac_name))
self.assertNotIn(alias_name, ac_got['mail'])
self.zc.delete_account(ac)
def test_get_mailbox_stats(self):
stats = self.zc.get_mailbox_stats()
self.assertIsInstance(stats, dict)
self.assertIsInstance(stats['numMboxes'], int)
self.assertIsInstance(stats['totalSize'], int)
def test_count_account(self):
d = Domain(name=self.DOMAIN1)
# ex return: list: ((<COS object>, <int>), ...)
cos_counts = self.zc.count_account(d)
self.assertIsInstance(cos_counts, list)
self.assertIsInstance(cos_counts[0], tuple)
self.assertIsInstance(cos_counts[0][0],
COS)
self.assertIsInstance(cos_counts[0][1], int)
def test_get_all_mailboxes(self):
mboxes = self.zc.get_all_mailboxes()
self.assertIsInstance(mboxes, list)
self.assertIsInstance(mboxes[0], Mailbox)
def test_account_mailbox(self):
# First, fetch an existing account_id
first_account_id = self.zc.get_all_mailboxes()[0].accountId
mbox = self.zc.get_account_mailbox(first_account_id)
self.assertTrue(hasattr(mbox, 'mbxid'))
self.assertTrue(hasattr(mbox, 's')) # size
def test_create_get_modify_rename_delete_distribution_list(self):
name = self.TEST_DL_NAME
dl_req = DistributionList(name=name)
with self.assertRaises(ZimbraSoapServerError):
print(self.zc.get_distribution_list(dl_req))
# CREATE
dl = self.zc.create_distribution_list(name)
self.assertIsInstance(dl, DistributionList)
self.assertEqual(dl.name, name)
# GET ALL
dl_list = self.zc.get_all_distribution_lists()
self.assertIsInstance(dl_list[1], DistributionList)
# MODIFY
self.zc.add_distribution_list_member(
dl, ['[email protected]', '[email protected]'])
dl_membered = self.zc.get_distribution_list(dl_req)
self.assertEqual(
set(dl_membered.members),
set(['[email protected]', '[email protected]']))
self.zc.remove_distribution_list_member(
dl, ['[email protected]'])
dl_unmembered = self.zc.get_distribution_list(dl_req)
self.assertEqual(dl_unmembered.members, ['[email protected]'])
rand = 'list-{}'.format(random.randint(0, 10**9))
self.zc.modify_distribution_list(dl, {'displayName': rand})
dl_modified = self.zc.get_distribution_list(dl_req)
self.assertEqual(dl_modified.property('displayName'), rand)
# GET
dl_got = self.zc.get_distribution_list(dl_req)
self.assertIsInstance(dl_got, DistributionList)
self.assertEqual(dl_got, dl_list[1])
# RENAME
new_dl = self.zc.rename_distribution_list(
dl_got,
'[email protected]')
self.assertEqual(new_dl.name, '[email protected]')
# ALIAS
alias_name = '[email protected]'
self.zc.add_distribution_list_alias(new_dl, alias_name)
new_dl_got = self.zc.get_distribution_list(
DistributionList(name=new_dl.name)
)
if alias_name in new_dl_got.property('zimbraMailAlias'):
alias_present = True
else:
alias_present = False
self.assertTrue(alias_present)
self.zc.remove_distribution_list_alias(new_dl, alias_name)
new_dl_got = self.zc.get_distribution_list(
DistributionList(name=new_dl.name)
)
if alias_name in new_dl_got.property('zimbraMailAlias'):
alias_present = True
else:
alias_present = False
self.assertFalse(alias_present)
# DELETE
self.zc.delete_distribution_list(new_dl)
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_distribution_list(dl)
def test_delete_distribution_list_by_name(self):
name = self.TEST_DL_NAME
dl_req = DistributionList(name=name)
dl_full = self.zc.create_distribution_list(name)
self.zc.delete_distribution_list(dl_req)
# List with such a name does not exist
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_distribution_list(dl_req)
# List with such an ID does not exist
with self.assertRaises(ZimbraSoapServerError):
self.zc.get_distribution_list(dl_full)
def test_get_account(self):
account = self.zc.get_account(Account(name=self.LAMBDA_USER))
self.assertIsInstance(account, Account)
self.assertEqual(account.name, self.LAMBDA_USER)
# Now grab it by ID
account_by_id = self.zc.get_account(Account(id=account.id))
self.assertIsInstance(account_by_id, Account)
self.assertEqual(account_by_id.name, self.LAMBDA_USER)
self.assertEqual(account_by_id.id, account.id)
def test_get_account_cos(self):
cos = self.zc.get_account_cos(Account(name=self.LAMBDA_USER))
self.assertIsInstance(cos, COS)
self.assertEqual(cos.name, 'default')
assertRegex(self, cos.id, r'[\w\-]{36}')
def test_mk_auth_token_succeeds(self):
user = Account(name='admin@{0}'.format(self.DOMAIN1))
tk = self.zc.mk_auth_token(user, 0)
self.assertIsInstance(tk, str)
def test_mk_auth_token_fails_if_no_key(self):
user = Account(name='admin@{0}'.format(self.DOMAIN2))
with self.assertRaises(DomainHasNoPreAuthKey):
self.zc.mk_auth_token(user, 0)
def test_admin_get_logged_in_by(self):
new_zc = ZimbraAdminClient(self.HOST, self.ADMIN_PORT)
new_zc.get_logged_in_by(self.ADMIN_LOGIN, self.zc)
self.assertTrue(new_zc._session.is_logged_in())
self.assertTrue(new_zc.is_session_valid())
def test_admin_get_account_authToken1(self):
""" From an existing account """
authToken, lifetime = self.zc.get_account_authToken(
account=Account(name=self.LAMBDA_USER)
)
new_zc = ZimbraAccountClient(self.HOST)
new_zc.login_with_authToken(authToken, lifetime)
self.assertTrue(new_zc._session.is_logged_in())
self.assertTrue(new_zc.is_session_valid())
def test_admin_get_account_authToken2(self):
""" From an account name """
authToken, lifetime = self.zc.get_account_authToken(
account_name=self.LAMBDA_USER
)
new_zc = ZimbraAccountClient(self.HOST)
new_zc.login_with_authToken(authToken, lifetime)
self.assertTrue(new_zc._session.is_logged_in())
self.assertTrue(new_zc.is_session_valid())
def test_get_modify_config(self):
attr = 'zimbraMtaMaxMessageSize'
new_value = '42'
ori_value = '10240000'
self.assertEqual(
self.zc.get_config(attr)[attr],
ori_value
)
modified_conf = self.zc.modify_config(attr, new_value)
self.assertEqual(modified_conf[attr], new_value)
# Undo
self.zc.modify_config(attr, ori_value)
class ZimbraAPISessionTests(unittest.TestCase):
def setUp(self):
self.HOST = TEST_CONF['host']
self.ADMIN_PORT = TEST_CONF['admin_port']
self.ADMIN_LOGIN = TEST_CONF['admin_login']
self.ADMIN_PASSWORD = TEST_CONF['admin_password']
self.cli = ZimbraAdminClient(self.HOST, self.ADMIN_PORT)
self.session = ZimbraAPISession(self.cli)
def testInit(self):
self.session = ZimbraAPISession(self.cli)
self.assertFalse(self.session.is_logged_in())
def testSuccessfullLogin(self):
self.session.login(self.ADMIN_LOGIN, self.ADMIN_PASSWORD)
self.assertTrue(self.session.is_logged_in())
def testGoodSessionValidates(self):
self.session.login(self.ADMIN_LOGIN, self.ADMIN_PASSWORD)
self.assertTrue(self.session.is_session_valid())
def testBadSessionFails(self):
self.session.login(self.ADMIN_LOGIN, self.ADMIN_PASSWORD)
self.session.authToken = '42'
self.assertFalse(self.session.is_session_valid())
|
|
# Using the magic encoding
# -*- coding: utf-8 -*-
from .. import msg
import xml.etree.ElementTree as ET
import re
from ..docelements import DocElement, DocGroup
import os
def _update_globals(kids, xmlglobals):
"""Updates the specified 'xmlglobals' dictionary with the specific
*and* supported global tag definitions.
"""
for child in kids:
key = child.tag.lower()
if key != "defaults":
if key not in xmlglobals:
xmlglobals[key] = {}
if "identifier" in child.attrib:
xmlglobals[key][child.attrib["identifier"]] = child
elif "name" in child.attrib:
xmlglobals[key][child.attrib["name"].lower()] = child
else:
if "defaults" not in xmlglobals:
xmlglobals["defaults"] = {}
xmlglobals["defaults"].update(child.attrib)
def _set_global_defaults(xmlglobals):
"""Sets the default attributes on tags that were specified in <global>
tags in the XML file."""
for key, val in xmlglobals.items():
if key != "defaults":
for name, tag in val.items():
_update_from_globals(tag, xmlglobals, None)
def _handle_ignores(testtag):
"""Checks if the specified test tag has attribute "ignores"; if it
does, a <global ignore="true" /> tag is created for each variable
name in the list.
"""
if "ignores" in testtag.attrib:
from xml.etree.ElementTree import Element
for varname in re.split("[\s,]+", testtag.attrib["ignores"]):
#For ignored variables, the order does not matter. However,
#if the user included the caret, just remove it.
if varname[0] == '^':
varname = varname[1::]
e = Element("global", {"name": varname, "ignore": "true"})
testtag.append(e)
limited = ["staging", "folder", "when"]
"""A set of attributes in the global defaults that only copy to tags that can
make use of them."""
monitored = {
"group": ["staging"],
"output": ["folder"],
"value": ["folder"],
"input": ["folder"],
"target": ["when"]
}
"""Dictionary of fortpy testing tags that have their attributes monitored.
The attributes listed in 'limited' will only be copied to those tags that
explicitly require them in this dictionary."""
def _update_from_globals(xtag, xmlglobals, container):
"""Updates the attributes of the specified XML tag using the globals
dictionary for those tags that are supported.
"""
#We also need to perform a globals update on each of the *original* child tags
#in the current tag being updated.
for child in list(xtag):
_update_from_globals(child, xmlglobals, xtag)
#If the tag doesn't have a value that is in the global defaults
#then we automatically copy it over.
if "defaults" in xmlglobals:
#Iterate over the attributes that we have default values for.
for key in xmlglobals["defaults"]:
if key not in xtag.attrib:
overwrite = True
#If the key is a limited one, then it only gets copied to
#tags that we know can actually use the attribute.
if key in limited:
overwrite = (xtag.tag in monitored and key in monitored[xtag.tag])
if overwrite:
xtag.attrib[key] = xmlglobals["defaults"][key]
#For the global tags, we just want to update using the default values
#They aren't allowed to reference other globals.
if container is None:
return
def update_tag(xtag, gtag):
"""Updates the missing attributes in xtag with those from gtag."""
        for attr, value in gtag.attrib.items():
            if attr not in xtag.attrib:
                xtag.attrib[attr] = value
def resolve_global(xtag, xmlglobals, xtagname, gtagname, attrib, container, op, gassumed):
"""Adds the child in xmlglobals with the relevant id to the tag.
:arg xtag: the tag with the reference to a global tag.
:arg xmlglobals: the dictionary of global tags extracted from <global>.
        :arg xtagname: the tag name that 'xtag' must have for the reference to apply.
        :arg gtagname: the name of the global tag to look for.
:arg attrib: the name of the attribute in 'xtag' that references the
global tag to process.
:arg container: for "op"=="append", the XMLElement to append the global
tag to as a subelement.
:arg op: one of ["append", "update"]. For "append" the global tag is appended
as a subelement to 'container'. For "update", the attributes of the global
tag are copied to 'xtag'.
:arg gassumed: when True, the 'attrib' being examined is assumed to reference
only tags in <globals>, so no 'global:' prefix is required before attributes.
"""
if attrib not in xtag.attrib:
return False
result = False
if xtag.tag == xtagname and (gassumed or "global:" in xtag.attrib[attrib]):
#We allow a ';'-separated list of global tag names, all of
#which are appended to the child's subelements list.
i = -1
for sattr in xtag.attrib[attrib].split(";"):
gid = sattr.strip().split(":")[1] if ":" in sattr else sattr.strip()
#Since ordering matters, if the id starts with ^, we insert it at
#the beginning of the subelements list instead of appending
if gid[0] == '^':
insert = True
gid = gid[1::]
i += 1
else:
insert = False
if gtagname in xmlglobals and gid.lower() in xmlglobals[gtagname]:
gtag = xmlglobals[gtagname][gid.lower()]
if op == "append":
if insert:
container.insert(i, gtag)
else:
container.append(gtag)
elif op == "update":
update_tag(xtag, gtag)
result = True
else:
from fortpy.msg import warn
wstr = 'The global tag of type <{}> for <{} {}="{}"> does not exist.'
warn(wstr.format(gtagname, xtagname, attrib, xtag.attrib[attrib]))
#Overwrite the name of the tag to not have the 'global:' in it anymore.
xtag.attrib[attrib] = xtag.attrib[attrib].replace("global:", "")
return result
#Next, we just need to check the attributes of the tag that are allowed
#to point to global tag children.
i = 0
#The order of these is xtagname, gtagname, xtagattrib, container, op, gassumed
tags = [("assignment", "value", "value", xtag, "append", False),
("target", "output", "compareto", container, "append", False),
("test", "input", "inputs", xtag, "append", True),
("test", "global", "globals", xtag, "append", True),
("test", "assignment", "assignments", xtag, "append", True),
("test", "mapping", "mappings", xtag, "append", True),
("group", "input", "inputs", xtag, "append", True),
("group", "global", "globals", xtag, "append", True),
("group", "assignment", "assignments", xtag, "append", True),
("group", "mapping", "mappings", xtag, "append", True)]
while i < len(tags):
resolve_global(xtag, xmlglobals, *tags[i])
i += 1
if xtag.tag in ["test", "group"]:
_handle_ignores(xtag)
if xtag.tag == "auto":
_expand_autotag(xtag, container)
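# (Hedged illustration of the resolution performed above: given
#     <global><input identifier="mesh">...</input></global>
# a <test inputs="mesh"> tag gets the matching <input> appended as a child, while
# an attribute such as compareto="global:out1" on a <target> pulls the referenced
# global <output> into the enclosing container.)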
def _expand_autotag(atag, container):
"""Expands the contents of the specified auto tag within its parent container.
"""
if atag.tag != "auto":
return
if "names" in atag.attrib:
i = -1
for name in re.split("[\s,]+", atag.attrib["names"]):
if name[0] == '^':
name = name[1::]
insert = True
i += 1
else:
insert = False
for child in atag:
dupe = child.copy()
for attr, value in dupe.items():
dupe.attrib[attr] = value.replace("$", name)
if insert:
container.insert(i, dupe)
else:
container.append(dupe)
else:
from fortpy.msg import warn
warn("'names' is a required attribute of the <auto> tag.")
class DocStringParser(object):
"""Parses the XML tags from the custom docstrings in our fortran code."""
def __init__(self):
self.setup_regex()
def setup_regex(self):
"""Sets up the patterns and regex objects for parsing the docstrings."""
#The list of known docstring XML tags that we can extract and work with.
self.keywords = [ "summary", "usage", "errors", "member", "group", "local",
"comments", "parameter" ]
#Regex for extracting the contents of docstrings minus the !! and any leading spaces.
self._RX_DOCS = r"^\s*!!(?P<docstring>.+?)$"
self.RE_DOCS = re.compile(self._RX_DOCS, re.M)
#Regex for handling cross references in the documentation
self._RX_REFS = r"@CREF\[(?P<reference>[^\]]+)\]"
self.RE_REFS = re.compile(self._RX_REFS)
#Regex to match first lines of declarations for code elements that can be
#decorated by docstrings.
self._RX_DECOR = (r"((?P<type>character|real|type|logical|integer|complex)?"
r"(?P<kind>\([a-z0-9_]+\))?)?(,?(?P<modifiers>[^\n]+?))?"
r"\s*(?P<codetype>subroutine|function|type|module|interface)\s+(?P<name>[^(]+)")
self.RE_DECOR = re.compile(self._RX_DECOR, re.I)
#Regex for getting the docstrings decorating one or more modules in a code file,
#Since they aren't contained inside any other code element, we can't just use
#the normal docblocks routines.
self._RX_MODDOCS = (r"^(?P<docstring>\s*!!.+?)\n\s*module\s+(?P<name>[A-Za-z0-9_]+)"
".+?end\s+module(\s+(?P=name))?")
self.RE_MODDOCS = re.compile(self._RX_MODDOCS, re.DOTALL | re.I)
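#Illustrative example (not from the original source): the patterns above target
#Fortran code decorated like the following, where RE_DOCS captures each '!!'
#line and RE_DECOR matches the declaration that follows the docstring block.
#
#  !! <summary>Adds two integers.</summary>
#  !! <parameter name="a">First operand.</parameter>
#  integer function add_ints(a, b)
#
#For the last line, RE_DECOR yields codetype='function' and name='add_ints'.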
def parse_docs(self, string, container = None):
"""Parses the docstrings from the specified string that is the contents of container.
Returns a dictionary with keys as parent.code_element_name and the values
a list of XML elements for corresponding docstrings.
:arg container: the instance of the element who owns the string.
"""
from fortpy.utility import XML
result = {}
if container is None:
#We are working with the code file at the module level. Extract the module
#docstrings and XML and return the dictionary with module names as keys.
for module in self.RE_MODDOCS.finditer(string):
docstring = re.sub("\s*!!", "", module.group("docstring"))
doctext = "<doc>{}</doc>".format(re.sub("\n", "\s", docstring))
try:
docs = XML(doctext)
#Get the name of the module to use as the key and then add the list
#of XML docstrings to the result.
key = module.group("name")
if not key in result:
result[key] = [list(docs), module.start(), module.end()]
else:
result[key][0].extend(list(docs))
except ET.ParseError:
msg.err(doctext)
else:
#This is the text content of a code element that was buried inside of the module.
#Get all the docblocks and the items they decorate from this parent.
result = self._parse_docblocks(string, container)
return result
def _process_docgroup(self, group, code_el, add=True):
"""Explodes the group members into a list; adds the group to the
specified code element and updates the group value for each
of the docstring elements in the group.
:arg add: when true, docgroups must be unique in the code element;
otherwise, existing groups are overwritten."""
if group.name in code_el.groups and add:
msg.warn("duplicate group names in code element {}".format(code_el.name))
else:
code_el.groups[group.name] = group
kids = self.to_doc(list(group.xml), group.decorates)
for child in kids:
child.group = group.name
return kids
def process_execdocs(self, docs, anexec, key, add=True):
"""Associates parameter documentation with parameters for the executable
and any remaining docs with the executable itself.
- key: the module.executable identifier for the function or subroutine.
"""
#'docs' is a list of docstrings for summary, usage, parameters, etc.
#Check which of them belong to parameters and associate them; otherwise
#append them to the executable.
for doc in docs:
if doc.doctype == "parameter":
if doc.pointsto is not None and doc.pointsto in anexec.parameters:
if add:
anexec.parameters[doc.pointsto].docstring.append(doc)
else:
anexec.parameters[doc.pointsto].overwrite_docs(doc)
else:
#the parameter docstring is orphaned, give a warning.
wmsg = ("the docstring for parameter '{}' had no corresponding "
"parameter in the executable definition for '{}' ({}).")
msg.warn(wmsg.format(doc.pointsto, anexec.name, anexec.module.filepath))
elif doc.doctype == "group":
if "name" not in doc.attributes:
doc.attributes["name"] = "default"
kids = self._process_docgroup(doc, anexec)
if add:
anexec.docstring.extend(kids)
else:
for kid in kids:
anexec.overwrite_docs(kid)
else:
#The docstring must be for the executable
if add:
anexec.docstring.append(doc)
else:
anexec.overwrite_docs(doc)
def process_embedded(self, xlist, anexec, add=True):
"""Processes the specified xml list and executable to link *embedded*
types and executables to their docstrings.
:arg xlist: the dictionary of docstrings returned by parse_docs().
:arg add: when true, docstrings are only appended, never overwritten.
"""
#Keep track of the changes that took place in the lengths of the
#docstrings that got added/updated on the elements children.
delta = 0
for t in anexec.types:
key = "{}.{}".format(anexec.name, t)
if key in xlist:
docs = self.to_doc(xlist[key][0], t)
self.process_memberdocs(docs, anexec.types[t], add)
anexec.types[t].docstart = xlist[key][1]
delta += xlist[key][2] - anexec.types[t].docend
anexec.types[t].docend = xlist[key][2]
for iexec in anexec.executables:
key = "{}.{}".format(anexec.name, iexec)
if key in xlist:
docs = self.to_doc(xlist[key][0], iexec)
self.process_memberdocs(docs, anexec.executables[iexec], add)
anexec.executables[iexec].docstart = xlist[key][1]
delta += xlist[key][2] - anexec.executables[iexec].docend
anexec.executables[iexec].docend = xlist[key][2]
if not add:
return delta
def process_memberdocs(self, docs, codeEl, add=True):
"""Associates member type DocElements with their corresponding members
in the specified code element. The element must have a dictionary of
members already."""
#Now we need to associate the members with their docstrings
#Some of the members may be buried inside a group tag and
#need to be handled separately.
remainingdocs = []
expandeddocs = []
#Process any groups that are in the doc list.
for doc in docs:
if isinstance(doc, DocGroup):
kids = self._process_docgroup(doc, codeEl, add)
expandeddocs.extend(kids)
else:
expandeddocs.append(doc)
for doc in expandeddocs:
#Process the docstring, if it doesn't belong to a member
#we will add it to the list of unassigned docstrings,
#these most likely point to type declarations.
if not self._process_docstrings(doc, codeEl.members, add):
remainingdocs.append(doc)
return remainingdocs
def _process_docstrings(self, doc, members, add=True):
"""Adds the docstrings from the list of DocElements to their
respective members.
Returns true if the doc element belonged to a member."""
if ((doc.doctype == "member" or doc.doctype == "local") and
doc.pointsto is not None and
doc.pointsto in members):
if add:
members[doc.pointsto].docstring.append(doc)
else:
members[doc.pointsto].overwrite_docs(doc)
return True
else:
return False
def to_doc(self, xmllist, decorates):
"""Converts the specified xml list to a list of docstring elements."""
result = []
for xitem in xmllist:
if xitem.tag != "group":
#The docstring allows a single string to point to multiple
#names in a comma-separated list in the names attribute.
if "name" in list(xitem.keys()):
names = re.split("[\s,]+", xitem.get("name"))
for name in names:
#Once we have created the DocElement, we need to override
#its name attribute (which will have the comma-separated
#list) with the single name
docel = DocElement(xitem, self, decorates)
docel.attributes["name"] = name
result.append(docel)
else:
#This docstring doesn't have a name attribute, just add it
result.append(DocElement(xitem, self, decorates))
else:
docel = DocGroup(xitem, decorates)
result.append(docel)
return result
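#Illustrative note (not part of the original code): a docstring tag such as
#  <local name="indices, weights">temporary work arrays</local>
#is split on the comma above, so to_doc returns two DocElement instances, one
#with attributes["name"] == "indices" and one with "weights"; the remaining
#attributes and text are taken from the same XML element in both cases.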
def _parse_docblocks(self, string, container):
"""Parses all the docstrings out of the specified string.
Returns a dictionary of docstrings with the key as parent.code_element_name
and the value a list of XML elements that contain docstrings.
"""
#The easiest way to do this is to look at one line at a time and see if it is a docstring
#When we find a group of docstrings that suddenly ends, the next item is the code element
#that they were decorating (which may or may not be pertinent).
from fortpy.utility import XML
current = []
docblocks = {}
docstart = 0
for line in string.split("\n"):
match = self.RE_DOCS.match(line)
if match is not None:
current.append(match.group("docstring"))
if len(current) == 1:
#This was the first docstring of a new documentation block.
docend = docstart + len(line) + 1 # +1 for \n removed by split()
else:
#We already have some docstrings in the block, update start/end
docend += len(line) + 1
else:
#See if we were previously working on a docstring block or not.
if len(current) > 0:
#Save all the current docstrings in the blocks dictionary
#under the name of the code element in this line.
key = self._parse_docline(line, container)
#If the docblock has multiple XML tags at the same depth, the XML
#parser will scream. Wrap everything in a doc tag.
doctext = "<doc>{}</doc>".format(" ".join(current))
try:
#Let the docstart and docend *always* be absolute character references.
tabsstart, tabsend = container.module.absolute_charindex(string, docstart, docend-len(line))
emsg="module '{0}' docstring starting @ {1[0]}:{1[1]}"
emsg=emsg.format(container.module.name, container.module.linenum(tabsstart))
docs = XML(doctext, emsg)
if not key in docblocks:
absstart, absend = tabsstart, tabsend
docblocks[key] = [list(docs), absstart, absend]
else:
docblocks[key][0].extend(list(docs))
except ET.ParseError as err:
msg.err(err.msg)
#Reset the list of current docstrings
current = []
docstart = docend + len(line) + 1
else:
#We need to keep track of the line lengths for docstart/end.
docstart += len(line) + 1
return docblocks
def _parse_docline(self, line, container):
"""Parses a single line of code following a docblock to see if
it is a valid code element that can be decorated. If so, return
the name of the code element."""
match = self.RE_DECOR.match(line)
if match is not None:
return "{}.{}".format(container.name, match.group("name"))
else:
return container.name
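#Illustrative example (hypothetical names): if a docblock is immediately
#followed by the line
#  pure function norm(vec)
#inside a container named 'vector_utils', _parse_docline returns
#'vector_utils.norm', which _parse_docblocks then uses as the key for the
#XML elements of that block.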
def parsexml(self, xmlstring, modules, source=None):
"""Parses the docstrings out of the specified xml file.
:arg source: the path to the file from which the XML string was extracted.
"""
result = {}
from fortpy.utility import XML_fromstring
xmlroot = XML_fromstring(xmlstring, source)
if xmlroot.tag == "fortpy" and "mode" in xmlroot.attrib and \
xmlroot.attrib["mode"] == "docstring":
#First, cycle through the children to find the <globals> tag (if any
#exist). Its children will apply to the other tags we find,
#and we will have to update their attributes accordingly.
xmlglobals = {}
for child in xmlroot.iterfind("globals"):
_update_globals(list(child), xmlglobals)
_set_global_defaults(xmlglobals)
#We fill the dictionary with the decorated element names as keys and lists
#of the xml docstring elements as values.
for child in xmlroot:
if child.tag == "globals":
continue
xmltags = []
if child.tag == "decorates" and "name" in child.attrib:
decorates = child.attrib["name"]
xmltags.extend(list(child))
elif "decorates" in child.attrib:
decorates = child.attrib["decorates"]
xmltags.append(child)
for xtag in xmltags:
_update_from_globals(xtag, xmlglobals, child)
if decorates in result:
result[decorates].extend(xmltags)
else:
result[decorates] = xmltags
#Loop through all the docstrings we found and team them up with
#their respective module members.
self._xml_update_modules(result, modules)
def _xml_update_modules(self, xmldict, modules):
"""Updates the docstrings in the specified modules by looking for
docstrings in the xmldict."""
for kdecor in xmldict:
modname, memname = kdecor.split(".")
if modname in modules:
module = modules[modname]
#We only need to check the members, types and executables
memname = memname.lower()
if memname in module.members:
docs = self.to_doc(xmldict[kdecor], modname)
self.process_memberdocs(docs, module)
elif memname in module.types:
member = module.types[memname]
docs = self.to_doc(xmldict[kdecor], memname)
member.docstring.extend(docs)
elif memname in module.executables:
member = module.executables[memname]
docs = self.to_doc(xmldict[kdecor], memname)
self.process_execdocs(docs, member, kdecor)
else:
msg.warn("orphaned docstring. No member {} in module {}.".format(
memname, modname))
else:
msg.warn("orphaned docstring from XML docfile for {}".format(kdecor))
def rt_update_module(self, xmldict, module):
"""Updates the members, executables and types in the specified module
to have the latest docstring information from the xmldict.
"""
#This keeps track of how many character were added/removed by
#updating the docstrings in xmldict.
delta = 0
for kdecor in xmldict:
if "." in kdecor:
modname, memname = kdecor.split(".")
else:
modname, memname = module.name, None
if module.name == modname:
#This tag is relevant to the specified module. Continue
xlist, docstart, docend = xmldict[kdecor]
#We only need to check the members, types and executables
#For executables and types, we need to update the docstart and
#docend attributes since their docstrings must come as a single
#block immediately preceding the signature, so that our values
#from the updater will be correct.
if memname in module.types:
member = module.types[memname]
docs = self.to_doc(xlist, memname)
member.docstring = docs
delta += self._rt_update_docindices(member, docstart, docend)
elif memname in module.executables:
member = module.executables[memname]
docs = self.to_doc(xlist, memname)
self.process_execdocs(docs, member, kdecor, False)
delta += self._rt_update_docindices(member, docstart, docend)
else:
#Since it didn't point to anything else, it must be for the
#members of the module.
docs = self.to_doc(xlist, modname)
self.process_memberdocs(docs, module, False)
return delta
def _rt_update_docindices(self, element, docstart, docend):
"""Updates the docstart, docend, start and end attributes for the
specified element using the new limits for the docstring."""
#see how many characters have to be added/removed from the end
#of the current doc limits.
delta = element.docend - docend
element.docstart = docstart
element.docend = docend
element.start += delta
element.end += delta
return delta
|
|
#!/usr/bin/env python
"""
Viewer Sync
===========
Contains the functions required for a group of Viewer nodes to be kept in sync.
## Public Functions
remove_callbacks()
Removes callback from all selected viewers and all viewers linked.
setup_sync()
Sets up a viewerSync between a group of Viewer nodes.
sync_viewers()
Syncs all the given viewers to the settings on the caller node.
## License
The MIT License (MIT)
Copyright (c) 2011-2014 Philippe Huberdeau and Sean Wallitsch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Imports
from ast import literal_eval
# Nuke Imports
try:
import nuke
except ImportError:
pass
# =============================================================================
# GLOBALS
# =============================================================================
# The specific text to display on the viewerSync knob for the listed
# viewer knob.
KNOB_TITLES = {
'channels': 'channels',
'cliptest': 'zebra-stripe',
'downrez': 'proxy settings',
'format_center': 'format center',
'gain': 'gain',
'gamma': 'gamma',
'masking_mode': 'masking mode',
'masking_ratio': 'masking ratio',
'overscan': 'overscan',
'ignore_pixel_aspect': 'ignore pixel aspect ratio',
'input_number': 'viewed input',
'input_process': 'input process on/off',
'input_process_node': 'input process node',
'inputs': 'input nodes',
'rgb_only': 'LUT applies to rgb channels only',
'roi': 'roi',
'safe_zone': 'safe zone',
'show_overscan': 'show overscan',
'viewerInputOrder': 'input process order',
'viewerProcess': 'LUT',
'zoom_lock': 'zoom lock'
}
# These are tooltips for the viewerSync knobs, with the keys being the normal
# knob the viewerSync knob refers to.
KNOB_TOOLTIPS = {
'channels': 'Sync the layers and alpha channel to display in the viewers. '
'The "display style" is not synced.',
'cliptest': 'Sync if zebra-striping is enabled or not between viewers.',
'downrez': 'Sync the scale down factor for proxy mode. Proxy mode '
'activation is always synced.',
'format_center': 'Sync if a crosshair is displayed at the center of the '
'viewer window.',
'gain': 'Sync the gain slider between viewers.',
'gamma': 'Sync the gamma slider between viewers.',
'masking_mode': 'Sync the mask style between viewers.',
'masking_ratio': 'Sync the mask ratio selection between viewers.',
'overscan': 'Sync the amount of overscan displayed between viewers.',
'ignore_pixel_aspect': 'If selected all viewers will either show square '
'pixels or the pixel aspect ratio denoted by '
'the format.',
'input_number': 'Syncs which input number is being viewed between all '
'viewers. This does not mean that all viewers are '
'viewing the same nodes, just that all viewers are '
'viewing input 1, etc.',
'input_process': 'If selected all viewers will either have the input '
'process on, or off.',
'input_process_node': 'Syncs what node is used as the input process '
'between all viewers.',
'inputs': 'If selected, all viewers will point to the same nodes in the '
'node graph.',
'rgb_only': 'Syncs the "apply LUT to color channels only" knob, which '
'indicates that the viewer will attempt to apply the lut to '
'only the color channels. This only works with knobs that '
'have an "rgb_only" knob, which is few.',
'roi': 'Syncs the ROI window between all viewers. ROI needs to be manually '
'activated for all viewers.',
'safe_zone': 'Syncs the safe zone overlays between all viewers.',
'show_overscan': 'If selected, all viewers will either show overscan or '
'not show overscan.',
'viewerInputOrder': 'Syncs if the input process occurs before or after '
'the viewer process between all viewers.',
'viewerProcess': 'Syncs the LUT between all viewers.',
'zoom_lock': 'If selected, the zoom lock will apply to all viewers or '
'none.'
}
# The default values for a fresh viewerSync. Ideally these would be read from
# a savable config file.
SYNC_DEFAULTS = {
'channels': False,
'cliptest': True,
'downrez': True,
'format_center': True,
'gain': False,
'gamma': False,
'masking_mode': True,
'masking_ratio': True,
'overscan': True,
'ignore_pixel_aspect': True,
'input_number': True,
'input_process': True,
'input_process_node': True,
'inputs': False,
'rgb_only': True,
'roi': True,
'safe_zone': True,
'show_overscan': True,
'viewerInputOrder': True,
'viewerProcess': True,
'zoom_lock': True
}
# List all viewerSync specific knobs.
# These knobs contain the bool values specifying if a normal viewer knob
# should be synced or not.
VIEWER_SYNC_KNOBS = [
'vs_{knob}'.format(knob=sync_knob) for sync_knob in SYNC_DEFAULTS.keys()
]
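# For illustration: with the defaults above this list contains names such as
# 'vs_gain', 'vs_gamma' and 'vs_viewerProcess', the Boolean knobs that
# _add_sync_knobs() below adds to each Viewer node.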
# =============================================================================
# EXPORTS
# =============================================================================
__all__ = [
'remove_callbacks',
'setup_sync',
'sync_viewers',
]
# =============================================================================
# PRIVATE FUNCTIONS
# =============================================================================
def _add_sync_knobs(viewer):
"""Adds the sync option knobs to the given given viewer node.
If this gets called on a node that already has viewerSync knobs, those
knobs will sync instead of being added again.
Args:
viewer : (<nuke.nodes.Viewer>)
The Viewer node to add viewerSync knobs to.
Returns:
None
Raises:
N/A
"""
if 'vs_options' in viewer.knobs():
# This node already has a settings pane- we'll reset the settings to
# default.
for knob in SYNC_DEFAULTS:
viewer['vs_' + knob].setValue(SYNC_DEFAULTS[knob])
return
tab = nuke.Tab_Knob('vs_options', 'Viewer Sync')
viewer.addKnob(tab)
def add_knobs(knob_list):
"""For every knob in the list, adds that knob to the current tab"""
for knob in knob_list:
new_knob = nuke.Boolean_Knob('vs_' + knob, KNOB_TITLES[knob])
new_knob.setTooltip(KNOB_TOOLTIPS[knob])
new_knob.setValue(SYNC_DEFAULTS[knob])
new_knob.setFlag(nuke.STARTLINE)
viewer.addKnob(new_knob)
input_options = nuke.Text_Knob('vs_input_options', 'Input Options')
viewer.addKnob(input_options)
add_knobs(['inputs', 'input_number', 'channels'])
display_options = nuke.Text_Knob('vs_display_options', 'Display Options')
viewer.addKnob(display_options)
add_knobs(
[
'viewerProcess', 'rgb_only', 'input_process',
'input_process_node', 'viewerInputOrder', 'gain', 'gamma',
'ignore_pixel_aspect', 'zoom_lock', 'show_overscan',
'overscan'
]
)
overlay_options = nuke.Text_Knob('vs_overlay_options', 'Overlay Options')
viewer.addKnob(overlay_options)
add_knobs(
[
'masking_mode', 'masking_ratio', 'safe_zone',
'format_center', 'cliptest'
]
)
process_options = nuke.Text_Knob('vs_process_options', 'Processing Options')
viewer.addKnob(process_options)
add_knobs(['downrez', 'roi'])
# =============================================================================
def _extract_viewer_list(viewer):
"""Extracts a list of Viewer nodes from a callback.
Searches a viewer node for a viewerSync callback, and extracts the
value of the `viewers` arg.
Args:
viewer : (<nuke.nodes.Viewer>)
The viewer node with the callback attached.
Returns:
[<nuke.nodes.Viewer>]
A list of viewer nodes that were listed in the callback arg.
Raises:
ValueError
If the callback found on the viewer is present, but not for
viewerSync.
"""
callback = viewer['knobChanged'].value()
if not callback:
return []
elif 'viewerSync' not in callback:
raise ValueError("Not a viewerSync'd viewer.")
callback = callback.replace('viewerSync.sync_viewers(', '')[:-1]
linked_viewers = literal_eval(callback)
viewer_nodes = [
nuke.toNode(node) for node in linked_viewers if nuke.toNode(node)
]
return viewer_nodes
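# Illustrative sketch (not part of the original module): shows the knobChanged
# string written by _set_callback() and how the viewer list is recovered from
# it by the logic above. The viewer names used here are hypothetical.
def _example_callback_roundtrip():
    """Returns the viewer names parsed from a sample viewerSync callback."""
    callback = "viewerSync.sync_viewers(['Viewer1', 'Viewer2'])"
    names = literal_eval(callback.replace('viewerSync.sync_viewers(', '')[:-1])
    return names  # ['Viewer1', 'Viewer2']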
# =============================================================================
def _remove_knobs(viewer):
"""Removes all viewerSync knobs from a viewer.
Since this function only deletes knobs that begin with `vs_`, it should
not raise any exceptions due to missing knobs. One should be able
to run this on a Viewer (or any node, for that matter) with no viewerSync
knobs on it whatsoever and not raise any errors.
Args:
viewer : (<nuke.nodes.Viewer>)
The viewer node with the viewerSync knobs on it.
Returns:
None
Raises:
N/A
"""
for knob in viewer.knobs():
if knob.startswith('vs_'):
viewer.removeKnob(viewer[knob])
# The tab knob itself is usually not removed by the loop above, so remove
# it explicitly if it's still present.
if 'vs_options' in viewer.knobs():
viewer.removeKnob(viewer['vs_options'])
# =============================================================================
def _sync_knob(source, targets, knob):
"""Syncs a knob setting from the source to the target.
Args:
source : (<nuke.Node>)
Any node that has a knob with a value we want to sync from.
targets : [<nuke.Node>]
A list of nodes that should have the same knob as source, that we
want to have the same value as source. The call to these nodes
and knobs is protected by a try/except, so even if the knob is
missing it should resolve without error.
knob : (str)
The knob name to match between the source and the targets.
Returns:
None
Raises:
N/A
"""
for target in targets:
try:
target[knob].setValue(source[knob].value())
except NameError:
# Knob doesn't exist on target.
continue
# =============================================================================
def _set_callback(node, viewers):
"""Sets the callback on the node with the viewers and knobs args.
Args:
node : (<nuke.nodes.Viewer>)
The viewer node we're going to set the callback on.
viewers : [<nuke.nodes.Viewer>]
The viewers the callback should reference.
Returns:
None
Raises:
N/A
"""
# Create a copy of list, as we're poppin' the `node` if found.
viewers = list(viewers)
# Remove our caller from the nodes to update if present.
if node in viewers:
viewers.pop(viewers.index(node))
# Get the list of node names to populate the arg with
viewer_names = [viewer.fullName() for viewer in viewers]
node['knobChanged'].setValue(
'viewerSync.sync_viewers({viewers})'.format(
viewers=viewer_names
)
)
# =============================================================================
# PUBLIC FUNCTIONS
# =============================================================================
def remove_callbacks():
"""Removes callback from all selected viewers and all viewers linked.
Checks to make sure that the callback present is a viewerSync callback
before we remove the callback, this prevents us from interfering with
another tool.
Args:
N/A
Returns:
None
Raises:
N/A
"""
viewers = nuke.selectedNodes('Viewer')
if not viewers:
viewers = nuke.allNodes('Viewer')
else:
extra_viewers = [] # Viewers that weren't in the selected group.
for viewer in viewers:
try:
linked_viewers = _extract_viewer_list(viewer)
except ValueError:
pass
else:
extra_viewers.extend(linked_viewers)
viewers.extend(extra_viewers)
for viewer in viewers:
if 'viewerSync' in viewer['knobChanged'].value():
viewer['knobChanged'].setValue('')
_remove_knobs(viewer)
# =============================================================================
def setup_sync():
"""Sets up a viewerSync between a group of Viewer nodes.
This sets up callbacks between either all selected viewers, or all viewers
at the current node graph level (as defined by what nuke.allNodes()
returns). It also sets up a series of settings on the Viewer nodes
themselves, controlling which knobs get synced between the Viewers.
Before setting up the viewers, we check the current knobChanged value.
Often that value is a viewerSync callback already. If so, we deactivate
that viewerSync group before continuing. If the callback is foreign (not
a viewerSync callback), we leave it alone and remove that Viewer from the
viewerSync group, rather than mess up another python process.
Args:
N/A
Returns:
None
Raises:
N/A
"""
# Grab all of our currently selected Viewer nodes:
viewers = nuke.selectedNodes('Viewer')
# We'll be using the viewer_levels dictionary to link viewers
# across the same DAG level, and avoid linking lone viewers on sub DAGs.
viewer_levels = {}
# If we find ANY viewers in the currently selected set that already have a
# knobChanged value, we'll turn off syncing on all the nodes they're linked
# to. Safer that way.
remove_viewers = []
if viewers:
for viewer in viewers:
# In case Nuke returns us viewers split across different levels,
# we'll need to split them up by level so that we don't
# attempt to link those.
group = '.'.join(viewer.fullName().split('.')[:-1])
if not group:
group = 'root'
group_viewers = viewer_levels.get(group, [])
group_viewers.append(viewer)
viewer_levels[group] = group_viewers
else:
# No viewers were provided, so we'll just grab all the viewers
# at our current level
viewers = nuke.allNodes('Viewer')
viewer_levels['group'] = viewers
for level in viewer_levels.keys():
if len(viewer_levels[level]) <= 1:
# Nothing to sync, delete this level.
del viewer_levels[level]
bad_viewers = [] # List of viewers that have foreign callbacks
for viewers in viewer_levels.values():
for viewer in viewers:
try:
linked_viewers = _extract_viewer_list(viewer)
except ValueError:
bad_viewers.append(viewer)
else:
remove_viewers.extend(linked_viewers)
for rm_viewer in list(remove_viewers):
for viewers in viewer_levels.values():
if rm_viewer in viewers:
remove_viewers.remove(rm_viewer)
if rm_viewer in bad_viewers:
try:
remove_viewers.remove(rm_viewer)
except ValueError:
# We probably already removed this viewer above.
pass
if remove_viewers:
for viewer in set(remove_viewers):
viewer['knobChanged'].setValue('')
_remove_knobs(viewer)
for viewers in viewer_levels.values():
for viewer in bad_viewers:
if viewer in viewers:
viewers.remove(viewer)
for viewer in viewers:
_add_sync_knobs(viewer)
_set_callback(viewer, viewers)
# =============================================================================
def sync_viewers(viewers):
"""Syncs all the given viewers to the settings on the caller node.
This is the primary callback for viewerSync. Through it, the actual sync
happens. Before the callback executes, we compare the calling knob to a
list of knobs that viewerSync is concerned about. If the caller knob isn't
on the white-list, or the calling knob isn't currently set to sync (via the
caller node's settings) we return early.
Otherwise we sync the knob values for the knob that called us.
Args:
viewers : [str]
This list of absolute viewer names will be resolved into
<nuke.nodes.Viewer>s, which will be synced to the caller
node's knob values.
Returns:
None
Raises:
N/A
"""
caller = nuke.thisNode()
caller_knob = nuke.thisKnob().name()
# We need to check what knob is calling us first- if that knob isn't a
# syncing knob, we'll return.
if caller_knob not in ['inputChange', 'knobChanged']:
if caller_knob not in SYNC_DEFAULTS.keys() + VIEWER_SYNC_KNOBS:
return
if caller_knob not in VIEWER_SYNC_KNOBS:
if not caller['vs_{knob}'.format(knob=caller_knob)].value():
# Sync setting is false for this knob
return
# Grab our viewer nodes and remove any that have been deleted.
viewer_nodes = [
nuke.toNode(viewer) for viewer in viewers if nuke.toNode(viewer)
]
if caller_knob in VIEWER_SYNC_KNOBS:
# Sync setting and continue
_sync_knob(caller, viewer_nodes, caller_knob)
if caller[caller_knob].value():
caller_knob = caller_knob.replace('vs_', '')
if caller_knob in ['inputChange', 'inputs']:
if caller['vs_inputs'].value():
for viewer in viewer_nodes:
for i in xrange(caller.inputs()):
viewer.setInput(i, caller.input(i))
return
elif caller_knob == 'knobChanged':
knob_list = [
knob for knob in SYNC_DEFAULTS.keys() if SYNC_DEFAULTS[knob]
]
else:
knob_list = [caller_knob]
# Update remaining viewers to point at our current node.
for knob in knob_list:
_sync_knob(caller, viewer_nodes, knob)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A job server submitting portable pipelines as uber jars to Flink."""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os
import shutil
import tempfile
import time
import zipfile
from concurrent import futures
import grpc
import requests
from google.protobuf import json_format
from apache_beam.options import pipeline_options
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
_LOGGER = logging.getLogger(__name__)
class FlinkUberJarJobServer(abstract_job_service.AbstractJobServiceServicer):
"""A Job server which submits a self-contained Jar to a Flink cluster.
The jar contains the Beam pipeline definition, dependencies, and
the pipeline artifacts.
"""
def __init__(self, master_url, options):
super(FlinkUberJarJobServer, self).__init__()
self._master_url = master_url
self._executable_jar = (options.view_as(pipeline_options.FlinkRunnerOptions)
.flink_job_server_jar)
self._artifact_port = (options.view_as(pipeline_options.JobServerOptions)
.artifact_port)
self._temp_dir = tempfile.mkdtemp(prefix='apache-beam-flink')
def start(self):
return self
def stop(self):
pass
def executable_jar(self):
url = (self._executable_jar or
job_server.JavaJarJobServer.path_to_beam_jar(
'runners:flink:%s:job-server:shadowJar' % self.flink_version()))
return job_server.JavaJarJobServer.local_jar(url)
def flink_version(self):
full_version = requests.get(
'%s/v1/config' % self._master_url).json()['flink-version']
# Only return up to minor version.
return '.'.join(full_version.split('.')[:2])
def create_beam_job(self, job_id, job_name, pipeline, options):
return FlinkBeamJob(
self._master_url,
self.executable_jar(),
job_id,
job_name,
pipeline,
options,
artifact_port=self._artifact_port)
class FlinkBeamJob(abstract_job_service.AbstractBeamJob):
"""Runs a single Beam job on Flink by staging all contents into a Jar
and uploading it via the Flink Rest API."""
# These must agree with those defined in PortablePipelineJarUtils.java.
PIPELINE_FOLDER = 'BEAM-PIPELINE'
PIPELINE_MANIFEST = PIPELINE_FOLDER + '/pipeline-manifest.json'
# We only stage a single pipeline in the jar.
PIPELINE_NAME = 'pipeline'
PIPELINE_PATH = '/'.join(
[PIPELINE_FOLDER, PIPELINE_NAME, "pipeline.json"])
PIPELINE_OPTIONS_PATH = '/'.join(
[PIPELINE_FOLDER, PIPELINE_NAME, 'pipeline-options.json'])
ARTIFACT_MANIFEST_PATH = '/'.join(
[PIPELINE_FOLDER, PIPELINE_NAME, 'artifact-manifest.json'])
ARTIFACT_FOLDER = '/'.join([PIPELINE_FOLDER, PIPELINE_NAME, 'artifacts'])
def __init__(
self, master_url, executable_jar, job_id, job_name, pipeline, options,
artifact_port=0):
super(FlinkBeamJob, self).__init__(job_id, job_name, pipeline, options)
self._master_url = master_url
self._executable_jar = executable_jar
self._jar_uploaded = False
self._artifact_port = artifact_port
def prepare(self):
# Copy the executable jar, injecting the pipeline and options as resources.
with tempfile.NamedTemporaryFile(suffix='.jar') as tout:
self._jar = tout.name
shutil.copy(self._executable_jar, self._jar)
with zipfile.ZipFile(self._jar, 'a', compression=zipfile.ZIP_DEFLATED) as z:
with z.open(self.PIPELINE_PATH, 'w') as fout:
fout.write(json_format.MessageToJson(
self._pipeline_proto).encode('utf-8'))
with z.open(self.PIPELINE_OPTIONS_PATH, 'w') as fout:
fout.write(json_format.MessageToJson(
self._pipeline_options).encode('utf-8'))
with z.open(self.PIPELINE_MANIFEST, 'w') as fout:
fout.write(json.dumps(
{'defaultJobName': self.PIPELINE_NAME}).encode('utf-8'))
self._start_artifact_service(self._jar, self._artifact_port)
def _start_artifact_service(self, jar, requested_port):
self._artifact_staging_service = artifact_service.ZipFileArtifactService(
jar, self.ARTIFACT_FOLDER)
self._artifact_staging_server = grpc.server(futures.ThreadPoolExecutor())
port = self._artifact_staging_server.add_insecure_port(
'[::]:%s' % requested_port)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_staging_service, self._artifact_staging_server)
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='localhost:%d' % port)
self._artifact_staging_server.start()
_LOGGER.info('Artifact server started on port %s', port)
return port
def _stop_artifact_service(self):
self._artifact_staging_server.stop(1)
self._artifact_staging_service.close()
self._artifact_manifest_location = (
self._artifact_staging_service.retrieval_token(self._job_id))
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def request(self, method, path, expected_status=200, **kwargs):
response = method('%s/%s' % (self._master_url, path), **kwargs)
if response.status_code != expected_status:
raise RuntimeError(response.text)
if response.text:
return response.json()
def get(self, path, **kwargs):
return self.request(requests.get, path, **kwargs)
def post(self, path, **kwargs):
return self.request(requests.post, path, **kwargs)
def delete(self, path, **kwargs):
return self.request(requests.delete, path, **kwargs)
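# Illustrative usage (paths as used elsewhere in this class):
#   self.get('v1/jobs/%s' % self._flink_job_id)
#   self.post('v1/jars/%s/run' % self._flink_jar_id, json={...})
# Any response whose status code differs from expected_status raises
# RuntimeError with the response body as the message; otherwise the parsed
# JSON body (if any) is returned.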
def run(self):
self._stop_artifact_service()
# Move the artifact manifest to the expected location.
with zipfile.ZipFile(self._jar, 'a', compression=zipfile.ZIP_DEFLATED) as z:
with z.open(self._artifact_manifest_location) as fin:
manifest_contents = fin.read()
with z.open(self.ARTIFACT_MANIFEST_PATH, 'w') as fout:
fout.write(manifest_contents)
# Upload the jar and start the job.
with open(self._jar, 'rb') as jar_file:
self._flink_jar_id = self.post(
'v1/jars/upload',
files={'jarfile': ('beam.jar', jar_file)})['filename'].split('/')[-1]
self._jar_uploaded = True
self._flink_job_id = self.post(
'v1/jars/%s/run' % self._flink_jar_id,
json={
'entryClass': 'org.apache.beam.runners.flink.FlinkPipelineRunner'
})['jobid']
os.unlink(self._jar)
_LOGGER.info('Started Flink job as %s' % self._flink_job_id)
def cancel(self):
self.post('v1/jobs/%s/stop' % self._flink_job_id, expected_status=202)
self.delete_jar()
def delete_jar(self):
if self._jar_uploaded:
self._jar_uploaded = False
try:
self.delete('v1/jars/%s' % self._flink_jar_id)
except Exception:
_LOGGER.info(
'Error deleting jar %s' % self._flink_jar_id, exc_info=True)
def _get_state(self):
"""Query flink to get the current state.
:return: tuple of int and Timestamp or None
timestamp will be None if the state has not changed since the last query.
"""
# For just getting the status, execution-result seems cheaper.
flink_status = self.get(
'v1/jobs/%s/execution-result' % self._flink_job_id)['status']['id']
if flink_status == 'COMPLETED':
flink_status = self.get('v1/jobs/%s' % self._flink_job_id)['state']
beam_state = {
'CREATED': beam_job_api_pb2.JobState.STARTING,
'RUNNING': beam_job_api_pb2.JobState.RUNNING,
'FAILING': beam_job_api_pb2.JobState.RUNNING,
'FAILED': beam_job_api_pb2.JobState.FAILED,
'CANCELLING': beam_job_api_pb2.JobState.CANCELLING,
'CANCELED': beam_job_api_pb2.JobState.CANCELLED,
'FINISHED': beam_job_api_pb2.JobState.DONE,
'RESTARTING': beam_job_api_pb2.JobState.RUNNING,
'SUSPENDED': beam_job_api_pb2.JobState.RUNNING,
'RECONCILING': beam_job_api_pb2.JobState.RUNNING,
'IN_PROGRESS': beam_job_api_pb2.JobState.RUNNING,
'COMPLETED': beam_job_api_pb2.JobState.DONE,
}.get(flink_status, beam_job_api_pb2.JobState.UNSPECIFIED)
if self.is_terminal_state(beam_state):
self.delete_jar()
# update the state history if it has changed
return beam_state, self.set_state(beam_state)
def get_state(self):
state, timestamp = self._get_state()
if timestamp is None:
# state has not changed since it was last checked: use previous timestamp
return super(FlinkBeamJob, self).get_state()
else:
return state, timestamp
def get_state_stream(self):
def _state_iter():
sleep_secs = 1.0
while True:
current_state, timestamp = self._get_state()
if timestamp is not None:
# non-None indicates that the state has changed
yield current_state, timestamp
sleep_secs = min(60, sleep_secs * 1.2)
time.sleep(sleep_secs)
for state, timestamp in self.with_state_history(_state_iter()):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
for state, timestamp in self.get_state_stream():
if self.is_terminal_state(state):
response = self.get('v1/jobs/%s/exceptions' % self._flink_job_id)
for ix, exc in enumerate(response['all-exceptions']):
yield beam_job_api_pb2.JobMessage(
message_id='message%d' % ix,
time=str(exc['timestamp']),
importance=
beam_job_api_pb2.JobMessage.MessageImportance.JOB_MESSAGE_ERROR,
message_text=exc['exception'])
yield state, timestamp
break
else:
yield state, timestamp
|
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake 'Build' implementation
#
# Core code for function execution and task handling in the
# BitBake build tools.
#
# Copyright (C) 2003, 2004 Chris Larson
#
# Based on Gentoo's portage.py.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
import os
import sys
import logging
import shlex
import glob
import time
import bb
import bb.msg
import bb.process
from contextlib import nested
from bb import event, utils
bblogger = logging.getLogger('BitBake')
logger = logging.getLogger('BitBake.Build')
NULL = open(os.devnull, 'r+')
# When we execute a python function we'd like certain things
# in all namespaces, hence we add them to __builtins__
# If we do not do this and use the exec globals, they will
# not be available to subfunctions.
__builtins__['bb'] = bb
__builtins__['os'] = os
class FuncFailed(Exception):
def __init__(self, name = None, logfile = None):
self.logfile = logfile
self.name = name
if name:
self.msg = 'Function failed: %s' % name
else:
self.msg = "Function failed"
def __str__(self):
if self.logfile and os.path.exists(self.logfile):
msg = ("%s (log file is located at %s)" %
(self.msg, self.logfile))
else:
msg = self.msg
return msg
class TaskBase(event.Event):
"""Base class for task events"""
def __init__(self, t, logfile, d):
self._task = t
self._package = d.getVar("PF", True)
self.taskfile = d.getVar("FILE", True)
self.taskname = self._task
self.logfile = logfile
self.time = time.time()
event.Event.__init__(self)
self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
def getTask(self):
return self._task
def setTask(self, task):
self._task = task
def getDisplayName(self):
return bb.event.getName(self)[4:]
task = property(getTask, setTask, None, "task property")
class TaskStarted(TaskBase):
"""Task execution started"""
def __init__(self, t, logfile, taskflags, d):
super(TaskStarted, self).__init__(t, logfile, d)
self.taskflags = taskflags
class TaskSucceeded(TaskBase):
"""Task execution completed"""
class TaskFailed(TaskBase):
"""Task execution failed"""
def __init__(self, task, logfile, metadata, errprinted = False):
self.errprinted = errprinted
super(TaskFailed, self).__init__(task, logfile, metadata)
class TaskFailedSilent(TaskBase):
"""Task execution failed (silently)"""
def getDisplayName(self):
# Don't need to tell the user it was silent
return "Failed"
class TaskInvalid(TaskBase):
def __init__(self, task, metadata):
super(TaskInvalid, self).__init__(task, None, metadata)
self._message = "No such task '%s'" % task
class LogTee(object):
def __init__(self, logger, outfile):
self.outfile = outfile
self.logger = logger
self.name = self.outfile.name
def write(self, string):
self.logger.plain(string)
self.outfile.write(string)
def __enter__(self):
self.outfile.__enter__()
return self
def __exit__(self, *excinfo):
self.outfile.__exit__(*excinfo)
def __repr__(self):
return '<LogTee {0}>'.format(self.name)
def flush(self):
self.outfile.flush()
def exec_func(func, d, dirs = None):
"""Execute an BB 'function'"""
body = d.getVar(func)
if not body:
if body is None:
logger.warn("Function %s doesn't exist", func)
return
flags = d.getVarFlags(func)
cleandirs = flags.get('cleandirs')
if cleandirs:
for cdir in d.expand(cleandirs).split():
bb.utils.remove(cdir, True)
bb.utils.mkdirhier(cdir)
if dirs is None:
dirs = flags.get('dirs')
if dirs:
dirs = d.expand(dirs).split()
if dirs:
for adir in dirs:
bb.utils.mkdirhier(adir)
adir = dirs[-1]
else:
adir = d.getVar('B', True)
bb.utils.mkdirhier(adir)
ispython = flags.get('python')
lockflag = flags.get('lockfiles')
if lockflag:
lockfiles = [f for f in d.expand(lockflag).split()]
else:
lockfiles = None
tempdir = d.getVar('T', True)
# The 'or func' fallback allows functions to be executed outside of the
# normal task set, such as buildhistory.
task = d.getVar('BB_RUNTASK', True) or func
if task == func:
taskfunc = task
else:
taskfunc = "%s.%s" % (task, func)
runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
runfile = os.path.join(tempdir, runfn)
bb.utils.mkdirhier(os.path.dirname(runfile))
# Set up the courtesy link to the runfn; this is only done for tasks.
# We create the link just before the run script is created: if we created
# it afterwards and the run script failed, the link would never be created
# because an exception would already have been raised.
if task == func:
runlink = os.path.join(tempdir, 'run.{0}'.format(task))
if runlink:
bb.utils.remove(runlink)
try:
os.symlink(runfn, runlink)
except OSError:
pass
with bb.utils.fileslocked(lockfiles):
if ispython:
exec_func_python(func, d, runfile, cwd=adir)
else:
exec_func_shell(func, d, runfile, cwd=adir)
_functionfmt = """
def {function}(d):
{body}
{function}(d)
"""
logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
def exec_func_python(func, d, runfile, cwd=None):
"""Execute a python BB 'function'"""
bbfile = d.getVar('FILE', True)
code = _functionfmt.format(function=func, body=d.getVar(func, True))
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
script.write(code)
if cwd:
try:
olddir = os.getcwd()
except OSError:
olddir = None
os.chdir(cwd)
bb.debug(2, "Executing python function %s" % func)
try:
comp = utils.better_compile(code, func, bbfile)
utils.better_exec(comp, {"d": d}, code, bbfile)
except:
if sys.exc_info()[0] in (bb.parse.SkipPackage, bb.build.FuncFailed):
raise
raise FuncFailed(func, None)
finally:
bb.debug(2, "Python function %s finished" % func)
if cwd and olddir:
try:
os.chdir(olddir)
except OSError:
pass
def shell_trap_code():
return '''#!/bin/sh\n
# Emit a useful diagnostic if something fails:
bb_exit_handler() {
ret=$?
case $ret in
0) ;;
*) case $BASH_VERSION in
"") echo "WARNING: exit code $ret from a shell command.";;
*) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from
\"$BASH_COMMAND\"";;
esac
exit $ret
esac
}
trap 'bb_exit_handler' 0
set -e
'''
def exec_func_shell(func, d, runfile, cwd=None):
"""Execute a shell function from the metadata
Note on directory behavior. The 'dirs' varflag should contain a list
of the directories you need created prior to execution. The last
item in the list is where we will chdir/cd to.
"""
# Don't let the emitted shell script override PWD
d.delVarFlag('PWD', 'export')
with open(runfile, 'w') as script:
script.write(shell_trap_code())
bb.data.emit_func(func, script, d)
if bb.msg.loggerVerboseLogs:
script.write("set -x\n")
if cwd:
script.write("cd '%s'\n" % cwd)
script.write("%s\n" % func)
script.write('''
# cleanup
ret=$?
trap '' 0
exit $ret
''')
os.chmod(runfile, 0775)
cmd = runfile
if d.getVarFlag(func, 'fakeroot'):
fakerootcmd = d.getVar('FAKEROOT', True)
if fakerootcmd:
cmd = [fakerootcmd, runfile]
if bb.msg.loggerDefaultVerbose:
logfile = LogTee(logger, sys.stdout)
else:
logfile = sys.stdout
bb.debug(2, "Executing shell function %s" % func)
try:
with open(os.devnull, 'r+') as stdin:
bb.process.run(cmd, shell=False, stdin=stdin, log=logfile)
except bb.process.CmdError:
logfn = d.getVar('BB_LOGFILE', True)
raise FuncFailed(func, logfn)
bb.debug(2, "Shell function %s finished" % func)
def _task_data(fn, task, d):
localdata = bb.data.createCopy(d)
localdata.setVar('BB_FILENAME', fn)
localdata.setVar('BB_CURRENTTASK', task[3:])
localdata.setVar('OVERRIDES', 'task-%s:%s' %
(task[3:].replace('_', '-'), d.getVar('OVERRIDES', False)))
localdata.finalize()
bb.data.expandKeys(localdata)
return localdata
def _exec_task(fn, task, d, quieterr):
"""Execute a BB 'task'
Execution of a task involves a bit more setup than executing a function,
running it with its own local metadata, and with some useful variables set.
"""
if not d.getVarFlag(task, 'task'):
event.fire(TaskInvalid(task, d), d)
logger.error("No such task: %s" % task)
return 1
logger.debug(1, "Executing task %s", task)
localdata = _task_data(fn, task, d)
tempdir = localdata.getVar('T', True)
if not tempdir:
bb.fatal("T variable not set, unable to build")
# Change nice level if we're asked to
nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
if nice:
curnice = os.nice(0)
nice = int(nice) - curnice
newnice = os.nice(nice)
logger.debug(1, "Renice to %s " % newnice)
bb.utils.mkdirhier(tempdir)
# Determine the logfile to generate
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
logbase = logfmt.format(task=task, pid=os.getpid())
# Document the order of the tasks...
logorder = os.path.join(tempdir, 'log.task_order')
try:
with open(logorder, 'a') as logorderfile:
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
except OSError:
logger.exception("Opening log file '%s'", logorder)
pass
# Setup the courtesy link to the logfn
loglink = os.path.join(tempdir, 'log.{0}'.format(task))
logfn = os.path.join(tempdir, logbase)
if loglink:
bb.utils.remove(loglink)
try:
os.symlink(logbase, loglink)
except OSError:
pass
prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)
class ErrorCheckHandler(logging.Handler):
def __init__(self):
self.triggered = False
logging.Handler.__init__(self, logging.ERROR)
def emit(self, record):
self.triggered = True
# Handle logfiles
si = open('/dev/null', 'r')
try:
bb.utils.mkdirhier(os.path.dirname(logfn))
logfile = open(logfn, 'w')
except OSError:
logger.exception("Opening log file '%s'", logfn)
pass
# Dup the existing fds so we dont lose them
osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
# Replace those fds with our own
os.dup2(si.fileno(), osi[1])
os.dup2(logfile.fileno(), oso[1])
os.dup2(logfile.fileno(), ose[1])
# Ensure python logging goes to the logfile
handler = logging.StreamHandler(logfile)
handler.setFormatter(logformatter)
# Always enable full debug output into task logfiles
handler.setLevel(logging.DEBUG - 2)
bblogger.addHandler(handler)
errchk = ErrorCheckHandler()
bblogger.addHandler(errchk)
localdata.setVar('BB_LOGFILE', logfn)
localdata.setVar('BB_RUNTASK', task)
flags = localdata.getVarFlags(task)
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
try:
for func in (prefuncs or '').split():
exec_func(func, localdata)
exec_func(task, localdata)
for func in (postfuncs or '').split():
exec_func(func, localdata)
except FuncFailed as exc:
if quieterr:
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
else:
errprinted = errchk.triggered
logger.error(str(exc))
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
return 1
finally:
sys.stdout.flush()
sys.stderr.flush()
bblogger.removeHandler(handler)
# Restore the backup fds
os.dup2(osi[0], osi[1])
os.dup2(oso[0], oso[1])
os.dup2(ose[0], ose[1])
# Close the backup fds
os.close(osi[0])
os.close(oso[0])
os.close(ose[0])
si.close()
logfile.close()
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
logger.debug(2, "Zero size logfn %s, removing", logfn)
bb.utils.remove(logfn)
bb.utils.remove(loglink)
event.fire(TaskSucceeded(task, logfn, localdata), localdata)
if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
make_stamp(task, localdata)
return 0
def exec_task(fn, task, d, profile = False):
try:
quieterr = False
if d.getVarFlag(task, "quieterrors") is not None:
quieterr = True
if profile:
profname = "profile-%s.log" % (d.getVar("PN", True) + "-" + task)
try:
import cProfile as profile
except:
import profile
prof = profile.Profile()
ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr)
prof.dump_stats(profname)
bb.utils.process_profilelog(profname)
return ret
else:
return _exec_task(fn, task, d, quieterr)
except Exception:
from traceback import format_exc
if not quieterr:
logger.error("Build of %s failed" % (task))
logger.error(format_exc())
failedevent = TaskFailed(task, None, d, True)
event.fire(failedevent, d)
return 1
def stamp_internal(taskname, d, file_name):
"""
Internal stamp helper function
Makes sure the stamp directory exists
Returns the stamp path+filename
In the bitbake core, d can be a CacheData and file_name will be set.
When called in task context, d will be a data store, file_name will not be set
"""
taskflagname = taskname
if taskname.endswith("_setscene") and taskname != "do_setscene":
taskflagname = taskname.replace("_setscene", "")
if file_name:
stamp = d.stamp_base[file_name].get(taskflagname) or d.stamp[file_name]
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
else:
stamp = d.getVarFlag(taskflagname, 'stamp-base', True) or d.getVar('STAMP', True)
file_name = d.getVar('BB_FILENAME', True)
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
if not stamp:
return
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
stampdir = os.path.dirname(stamp)
if bb.parse.cached_mtime_noerror(stampdir) == 0:
bb.utils.mkdirhier(stampdir)
return stamp
def stamp_cleanmask_internal(taskname, d, file_name):
"""
Internal stamp helper function to generate stamp cleaning mask
Returns the stamp path+filename
In the bitbake core, d can be a CacheData and file_name will be set.
When called in task context, d will be a data store, file_name will not be set
"""
taskflagname = taskname
if taskname.endswith("_setscene") and taskname != "do_setscene":
taskflagname = taskname.replace("_setscene", "")
if file_name:
stamp = d.stamp_base_clean[file_name].get(taskflagname) or d.stampclean[file_name]
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
else:
stamp = d.getVarFlag(taskflagname, 'stamp-base-clean', True) or d.getVar('STAMPCLEAN', True)
file_name = d.getVar('BB_FILENAME', True)
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
if not stamp:
return []
cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo)
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
def make_stamp(task, d, file_name = None):
"""
Creates/updates a stamp for a given task
(d can be a data dict or dataCache)
"""
cleanmask = stamp_cleanmask_internal(task, d, file_name)
for mask in cleanmask:
for name in glob.glob(mask):
# Preserve sigdata files in the stamps directory
if "sigdata" in name:
continue
# Preserve taint files in the stamps directory
if name.endswith('.taint'):
continue
os.unlink(name)
stamp = stamp_internal(task, d, file_name)
# Remove the file and recreate to force timestamp
# change on broken NFS filesystems
if stamp:
bb.utils.remove(stamp)
open(stamp, "w").close()
# If we're in task context, write out a signature file for each task
# as it completes
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
file_name = d.getVar('BB_FILENAME', True)
bb.parse.siggen.dump_sigtask(file_name, task, d.getVar('STAMP', True), True)
def del_stamp(task, d, file_name = None):
"""
Removes a stamp for a given task
(d can be a data dict or dataCache)
"""
stamp = stamp_internal(task, d, file_name)
bb.utils.remove(stamp)
def write_taint(task, d, file_name = None):
"""
Creates a "taint" file which will force the specified task and its
dependents to be re-run the next time by influencing the value of its
taskhash.
(d can be a data dict or dataCache)
"""
import uuid
if file_name:
taintfn = d.stamp[file_name] + '.' + task + '.taint'
else:
taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
bb.utils.mkdirhier(os.path.dirname(taintfn))
    # The specific content of the taint file is not really important; we
    # just need it to be random, so a random UUID is used
with open(taintfn, 'w') as taintf:
taintf.write(str(uuid.uuid4()))
def stampfile(taskname, d, file_name = None):
"""
Return the stamp for a given task
(d can be a data dict or dataCache)
"""
return stamp_internal(taskname, d, file_name)
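# Illustrative sketch (not part of the original module): how the stamp helpers
# above are typically combined from task context. The datastore `d` is assumed
# to be a fully configured BitBake data store and `do_compile` is a
# hypothetical task name used purely for demonstration.
def _stamp_usage_example(d):
    make_stamp('do_compile', d)          # write/refresh the stamp file
    path = stampfile('do_compile', d)    # query the stamp path
    write_taint('do_compile', d)         # force a re-run via a taint file
    del_stamp('do_compile', d)           # and remove the stamp again
    return path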
def add_tasks(tasklist, deltasklist, d):
task_deps = d.getVar('_task_deps')
if not task_deps:
task_deps = {}
if not 'tasks' in task_deps:
task_deps['tasks'] = []
if not 'parents' in task_deps:
task_deps['parents'] = {}
for task in tasklist:
task = d.expand(task)
if task in deltasklist:
continue
d.setVarFlag(task, 'task', 1)
if not task in task_deps['tasks']:
task_deps['tasks'].append(task)
flags = d.getVarFlags(task)
def getTask(name):
if not name in task_deps:
task_deps[name] = {}
if name in flags:
deptask = d.expand(flags[name])
task_deps[name][task] = deptask
getTask('depends')
getTask('rdepends')
getTask('deptask')
getTask('rdeptask')
getTask('recrdeptask')
getTask('recideptask')
getTask('nostamp')
getTask('fakeroot')
getTask('noexec')
getTask('umask')
task_deps['parents'][task] = []
if 'deps' in flags:
for dep in flags['deps']:
dep = d.expand(dep)
task_deps['parents'][task].append(dep)
# don't assume holding a reference
d.setVar('_task_deps', task_deps)
def addtask(task, before, after, d):
if task[:3] != "do_":
task = "do_" + task
d.setVarFlag(task, "task", 1)
bbtasks = d.getVar('__BBTASKS') or []
if not task in bbtasks:
bbtasks.append(task)
d.setVar('__BBTASKS', bbtasks)
existing = d.getVarFlag(task, "deps") or []
if after is not None:
# set up deps for function
for entry in after.split():
if entry not in existing:
existing.append(entry)
d.setVarFlag(task, "deps", existing)
if before is not None:
# set up things that depend on this func
for entry in before.split():
existing = d.getVarFlag(entry, "deps") or []
if task not in existing:
d.setVarFlag(entry, "deps", [task] + existing)
def deltask(task, d):
if task[:3] != "do_":
task = "do_" + task
bbtasks = d.getVar('__BBDELTASKS') or []
if not task in bbtasks:
bbtasks.append(task)
d.setVar('__BBDELTASKS', bbtasks)
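# Illustrative sketch (not part of the original module): registering and
# removing tasks with the helpers above. `d` is assumed to be a parse-time
# BitBake data store; the task names are hypothetical.
def _task_registration_example(d):
    # Declare do_deploy to run after do_install and before do_build
    addtask('deploy', before='do_build', after='do_install', d=d)
    # Later decide the task is not wanted for this recipe
    deltask('deploy', d)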
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from tornado.web import authenticated, HTTPError
from wtforms import (Form, StringField, SelectField, SelectMultipleField,
TextAreaField, validators)
from qiita_db.study import Study, StudyPerson
from qiita_db.util import get_timeseries_types, get_environmental_packages
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.util import check_access
from qiita_core.util import execute_as_transaction
class StudyEditorForm(Form):
r"""Reduced WTForm for editing the study information
    Allows editing any study information that does not require metadata
    changes
Attributes
----------
study_title
study_alias
pubmed_id
study_abstract
study_description
principal_investigator
lab_person
Parameters
----------
study : Study, optional
The study to be modified. If not provided, the Form will not be
prepopulated and can be used for study creation
See Also
--------
StudyEditorExtendedForm
wtforms.Form
"""
study_title = StringField('Study Title', [validators.Required()])
study_alias = StringField('Study Alias', [validators.Required()])
pubmed_id = StringField('PubMed ID')
study_abstract = TextAreaField('Study Abstract', [validators.Required()])
study_description = StringField('Study Description',
[validators.Required()])
# The choices for these "people" fields will be filled from the database
principal_investigator = SelectField('Principal Investigator',
[validators.Required()],
coerce=lambda x: x)
lab_person = SelectField('Lab Person', coerce=lambda x: x)
@execute_as_transaction
def __init__(self, study=None, **kwargs):
super(StudyEditorForm, self).__init__(**kwargs)
# Get people from the study_person table to populate the PI and
# lab_person fields
choices = [(sp.id, u"%s, %s"
% (sp.name.decode('utf-8'),
sp.affiliation.decode('utf-8')))
for sp in StudyPerson.iter()]
choices.insert(0, ('', ''))
self.lab_person.choices = choices
self.principal_investigator.choices = choices
# If a study is provided, put its values in the form
if study:
study_info = study.info
self.study_title.data = study.title.decode('utf-8')
self.study_alias.data = study_info['study_alias'].decode('utf-8')
self.pubmed_id.data = ",".join(study.pmids).decode('utf-8')
self.study_abstract.data = study_info[
'study_abstract'].decode('utf-8')
self.study_description.data = study_info[
'study_description'].decode('utf-8')
self.principal_investigator.data = study_info[
'principal_investigator_id']
self.lab_person.data = study_info['lab_person_id']
class StudyEditorExtendedForm(StudyEditorForm):
r"""Extended WTForm for editing the study information
Allows editing all the study information
Attributes
----------
environmental_packages
timeseries
Parameters
----------
study : Study, optional
The study to be modified. If not provided, the Form will not be
prepopulated and can be used for study creation
See Also
--------
StudyEditorForm
wtforms.Form
"""
environmental_packages = SelectMultipleField('Environmental Packages',
[validators.Required()])
timeseries = SelectField('Event-Based Data', coerce=lambda x: x)
@execute_as_transaction
def __init__(self, study=None, **kwargs):
super(StudyEditorExtendedForm, self).__init__(study=study, **kwargs)
# Populate the choices for the environmental packages
# Get environmental packages returns a list of tuples of the form
# (env package name, table name), but we need a list of
# (table name, env package name) so the actual environmental package
# name is displayed on the GUI
self.environmental_packages.choices = [
(name, name) for name, table in get_environmental_packages()]
# Get the available timeseries types to populate the timeseries field
choices = [[time_id, '%s, %s' % (int_t, time_t)]
for time_id, time_t, int_t in get_timeseries_types()]
# Change None, None to 'No timeseries', just for GUI purposes
choices[0][1] = 'No timeseries'
self.timeseries.choices = choices
# If a study is provided, put its values in the form
if study:
study_info = study.info
self.environmental_packages.data = study.environmental_packages
self.timeseries.data = study_info['timeseries_type_id']
class StudyEditHandler(BaseHandler):
@execute_as_transaction
def _check_study_exists_and_user_access(self, study_id):
try:
study = Study(int(study_id))
except QiitaDBUnknownIDError:
# Study not in database so fail nicely
raise HTTPError(404, "Study %s does not exist" % study_id)
# We need to check if the user has access to the study
check_access(self.current_user, study)
return study
def _get_study_person_id(self, index, new_people_info):
"""Returns the id of the study person, creating if needed
        If index < 0, it means that a new study person needs to be created,
        and its information is stored in new_people_info[index]
Parameters
----------
index : int
The index of the study person
new_people_info : list of tuples
The information of the new study persons added through the
interface
Returns
-------
int
the study person id
"""
# If the ID is less than 0, then this is a new person
if index < 0:
return StudyPerson.create(*new_people_info[index]).id
return index
@authenticated
@execute_as_transaction
def get(self, study_id=None):
study = None
form_factory = StudyEditorExtendedForm
if study_id:
# Check study and user access
study = self._check_study_exists_and_user_access(study_id)
# If the study is not sandboxed, we use the short
# version of the form
if study.status != 'sandbox':
form_factory = StudyEditorForm
creation_form = form_factory(study=study)
self.render('edit_study.html',
creation_form=creation_form, study=study)
@authenticated
@execute_as_transaction
def post(self, study=None):
the_study = None
form_factory = StudyEditorExtendedForm
if study:
# Check study and user access
the_study = self._check_study_exists_and_user_access(study)
# If the study is not sandbox, we use the short version
if the_study.status != 'sandbox':
form_factory = StudyEditorForm
# Get the form data from the request arguments
form_data = form_factory()
form_data.process(data=self.request.arguments)
# Get information about new people that need to be added to the DB
# Phones and addresses are optional, so make sure that we have None
# values instead of empty strings
new_people_info = [
(name, email, affiliation, phone or None, address or None)
for name, email, affiliation, phone, address in
zip(self.get_arguments('new_people_names'),
self.get_arguments('new_people_emails'),
self.get_arguments('new_people_affiliations'),
self.get_arguments('new_people_phones'),
self.get_arguments('new_people_addresses'))]
# New people will be indexed with negative numbers, so we reverse
# the list here
new_people_info.reverse()
index = int(form_data.data['principal_investigator'][0])
PI = self._get_study_person_id(index, new_people_info)
if form_data.data['lab_person'][0]:
index = int(form_data.data['lab_person'][0])
lab_person = self._get_study_person_id(index, new_people_info)
else:
lab_person = None
# TODO: MIXS compliant? Always true, right?
info = {
'lab_person_id': lab_person,
'principal_investigator_id': PI,
'metadata_complete': False,
'mixs_compliant': True,
'study_description': form_data.data['study_description'][0],
'study_alias': form_data.data['study_alias'][0],
'study_abstract': form_data.data['study_abstract'][0]}
if 'timeseries' in form_data.data and form_data.data['timeseries']:
info['timeseries_type_id'] = form_data.data['timeseries'][0]
study_title = form_data.data['study_title'][0]
if the_study:
# We are under editing, so just update the values
the_study.title = study_title
the_study.info = info
msg = ('Study <a href="/study/description/%d">%s</a> '
'successfully updated' %
(the_study.id, form_data.data['study_title'][0]))
else:
# create the study
# TODO: Fix this EFO once ontology stuff from emily is added
the_study = Study.create(self.current_user, study_title,
efo=[1], info=info)
msg = ('Study <a href="/study/description/%d">%s</a> '
'successfully created' %
(the_study.id, form_data.data['study_title'][0]))
# Add the environmental packages, this attribute can only be edited
# if the study is not public, otherwise this cannot be changed
if isinstance(form_data, StudyEditorExtendedForm):
the_study.environmental_packages = form_data.data[
'environmental_packages']
pubmed_ids = form_data.data['pubmed_id'][0]
if pubmed_ids:
            # The user can provide a comma-separated list
pmids = pubmed_ids.split(',')
# Make sure that we strip the spaces from the pubmed ids
the_study.pmids = [pmid.strip() for pmid in pmids]
self.render('index.html', message=msg, level='success')
class CreateStudyAJAX(BaseHandler):
@authenticated
def get(self):
study_title = self.get_argument('study_title', None)
old_study_title = self.get_argument('old_study_title', None)
if study_title is None:
to_write = False
elif study_title == old_study_title:
to_write = True
else:
            to_write = not Study.exists(study_title)
self.write(str(to_write))
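# Illustrative sketch (not part of Qiita): how the handlers above could be
# mounted in a Tornado application. The URL patterns shown here are
# assumptions for demonstration only; the real routing lives elsewhere in
# qiita_pet.
def _example_application():
    from tornado.web import Application
    return Application([
        (r"/study/edit/(\d+)?", StudyEditHandler),
        (r"/check_study/", CreateStudyAJAX),
    ])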
|
|
from __future__ import unicode_literals
from django.db import models
from datetime import timedelta, date
from django.core.validators import URLValidator
# Create your models here.
COUNTRIES = (
('AT', 'Austria'),
('BE', 'Belgium'),
('BG', 'Bulgaria'),
('HR', 'Croatia'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('EE', 'Estonia'),
('FI', 'Finland'),
('FR', 'France'),
('DE', 'Germany'),
('GR', 'Greece'),
('GE', 'Georgia'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IE', 'Ireland'),
('IT', 'Italy'),
('KZ', 'Kazakhstan'),
('XK', 'Kosovo'),
('LV', 'Latvia'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MK', 'Macedonia'),
('MT', 'Malta'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('ME', 'Montenegro'),
('NL', 'Netherlands'),
('NO', 'Norway'),
('PL', 'Poland'),
('PT', 'Portugal'),
('RO', 'Romania'),
('RU', 'Russia'),
('CS', 'Serbia'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('ES', 'Spain'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('TR', 'Turkey'),
('UA', 'Ukraine'),
('UK', 'UK'),
)
class Artist(models.Model):
name = models.CharField(max_length=70, blank=True, null=True)
country = models.CharField(
max_length=70, blank=True, null=True, choices=COUNTRIES)
def num_videos(self):
return Video.objects.filter(artist=self.id).count()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class Video(models.Model):
YEAR_CHOICES = [(r,r) for r in range(2000, date.today().year + 1)]
YEAR_CHOICES.append((None, '-'))
VIDEO_TYPES = (
(1, 'Music video'),
(2, 'Interview'),
(3, 'Documentary'),
)
title = models.CharField(max_length=70, blank=True, null=True)
artist = models.ManyToManyField(Artist, blank=True)
featured = models.ManyToManyField(Artist,
related_name="featured", blank=True)
video_file = models.FileField(upload_to='videos/', null=True, default=None)
length = models.DurationField(default=timedelta(minutes=0))
year = models.IntegerField(choices=YEAR_CHOICES, default=YEAR_CHOICES[-1][0],
null=True, blank=True)
active = models.BooleanField(default=True)
plays = models.IntegerField(default=0)
type = models.IntegerField(choices=VIDEO_TYPES, default=VIDEO_TYPES[0][0])
__original_video_file = None
def __init__(self, *args, **kwargs):
super(Video, self).__init__(*args, **kwargs)
self.__original_video_file = self.video_file
    def get_verbose_country(self):
        # Use .get() so a video without an artist country does not raise KeyError
        return dict(COUNTRIES).get(self.get_country())
def get_length(self):
return round(float(self.length.seconds) +
float(self.length.microseconds)/1000000, 2)
def get_byline(self):
byline = ''
artists = list(self.artist.all())
names = [a.name for a in artists]
if len(artists) == 1:
byline = names[0]
elif len(artists) > 1:
byline = ', '.join(names[:-1])
byline += ' & ' + names[-1]
return byline
def get_featured_byline(self):
byline = ''
featured = list(self.featured.all())
names = [a.name for a in featured]
if len(featured) > 0:
byline += 'ft. '
if len(featured) == 1:
byline += names[0]
elif len(featured) > 1:
byline += ', '.join(names[:-1])
byline += ' & ' + names[-1]
return byline
def get_country(self):
'''
Return the country for the video (the country of its first artist)
'''
artists = self.artist.all()
try:
return artists[0].country
except IndexError:
return None
class Meta:
ordering = ['title']
def __unicode__(self):
return self.title
class Transition(models.Model):
title = models.CharField(max_length=70, blank=True, null=True)
length = models.DurationField(default=timedelta(minutes=0))
active = models.BooleanField(default=True)
plays = models.IntegerField(default=0)
frequency = models.IntegerField(default=0)
video_file = models.FileField(upload_to='transitions/', null=True, default=None)
__original_video_file = None
def get_length(self):
return round(float(self.length.seconds) +
float(self.length.microseconds)/1000000, 2)
def __unicode__(self):
return str(self.id)
class Playlist(models.Model):
title = models.CharField(max_length=70, blank=True, null=True)
active = models.BooleanField(default=True)
auto = models.BooleanField(default=False)
play_order = models.IntegerField(blank=False, null=False, default=0)
video = models.ManyToManyField(Video, through='PlaylistVideo')
def get_length(self):
playlist_videos = PlaylistVideo.objects.filter(playlist=self.id,
active=1).prefetch_related('video')
total_length = timedelta(seconds=0)
for pl_vi in playlist_videos:
total_length += pl_vi.video.length
return total_length
def get_num_videos(self):
return PlaylistVideo.objects.filter(playlist=self.id).count()
def get_num_active_videos(self):
return PlaylistVideo.objects.filter(playlist=self.id,
active=True).count()
def __unicode__(self):
return '(%s) %s [%s]' % (self.id, self.title, self.play_order)
class PlaylistVideo(models.Model):
playlist = models.ForeignKey(Playlist)
video = models.ForeignKey(Video, limit_choices_to={'active': True},)
play_order = models.IntegerField(blank=False, null=False, default=0)
active = models.BooleanField(default=True)
def get_length(self):
return self.video.get_length()
def save(self, *args, **kwargs):
'''
Increment play order based on last value
'''
if self.play_order == 0:
self.play_order = PlaylistVideo.objects.filter(
playlist=self.playlist).count() + 1
        super(PlaylistVideo, self).save(*args, **kwargs)
class Meta:
ordering = ['video__title']
def __unicode__(self):
return str(self.video.id)
# Proxy models
class VideoArtistProxy(Video.artist.through):
class Meta:
proxy = True
def __unicode__(self):
return str(self.video)
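# Illustrative sketch (not part of the original app): typical ORM usage for
# the models above, assuming Django is configured and migrations have been
# applied. The artist/video names are made up for demonstration.
def _example_usage():
    artist = Artist.objects.create(name='Example Artist', country='SE')
    video = Video.objects.create(title='Example Video',
                                 length=timedelta(minutes=3, seconds=30),
                                 year=2015, type=1)
    video.artist.add(artist)
    playlist = Playlist.objects.create(title='Example Playlist')
    # play_order is filled in automatically by PlaylistVideo.save()
    PlaylistVideo.objects.create(playlist=playlist, video=video)
    return video.get_byline(), playlist.get_length()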
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.translation.v3beta1 TranslationService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import google.api_core.protobuf_helpers
import grpc
from google.cloud.translate_v3beta1.gapic import translation_service_client_config
from google.cloud.translate_v3beta1.gapic.transports import (
translation_service_grpc_transport,
)
from google.cloud.translate_v3beta1.proto import translation_service_pb2
from google.cloud.translate_v3beta1.proto import translation_service_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-translate"
).version
class TranslationServiceClient(object):
"""Provides natural language translation operations."""
SERVICE_ADDRESS = "translate.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.translation.v3beta1.TranslationService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def glossary_path(cls, project, location, glossary):
"""Return a fully-qualified glossary string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/glossaries/{glossary}",
project=project,
location=location,
glossary=glossary,
)
@classmethod
def location_path(cls, project, location):
"""Return a fully-qualified location string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.TranslationServiceGrpcTransport,
Callable[[~.Credentials, type], ~.TranslationServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = translation_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=translation_service_grpc_transport.TranslationServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = translation_service_grpc_transport.TranslationServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def translate_text(
self,
contents,
target_language_code,
parent,
mime_type=None,
source_language_code=None,
model=None,
glossary_config=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Translates input text and returns translated text.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> # TODO: Initialize `contents`:
>>> contents = []
>>>
>>> # TODO: Initialize `target_language_code`:
>>> target_language_code = ''
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.translate_text(contents, target_language_code, parent)
Args:
contents (list[str]): Required. The content of the input in string format.
We recommend the total content be less than 30k codepoints.
Use BatchTranslateText for larger text.
target_language_code (str): Required. The BCP-47 language code to use for translation of the input
text, set to one of the language codes listed in Language Support.
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}`` or
``projects/{project-id}/locations/{location-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
Non-global location is required for requests using AutoML models or
custom glossaries.
Models and glossaries must be within the same region (have same
location-id), otherwise an INVALID\_ARGUMENT (400) error is returned.
mime_type (str): Optional. The format of the source text, for example, "text/html",
"text/plain". If left blank, the MIME type defaults to "text/html".
source_language_code (str): Optional. The BCP-47 language code of the input text if
known, for example, "en-US" or "sr-Latn". Supported language codes are
listed in Language Support. If the source language isn't specified, the API
attempts to identify the source language automatically and returns the
source language within the response.
model (str): Optional. The ``model`` type requested for this translation.
The format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
For global (non-regionalized) requests, use ``location-id`` ``global``.
For example,
``projects/{project-id}/locations/global/models/general/nmt``.
If missing, the system decides which google base model to use.
glossary_config (Union[dict, ~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig]): Optional. Glossary to be applied. The glossary must be within the same
region (have the same location-id) as the model, otherwise an
INVALID\_ARGUMENT (400) error is returned.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig`
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.TranslateTextResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "translate_text" not in self._inner_api_calls:
self._inner_api_calls[
"translate_text"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.translate_text,
default_retry=self._method_configs["TranslateText"].retry,
default_timeout=self._method_configs["TranslateText"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.TranslateTextRequest(
contents=contents,
target_language_code=target_language_code,
parent=parent,
mime_type=mime_type,
source_language_code=source_language_code,
model=model,
glossary_config=glossary_config,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["translate_text"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def detect_language(
self,
parent,
model=None,
content=None,
mime_type=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Detects the language of text within a request.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.detect_language(parent)
Args:
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}/locations/{location-id}`` or
``projects/{project-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
Only models within the same region (has same location-id) can be used.
Otherwise an INVALID\_ARGUMENT (400) error is returned.
model (str): Optional. The language detection model to be used.
Format:
``projects/{project-id}/locations/{location-id}/models/language-detection/{model-id}``
Only one language detection model is currently supported:
``projects/{project-id}/locations/{location-id}/models/language-detection/default``.
If not specified, the default model is used.
content (str): The content of the input stored as a string.
mime_type (str): Optional. The format of the source text, for example, "text/html",
"text/plain". If left blank, the MIME type defaults to "text/html".
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.DetectLanguageResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "detect_language" not in self._inner_api_calls:
self._inner_api_calls[
"detect_language"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.detect_language,
default_retry=self._method_configs["DetectLanguage"].retry,
default_timeout=self._method_configs["DetectLanguage"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(content=content)
request = translation_service_pb2.DetectLanguageRequest(
parent=parent,
model=model,
content=content,
mime_type=mime_type,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["detect_language"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_supported_languages(
self,
parent,
display_language_code=None,
model=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of supported languages for translation.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.get_supported_languages(parent)
Args:
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}`` or
``projects/{project-id}/locations/{location-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
Non-global location is required for AutoML models.
Only models within the same region (have same location-id) can be used,
otherwise an INVALID\_ARGUMENT (400) error is returned.
display_language_code (str): Optional. The language to use to return localized, human readable names
of supported languages. If missing, then display names are not returned
in a response.
model (str): Optional. Get supported languages of this model.
The format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
Returns languages supported by the specified model. If missing, we get
supported languages of Google general base (PBMT) model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.SupportedLanguages` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_supported_languages" not in self._inner_api_calls:
self._inner_api_calls[
"get_supported_languages"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_supported_languages,
default_retry=self._method_configs["GetSupportedLanguages"].retry,
default_timeout=self._method_configs["GetSupportedLanguages"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.GetSupportedLanguagesRequest(
parent=parent, display_language_code=display_language_code, model=model
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_supported_languages"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def batch_translate_text(
self,
parent,
source_language_code,
target_language_codes,
input_configs,
output_config,
models=None,
glossaries=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Translates a large volume of text in asynchronous batch mode.
This function provides real-time output as the inputs are being processed.
If caller cancels a request, the partial results (for an input file, it's
all or nothing) may still be available on the specified output location.
This call returns immediately and you can
use google.longrunning.Operation.name to poll the status of the call.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `source_language_code`:
>>> source_language_code = ''
>>>
>>> # TODO: Initialize `target_language_codes`:
>>> target_language_codes = []
>>>
>>> # TODO: Initialize `input_configs`:
>>> input_configs = []
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.batch_translate_text(parent, source_language_code, target_language_codes, input_configs, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Location to make a call. Must refer to a caller's project.
Format: ``projects/{project-id}/locations/{location-id}``.
The ``global`` location is not supported for batch translation.
Only AutoML Translation models or glossaries within the same region
(have the same location-id) can be used, otherwise an INVALID\_ARGUMENT
(400) error is returned.
source_language_code (str): Required. Source language code.
target_language_codes (list[str]): Required. Specify up to 10 language codes here.
input_configs (list[Union[dict, ~google.cloud.translate_v3beta1.types.InputConfig]]): Required. Input configurations.
The total number of files matched should be <= 1000.
The total content size should be <= 100M Unicode codepoints.
The files must use UTF-8 encoding.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.InputConfig`
output_config (Union[dict, ~google.cloud.translate_v3beta1.types.OutputConfig]): Required. Output configuration.
If 2 input configs match to the same file (that is, same input path),
we don't generate output for duplicate inputs.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.OutputConfig`
models (dict[str -> str]): Optional. The models to use for translation. Map's key is target
language code. Map's value is model name. Value can be a built-in
general model, or an AutoML Translation model.
The value format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
If the map is empty or a specific model is not requested for a language
pair, then default google model (nmt) is used.
glossaries (dict[str -> Union[dict, ~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig]]): Optional. Glossaries to be applied for translation.
It's keyed by target language code.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig`
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_translate_text" not in self._inner_api_calls:
self._inner_api_calls[
"batch_translate_text"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_translate_text,
default_retry=self._method_configs["BatchTranslateText"].retry,
default_timeout=self._method_configs["BatchTranslateText"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.BatchTranslateTextRequest(
parent=parent,
source_language_code=source_language_code,
target_language_codes=target_language_codes,
input_configs=input_configs,
output_config=output_config,
models=models,
glossaries=glossaries,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_translate_text"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.BatchTranslateResponse,
metadata_type=translation_service_pb2.BatchTranslateMetadata,
)
def create_glossary(
self,
parent,
glossary,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a glossary and returns the long-running operation. Returns
NOT\_FOUND, if the project doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `glossary`:
>>> glossary = {}
>>>
>>> response = client.create_glossary(parent, glossary)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project name.
glossary (Union[dict, ~google.cloud.translate_v3beta1.types.Glossary]): Required. The glossary to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.Glossary`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"create_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_glossary,
default_retry=self._method_configs["CreateGlossary"].retry,
default_timeout=self._method_configs["CreateGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.CreateGlossaryRequest(
parent=parent, glossary=glossary
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["create_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.Glossary,
metadata_type=translation_service_pb2.CreateGlossaryMetadata,
)
def list_glossaries(
self,
parent,
page_size=None,
filter_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists glossaries in a project. Returns NOT\_FOUND, if the project
doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_glossaries(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_glossaries(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The name of the project from which to list all of the glossaries.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): Optional. Filter specifying constraints of a list operation.
Filtering is not supported yet, and the parameter currently has no effect.
If missing, no filtering is performed.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.translate_v3beta1.types.Glossary` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_glossaries" not in self._inner_api_calls:
self._inner_api_calls[
"list_glossaries"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_glossaries,
default_retry=self._method_configs["ListGlossaries"].retry,
default_timeout=self._method_configs["ListGlossaries"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.ListGlossariesRequest(
parent=parent, page_size=page_size, filter=filter_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_glossaries"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="glossaries",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_glossary(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a glossary. Returns NOT\_FOUND, if the glossary doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]')
>>>
>>> response = client.get_glossary(name)
Args:
name (str): Required. The name of the glossary to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.Glossary` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"get_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_glossary,
default_retry=self._method_configs["GetGlossary"].retry,
default_timeout=self._method_configs["GetGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.GetGlossaryRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_glossary(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a glossary, or cancels glossary construction if the glossary
isn't created yet. Returns NOT\_FOUND, if the glossary doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]')
>>>
>>> response = client.delete_glossary(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The name of the glossary to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"delete_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_glossary,
default_retry=self._method_configs["DeleteGlossary"].retry,
default_timeout=self._method_configs["DeleteGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.DeleteGlossaryRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["delete_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.DeleteGlossaryResponse,
metadata_type=translation_service_pb2.DeleteGlossaryMetadata,
)
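# Illustrative sketch (not part of the generated client): creating a client
# and issuing a simple translation request. The service-account path, project
# id and language codes below are placeholders, not real values.
def _example_translate():
    client = TranslationServiceClient.from_service_account_file(
        "service-account.json")
    parent = client.location_path("my-project", "global")
    response = client.translate_text(
        contents=["Hello, world!"],
        target_language_code="de",
        parent=parent,
    )
    return [t.translated_text for t in response.translations]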
|
|
#!/usr/bin/env python
import sweeny_c as syc
import numpy as np
class Sweeny(object):
"""
Provides 4 different implementations of Sweeny's algorithm for the RCM
on a two-dimensional square lattice with periodic boundary conditions.
"""
def init_sim(self,q,l,beta,coupl,cutoff,tslength,rngseed,impl='ibfs'):
"""
Resets/Prepares to/for simulation with specified parameters.
Parameters
----------
q: float
Cluster-weight, (>0).
l: int
Number of vertices per dimension, (>2)
beta: float
Inverse temperature, (>0)
coupl: float
Coupling constant, (>0)/Ferromagnetic
cutoff: int
Number of sweeps for equilibration, (>0)
tslength: int
Number of sweeps of actual simulation. This also corresponds
to the number of observable samples, (>0)
rngseed: int
Seed for the pseudo-random-number generator, (>0)
impl: str
Type of implementation. 'ibfs', 'sbfs', 'uf' and 'dc'
correspond to Interleaved/Sequential BFS, Union-Find and
            Dynamic Connectivity, respectively.
Raises
------
        ValueError if the requirements for the simulation parameters are not fulfilled.
        KeyError if the impl identifier is not chosen among the 4 possibilities.
"""
if self.__sim_i:
syc.sy_destroy()
if self.__arr_av and tslength != self.tslength:
del self.__ts_u32, self.__ts_u64
self.__arr_av = False
self.q,self.l,self.beta,self.coupl,self.cutoff,\
self.tslength,self.rngseed = float(q),int(l),float(beta),\
float(coupl),int(cutoff),int(tslength),int(rngseed)
        if not (self.q > 0 and self.l > 2 and self.beta > 0 and
                self.coupl > 0 and self.cutoff > 0 and self.tslength > 0):
            raise ValueError("simulation parameters do not satisfy the "
                             "documented requirements")
        self.supp_impl = ('ibfs', 'sbfs', 'dc', 'uf')
        self.impl = impl.lower()
        if self.impl not in self.supp_impl:
            raise KeyError("unsupported implementation: %r" % self.impl)
        self.impl_idx = self.supp_impl.index(self.impl)
if not self.__arr_av:
# num_bonds, num_cluster, size_giant
self.__ts_u32 = np.empty((3,tslength),dtype=np.uint32)
# sec_cs_moment and four_cs_moment
self.__ts_u64 = np.empty((2,tslength),dtype=np.uint64)
self.__arr_av = True
        assert syc.sy_setup(self.impl_idx, self.q, self.l, self.beta,
                            self.coupl, self.cutoff, self.rngseed,
                            self.__ts_u32[0], self.__ts_u32[1],
                            self.__ts_u32[2], self.__ts_u64[0],
                            self.__ts_u64[1])
self.__sim_i=True
def __init__(self,q,l,beta,coupl,cutoff,tslength,rngseed,impl='ibfs'):
"""
Prepares for simulation with specified parameters.
Parameters
----------
q: float
Cluster-weight, (>0).
l: int
Number of vertices per dimension, (>2)
beta: float
Inverse temperature, (>0)
coupl: float
Coupling constant, (>0)/Ferromagnetic
cutoff: int
Number of sweeps for equilibration, (>0)
tslength: int
Number of sweeps of actual simulation. This also corresponds
to the number of observable samples, (>0)
rngseed: int
Seed for the pseudo-random-number generator, (>0)
impl: str
Type of implementation. 'ibfs', 'sbfs', 'uf' and 'dc'
correspond to Interleaved/Sequential BFS, Union-Find and
            Dynamic Connectivity, respectively.
Raises
------
        ValueError if the requirements for the simulation parameters are not fulfilled.
        KeyError if the impl identifier is not chosen among the 4 possibilities.
"""
self.__ts_av = False
self.__sim_i = False
self.__arr_av = False
self.init_sim(q,l,beta,coupl,cutoff,tslength,rngseed,impl)
def simulate(self):
"""
Start the prepared/configured simulation. Note if run multiple times
the previous state is used as the "initial" configuration of the
simulation (markov process).
"""
if self.__sim_i:
if syc.sy_simulate():
self.__ts_av = True
@property
def ts_num_bonds(self):
"""
Returns
-------
ts_num_bonds: ndarray
Time-series of number of active bonds. If simulation has not been initialized yet then None.
"""
return self.__ts_u32[0] if self.__arr_av else None
@property
def ts_num_cluster(self):
"""
Returns
-------
ts_num_cluster: ndarray
Time-series of number of clusters/components. If simulation has not been initialized yet then None.
"""
return self.__ts_u32[1] if self.__arr_av else None
@property
def ts_size_giant(self):
"""
Returns
-------
ts_size_giant: ndarray
Time-series of the size of largest component. If simulation has not been initialized yet then None.
"""
return self.__ts_u32[2] if self.__arr_av else None
@property
def ts_sec_cs_moment(self):
"""
Returns
-------
ts_sec_cs_moment: ndarray
Time-series of the second moment of the cluster size distribution. If simulation has not been initialized yet then None.
"""
return self.__ts_u64[0] if self.__arr_av else None
@property
def ts_four_cs_moment(self):
"""
Returns
-------
ts_four_cs_moment: ndarray
Time-series of the fourth moment of the cluster size distribution. If simulation has not been initialized yet then None.
"""
return self.__ts_u64[1] if self.__arr_av else None
    def __del__(self):
        if self.__arr_av:
            del self.__ts_u32,self.__ts_u64
        if self.__sim_i:
            syc.sy_destroy()
    def __write_attributes(self,dset):
        dset.attrs['q'] = self.q
        dset.attrs['l'] = self.l
        dset.attrs['beta'] = self.beta
        dset.attrs['coupl'] = self.coupl
        dset.attrs['cutoff'] = self.cutoff
        dset.attrs['tslength'] = self.tslength
        dset.attrs['rngseed'] = self.rngseed
    def export_to_hdf5(self,file_name):
        if self.__arr_av:
            f = h5py.File(file_name,"w")
            dset = f['/'].create_dataset("num_bonds",
                self.tslength,dtype=self.__ts_u32.dtype,
                compression='gzip')
            self.__write_attributes(dset)
            dset[...] = self.__ts_u32[0]
            dset = f['/'].create_dataset("num_cluster",
                self.tslength,dtype=self.__ts_u32.dtype,
                compression='gzip')
            dset[...] = self.__ts_u32[1]
            dset = f['/'].create_dataset("size_giant",
                self.tslength,dtype=self.__ts_u32.dtype,
                compression='gzip')
            dset[...] = self.__ts_u32[2]
            dset = f['/'].create_dataset("sec_cs_moment",
                self.tslength,dtype=self.__ts_u64.dtype,
                compression='gzip')
            dset[...] = self.__ts_u64[0]
            dset = f['/'].create_dataset("four_cs_moment",
                self.tslength,dtype=self.__ts_u64.dtype,
                compression='gzip')
            dset[...] = self.__ts_u64[1]
            f.close()
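# A minimal usage sketch (the parameter values below are purely illustrative
# and assume the compiled `sweeny_c` extension is importable).
if __name__ == '__main__':
    sim = Sweeny(q=2.0, l=16, beta=0.44, coupl=1.0,
                 cutoff=1000, tslength=10000, rngseed=1234)
    sim.simulate()
    # Observable time series are exposed as numpy arrays once initialised.
    print(sim.ts_num_bonds.mean(), sim.ts_size_giant.max())
    sim.export_to_hdf5('sweeny_q2_l16.h5')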
|
|
# -*- coding: utf-8 -*-
"""Project version handling."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import unicodedata
from builtins import object, range
from collections import defaultdict
import six
from packaging.version import InvalidVersion, Version
from readthedocs.builds.constants import (
LATEST_VERBOSE_NAME, STABLE_VERBOSE_NAME, TAG)
def get_major(version):
"""
Return the major version.
:param version: version to get the major
:type version: packaging.version.Version
"""
# pylint: disable=protected-access
return version._version.release[0]
def get_minor(version):
"""
Return the minor version.
:param version: version to get the minor
:type version: packaging.version.Version
"""
# pylint: disable=protected-access
try:
return version._version.release[1]
except IndexError:
return 0
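# For example, get_major(Version('3.10.1')) == 3 and
# get_minor(Version('3.10.1')) == 10.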
class VersionManager(object):
"""Prune list of versions based on version windows."""
def __init__(self):
self._state = defaultdict(lambda: defaultdict(list))
def add(self, version):
self._state[get_major(version)][get_minor(version)].append(version)
def prune_major(self, num_latest):
all_keys = sorted(set(self._state.keys()))
major_keep = []
for __ in range(num_latest):
if all_keys:
major_keep.append(all_keys.pop(-1))
for to_remove in all_keys:
del self._state[to_remove]
def prune_minor(self, num_latest):
for major, minors in list(self._state.items()):
all_keys = sorted(set(minors.keys()))
minor_keep = []
for __ in range(num_latest):
if all_keys:
minor_keep.append(all_keys.pop(-1))
for to_remove in all_keys:
del self._state[major][to_remove]
def prune_point(self, num_latest):
for major, minors in list(self._state.items()):
for minor in list(minors.keys()):
try:
self._state[major][minor] = sorted(
set(self._state[major][minor]))[-num_latest:]
except TypeError:
# Raise these for now.
raise
def get_version_list(self):
versions = []
for major_val in list(self._state.values()):
for version_list in list(major_val.values()):
versions.extend(version_list)
versions = sorted(versions)
return [
version.public for version in versions if not version.is_prerelease
]
def version_windows(versions, major=1, minor=1, point=1):
"""
Return list of versions that have been pruned to version windows.
Uses :py:class:`VersionManager` to prune the list of versions
:param versions: List of version strings
:param major: Major version window
:param minor: Minor version window
:param point: Point version window
"""
# TODO: This needs some documentation on how VersionManager etc works and
# some examples what the expected outcome is.
version_identifiers = []
for version_string in versions:
try:
version_identifiers.append(Version(version_string))
except (InvalidVersion, UnicodeEncodeError):
pass
major_version_window = major
minor_version_window = minor
point_version_window = point
manager = VersionManager()
for v in version_identifiers:
manager.add(v)
manager.prune_major(major_version_window)
manager.prune_minor(minor_version_window)
manager.prune_point(point_version_window)
return manager.get_version_list()
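# A worked example of the pruning (illustrative input values):
#
#   version_windows(
#       ['1.0.0', '1.0.1', '1.1.0', '1.1.1', '2.0.0', '2.0.1'],
#       major=2, minor=1, point=1,
#   )
#   # -> ['1.1.1', '2.0.1']
#
# i.e. keep the 2 newest majors, the newest minor within each of them, and
# the newest point release within each of those minors.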
def parse_version_failsafe(version_string):
"""
Parse a version in string form and return Version object.
If there is an error parsing the string, ``None`` is returned.
:param version_string: version as string object (e.g. '3.10.1')
:type version_string: str or unicode
:returns: version object created from a string object
:rtype: packaging.version.Version
"""
if not isinstance(version_string, six.text_type):
uni_version = version_string.decode('utf-8')
else:
uni_version = version_string
try:
normalized_version = unicodedata.normalize('NFKD', uni_version)
ascii_version = normalized_version.encode('ascii', 'ignore')
final_form = ascii_version.decode('ascii')
return Version(final_form)
except (UnicodeError, InvalidVersion):
return None
def comparable_version(version_string):
"""
Can be used as ``key`` argument to ``sorted``.
The ``LATEST`` version shall always beat other versions in comparison.
``STABLE`` should be listed second. If we cannot figure out the version
number then we sort it to the bottom of the list.
:param version_string: version as string object (e.g. '3.10.1' or 'latest')
:type version_string: str or unicode
:returns: a comparable version object (e.g. 'latest' -> Version('99999.0'))
:rtype: packaging.version.Version
"""
comparable = parse_version_failsafe(version_string)
if not comparable:
if version_string == LATEST_VERBOSE_NAME:
comparable = Version('99999.0')
elif version_string == STABLE_VERBOSE_NAME:
comparable = Version('9999.0')
else:
comparable = Version('0.01')
return comparable
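# For example (assuming the verbose names are the strings 'latest' and
# 'stable'):
#
#   sorted(['2.0', 'latest', '1.0', 'stable'], key=comparable_version)
#   # -> ['1.0', '2.0', 'stable', 'latest']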
def sort_versions(version_list):
"""
Take a list of Version models and return a sorted list.
:param version_list: list of Version models
:type version_list: list(readthedocs.builds.models.Version)
:returns: sorted list in descending order (latest version first) of versions
    :rtype: list(tuple(readthedocs.builds.models.Version,
packaging.version.Version))
"""
versions = []
for version_obj in version_list:
version_slug = version_obj.verbose_name
comparable_version = parse_version_failsafe(version_slug)
if comparable_version:
versions.append((version_obj, comparable_version))
return list(
sorted(
versions,
key=lambda version_info: version_info[1],
reverse=True,
))
def highest_version(version_list):
"""
Return the highest version for a given ``version_list``.
    :rtype: tuple(readthedocs.builds.models.Version, packaging.version.Version)
"""
versions = sort_versions(version_list)
if versions:
return versions[0]
return (None, None)
def determine_stable_version(version_list):
"""
Determine a stable version for version list.
:param version_list: list of versions
:type version_list: list(readthedocs.builds.models.Version)
:returns: version considered the most recent stable one or ``None`` if there
is no stable version in the list
:rtype: readthedocs.builds.models.Version
"""
versions = sort_versions(version_list)
versions = [(version_obj, comparable)
for version_obj, comparable in versions
if not comparable.is_prerelease]
if versions:
# We take preference for tags over branches. If we don't find any tag,
# we just return the first branch found.
for version_obj, comparable in versions:
if version_obj.type == TAG:
return version_obj
version_obj, comparable = versions[0]
return version_obj
return None
|
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['zhiliao.zhi12.net'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='zhiliao <[email protected]>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[zhiliao] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
NEW_RELIC_LICENSE_KEY = env('NEW_RELIC_LICENSE_KEY')
NEW_RELIC_APP_NAME = 'zhiliao'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "{0}/{1}".format(env('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
|
|
"""
Support for RFXtrx binary sensors.
Lighting4 devices (sensors based on PT2262 encoder) are supported and
tested. Other types may need some work.
"""
import logging
import voluptuous as vol
from homeassistant.components import rfxtrx
from homeassistant.util import slugify
from homeassistant.util import dt as dt_util
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import event as evt
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.rfxtrx import (
ATTR_AUTOMATIC_ADD, ATTR_NAME, ATTR_OFF_DELAY, ATTR_FIREEVENT,
ATTR_DATA_BITS, CONF_DEVICES
)
from homeassistant.const import (
CONF_DEVICE_CLASS, CONF_COMMAND_ON, CONF_COMMAND_OFF
)
DEPENDENCIES = ["rfxtrx"]
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = vol.Schema({
vol.Required("platform"): rfxtrx.DOMAIN,
vol.Optional(CONF_DEVICES, default={}): vol.All(
dict, rfxtrx.valid_binary_sensor),
vol.Optional(ATTR_AUTOMATIC_ADD, default=False): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Set up the binary sensor platform for RFXtrx."""
import RFXtrx as rfxtrxmod
sensors = []
for packet_id, entity in config['devices'].items():
event = rfxtrx.get_rfx_object(packet_id)
device_id = slugify(event.device.id_string.lower())
if device_id in rfxtrx.RFX_DEVICES:
continue
if entity[ATTR_DATA_BITS] is not None:
_LOGGER.info("Masked device id: %s",
rfxtrx.get_pt2262_deviceid(device_id,
entity[ATTR_DATA_BITS]))
_LOGGER.info("Add %s rfxtrx.binary_sensor (class %s)",
entity[ATTR_NAME], entity[CONF_DEVICE_CLASS])
device = RfxtrxBinarySensor(event, entity[ATTR_NAME],
entity[CONF_DEVICE_CLASS],
entity[ATTR_FIREEVENT],
entity[ATTR_OFF_DELAY],
entity[ATTR_DATA_BITS],
entity[CONF_COMMAND_ON],
entity[CONF_COMMAND_OFF])
device.hass = hass
device.is_lighting4 = (packet_id[2:4] == '13')
sensors.append(device)
rfxtrx.RFX_DEVICES[device_id] = device
add_devices_callback(sensors)
# pylint: disable=too-many-branches
def binary_sensor_update(event):
"""Callback for control updates from the RFXtrx gateway."""
if not isinstance(event, rfxtrxmod.ControlEvent):
return
device_id = slugify(event.device.id_string.lower())
if device_id in rfxtrx.RFX_DEVICES:
sensor = rfxtrx.RFX_DEVICES[device_id]
else:
sensor = rfxtrx.get_pt2262_device(device_id)
if sensor is None:
            # Add the entity if it does not exist yet and automatic_add is True
if not config[ATTR_AUTOMATIC_ADD]:
return
poss_dev = rfxtrx.find_possible_pt2262_device(device_id)
if poss_dev is not None:
poss_id = slugify(poss_dev.event.device.id_string.lower())
_LOGGER.info("Found possible matching deviceid %s.",
poss_id)
pkt_id = "".join("{0:02x}".format(x) for x in event.data)
sensor = RfxtrxBinarySensor(event, pkt_id)
sensor.hass = hass
sensor.is_lighting4 = (pkt_id[2:4] == '13')
rfxtrx.RFX_DEVICES[device_id] = sensor
add_devices_callback([sensor])
_LOGGER.info("Added binary sensor %s "
"(Device_id: %s Class: %s Sub: %s)",
pkt_id,
slugify(event.device.id_string.lower()),
event.device.__class__.__name__,
event.device.subtype)
elif not isinstance(sensor, RfxtrxBinarySensor):
return
else:
_LOGGER.info("Binary sensor update "
"(Device_id: %s Class: %s Sub: %s)",
slugify(event.device.id_string.lower()),
event.device.__class__.__name__,
event.device.subtype)
if sensor.is_lighting4:
if sensor.data_bits is not None:
cmd = rfxtrx.get_pt2262_cmd(device_id, sensor.data_bits)
sensor.apply_cmd(int(cmd, 16))
else:
sensor.update_state(True)
else:
rfxtrx.apply_received_command(event)
if (sensor.is_on and sensor.off_delay is not None and
sensor.delay_listener is None):
def off_delay_listener(now):
"""Switch device off after a delay."""
sensor.delay_listener = None
sensor.update_state(False)
sensor.delay_listener = evt.track_point_in_time(
hass, off_delay_listener, dt_util.utcnow() + sensor.off_delay
)
# Subscribe to main rfxtrx events
if binary_sensor_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(binary_sensor_update)
# pylint: disable=too-many-instance-attributes,too-many-arguments
class RfxtrxBinarySensor(BinarySensorDevice):
"""An Rfxtrx binary sensor."""
def __init__(self, event, name, device_class=None,
should_fire=False, off_delay=None, data_bits=None,
cmd_on=None, cmd_off=None):
"""Initialize the sensor."""
self.event = event
self._name = name
self._should_fire_event = should_fire
self._device_class = device_class
self._off_delay = off_delay
self._state = False
self.is_lighting4 = False
self.delay_listener = None
self._data_bits = data_bits
self._cmd_on = cmd_on
self._cmd_off = cmd_off
        self._masked_id = None
        if data_bits is not None:
            self._masked_id = rfxtrx.get_pt2262_deviceid(
                event.device.id_string.lower(),
                data_bits)
def __str__(self):
"""Return the name of the sensor."""
return self._name
@property
def name(self):
"""Return the device name."""
return self._name
@property
def masked_id(self):
"""Return the masked device id (isolated address bits)."""
return self._masked_id
@property
def data_bits(self):
"""Return the number of data bits."""
return self._data_bits
@property
def cmd_on(self):
"""Return the value of the 'On' command."""
return self._cmd_on
@property
def cmd_off(self):
"""Return the value of the 'Off' command."""
return self._cmd_off
@property
def should_poll(self):
"""No polling needed."""
return False
@property
    def should_fire_event(self):
        """Return True if the device must fire an event."""
        return self._should_fire_event
@property
def device_class(self):
"""Return the sensor class."""
return self._device_class
@property
def off_delay(self):
"""Return the off_delay attribute value."""
return self._off_delay
@property
def is_on(self):
"""Return true if the sensor state is True."""
return self._state
def apply_cmd(self, cmd):
"""Apply a command for updating the state."""
if cmd == self.cmd_on:
self.update_state(True)
elif cmd == self.cmd_off:
self.update_state(False)
def update_state(self, state):
"""Update the state of the device."""
self._state = state
self.schedule_update_ha_state()
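# Illustrative (hypothetical) shape of the validated configuration that
# setup_platform() receives; the key strings follow the ATTR_*/CONF_*
# constants imported above and are assumptions, not taken from this module.
#
#   config = {
#       'platform': 'rfxtrx',
#       'automatic_add': False,
#       'devices': {
#           '0913000022670e013970': {
#               'name': 'hall_motion',
#               'device_class': 'motion',
#               'fire_event': False,
#               'off_delay': None,
#               'data_bits': 4,
#               'command_on': 0xe,
#               'command_off': 0x7,
#           },
#       },
#   }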
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
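# For example: "foo/bar:0" -> "foo/bar" and "foo:1" -> "foo_o1".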
def _tensor_to_argdef(t):
arg = op_def_pb2.OpDef.ArgDef()
arg.name = _make_argname_from_tensor_name(t.name)
arg.type = t.dtype.as_datatype_enum
return arg
def _get_node_def_attr(op):
# pylint: disable=protected-access
return op._node_def.attr
# pylint: enable=protected-access
def _add_input_array(op, start, limit, dtype, func):
"""Adds a _ListToArray node in the func for op.inputs[start:limit]."""
node = function_pb2.FunctionDef.Node()
node.op = "_ListToArray"
ret_name = op.name + "_L2A_" + str(start)
node.ret.extend([ret_name])
node.arg.extend([_make_argname_from_tensor_name(x.name)
for x in op.inputs[start:limit]])
num = limit - start
node.attr["Tin"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=[dtype] * num)))
node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtype))
node.attr["N"].CopyFrom(attr_value_pb2.AttrValue(i=num))
func.node.extend([node])
return ret_name
def _add_output_array(op, start, limit, dtype, func):
"""Adds a _ArrayToList node in the func for op.outputs[start:limit]."""
dtype_proto = attr_value_pb2.AttrValue(type=dtype)
# A node converting N*T to list(T)
node = function_pb2.FunctionDef.Node()
node.op = "_ArrayToList"
arg_name = op.name + "_A2L_" + str(start)
ret_name = arg_name + "_out"
node.ret.append(ret_name)
node.arg.append(arg_name)
node.attr["T"].CopyFrom(dtype_proto)
num = limit - start
node.attr["N"].CopyFrom(attr_value_pb2.AttrValue(i=num))
node.attr["out_types"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=[dtype] * num)))
func.node.extend([node])
num = limit - start
# Adds an identity node for each element in the array N*T so that
# uses of each element can be added easily later. These Identity
# will be eliminated before graph execution.
for i in xrange(num):
node = function_pb2.FunctionDef.Node()
node.op = "Identity"
node.arg.append(ret_name + ":" + str(i))
node.ret.append(_make_argname_from_tensor_name(op.outputs[i].name))
node.attr["T"].CopyFrom(dtype_proto)
func.node.extend([node])
return arg_name
def _add_output_list(op, start, limit, dtype_lst, func):
"""Adds a _ArrayToList node in the func for op.outputs[start:limit]."""
ret_name = op.name + "_Lst_" + str(start) + "_" + str(limit)
num = limit - start
assert len(dtype_lst) == num
# Adds an identity node for each element in the array N*T so that
# uses of each element can be added easily later. These Identity
# will be eliminated before graph execution.
for i in xrange(num):
node = function_pb2.FunctionDef.Node()
node.op = "Identity"
node.arg.append(ret_name + ":" + str(i))
node.ret.append(_make_argname_from_tensor_name(op.outputs[i].name))
node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtype_lst[i]))
func.node.extend([node])
return ret_name
def _add_op_node(graph, op, func):
"""Converts an op to a function def node and add it to `func`."""
node = function_pb2.FunctionDef.Node()
node.op = op.type
# pylint: disable=protected-access
if graph._is_function(op.type):
op_def = graph._get_function(op.type).signature
else:
op_def = op_def_registry.get_registered_ops()[op.type]
# pylint: enable=protected-access
attrs = _get_node_def_attr(op)
out_index = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
dtype = arg_def.type or attrs[arg_def.type_attr].type
num = attrs[arg_def.number_attr].i
node.ret.append(_add_output_array(op, out_index, out_index + num, dtype,
func))
out_index += num
elif arg_def.type_list_attr:
dtype_lst = attrs[arg_def.type_list_attr].list.type
num = len(dtype_lst)
node.ret.append(_add_output_list(op, out_index, out_index + num,
dtype_lst, func))
out_index += num
else:
node.ret.append(_make_argname_from_tensor_name(op.outputs[
out_index].name))
out_index += 1
inp_index = 0
for arg_def in op_def.input_arg:
if arg_def.number_attr:
dtype = arg_def.type or attrs[arg_def.type_attr].type
num = attrs[arg_def.number_attr].i
node.arg.append(_add_input_array(op, inp_index, inp_index + num, dtype,
func))
inp_index += num
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
node.arg.extend([_make_argname_from_tensor_name(op.inputs[i].name)
for i in range(inp_index, inp_index + num)])
inp_index += num
else:
node.arg.append(_make_argname_from_tensor_name(op.inputs[inp_index].name))
inp_index += 1
node.dep.extend([_make_argname_from_tensor_name(x.name)
for x in op.control_inputs])
for k, v in _get_node_def_attr(op).items():
node.attr[k].CopyFrom(v)
func.node.extend([node])
# pylint: disable=line-too-long
def graph_to_function_def(graph, name, inputs, outputs):
"""Returns `graph` as a `FunctionDef` protocol buffer.
This method creates a [`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer that contains all the ops present in the graph. The
graph effectively becomes the body of the function.
The arguments `inputs` and `outputs` will be listed as the inputs
and outputs tensors of the function. They must be lists of
tensors present in the graph. The lists can optionally be empty.
The returned protocol buffer can be passed to the
[`Graph.add_function()`](#Graph.add_function) method of a
different graph to make it available there.
Args:
graph: GraphDef proto.
name: string. The name to use for the function.
inputs: List of tensors. Inputs to the function.
outputs: List of tensors. Outputs of the function.
Returns:
A FunctionDef protocol buffer.
"""
# pylint: enable=line-too-long
func = function_pb2.FunctionDef()
func.signature.name = name
func.signature.input_arg.extend([_tensor_to_argdef(graph.get_tensor_by_name(
i.name)) for i in inputs])
func.signature.output_arg.extend([_tensor_to_argdef(graph.get_tensor_by_name(
o.name)) for o in outputs])
func_arg_placeholders = set([i.name for i in inputs])
g = ops.get_default_graph()
for op in graph.get_operations():
tensor_name = op.values()[0].name
if tensor_name not in func_arg_placeholders:
_add_op_node(g, op, func)
return func
def call_function(func_def, *inputs, **kwargs):
"""Calls the function described by `func_def`.
This adds a `call` op to the default graph that calls the function described
by `func_def` with the tensors listed in `inputs` as arguments. It returns
the outputs of the call, which are one or more tensors.
`func_def` is a
[`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
  protocol buffer describing a
TensorFlow function. See [`define_function()`](#define_function) for an
easy way to create one from a Python function.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to instruct
the runtime not to inline the function body into the call site.
`func_def` is automatically added to the function library of the graph if
needed.
Args:
func_def: A `FunctionDef` protocol buffer.
*inputs: A list of tensors
**kwargs: Optional keyword arguments. Can only contain 'name'.
Returns:
A list of tensors representing the outputs of the call to `func_def`.
Raises:
ValueError: if the arguments are invalid.
"""
name = kwargs.pop("name", None)
noinline = kwargs.pop("noinline", None)
if noinline is None:
attrs = None
else:
attrs = {}
attrs["noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
func_name = func_def.signature.name
with ops.name_scope(name, func_name, inputs) as name:
if len(inputs) != len(func_def.signature.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" %
(len(func_def.signature.input_arg), len(inputs)))
output_types = [dtypes.DType(x.type) for x in func_def.signature.output_arg]
# TODO(touts): Pass compute_shapes as "try if function exists"
g = ops.get_default_graph()
op = g.create_op(func_name,
list(inputs),
output_types,
name=name,
attrs=attrs,
compute_shapes=False)
if op.outputs:
if len(op.outputs) == 1:
return op.outputs[0]
else:
return tuple(op.outputs)
else:
return op
def _get_func_name(func):
if isinstance(func, _DefinedFunction):
return func.name
elif callable(func):
if inspect.isfunction(func):
return func.__name__
elif inspect.ismethod(func):
return "%s.%s" % (func.__self__.__name__, func.__name__)
else: # Probably a class instance with __call__
return type(func)
else:
raise ValueError("Argument must be callable")
def define_function(func, input_types, func_name=None, grad_func=None,
python_grad_func=None):
"""Creates a `FunctionDef` for a python function.
`func` is a Python function that receives zero or more tensors and returns at
least one tensor. It should add ops to the default graph the usual way by
calling TensorFlow functions such as `tf.constant()`, `tf.matmul()`, etc.
  `input_types` is a dictionary of strings to `tf.DType` objects. Keys are
  the names of arguments to `func`. The values indicate the types of tensors
  expected by the function.
The returned `FunctionDef` protocol buffer is also added to the
default graph library. After it has been added you can add calls to
the function by passing it to `tf.call_function()`, together with a
list of tensors to use as inputs for the function.
Notes:
* `func` is called once, with `placeholder` tensors of the types specified in
`input_types` as arguments.
* Values returned by `func` must be tensors and they are recorded as being
the output of the function def.
  * While `func` is called, an empty graph is temporarily pushed as the
default graph. All ops added by `func` to that graph are part of the body
of the returned function def.
Example, but also see the [How To on functions](link_needed).
```python
# A function that receives two tensors x, y and returns their
# sum and difference.
def my_func(x, y):
return x + y, x - y
  # Create a FunctionDef for 'my_func'. (This does not change the default
  # graph.)
my_func_def = tf.define_function(my_func, {'x': tf.float32, 'y': tf.float32})
# Alternatively:
# my_func_def = tf.define_function(my_func, [tf.float32, tf.float32])
# Build the graph, calling the function.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = tf.call_function(my_func_def, a, b, name='mycall')
```
Args:
func: a Python function.
input_types: if a dict, keys are the names of the arguments of
`func`, values are their expected `tf.DType`. Otherwise,
a list of `tf.DType`s.
    func_name: Python string. If not None, specifies the name to use when
creating the Function. By default, introspection on `func` is used to
generate a name.
grad_func: If not None, specifies the gradient function. The
      gradient function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func: If not None, specifies the gradient function with the same
interface as that expected by `tf.RegisterGradient`. This
will be called by tf.gradients to add the gradient ops to the
graph. No more than one of {grad_func, python_grad_func} may be
specified.
Returns:
A FunctionDef protocol buffer.
Raises:
ValueError: if the arguments are invalid.
"""
# TODO(touts): Lift the limitation that func can only receive Tensor args.
func_name = func_name or _get_func_name(func)
grad_func_name = _get_func_name(grad_func) if grad_func is not None else None
argspec = inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError("Functions with argument defaults or keyword "
"arguments are not supported.")
if inspect.isfunction(func):
if argspec.varargs and (
len(argspec.args) > len(input_types)) or not argspec.varargs and (
len(argspec.args) != len(input_types)):
raise ValueError("The function has fewer arguments "
"than the number of specified input types.")
argnames = argspec.args
elif inspect.ismethod(func):
if argspec.varargs and (
len(argspec.args) > 1 + len(input_types)) or not argspec.varargs and (
len(argspec.args) != 1 + len(input_types)):
raise ValueError("The class function has fewer arguments "
"than the number of specified input types.")
# 1st argument is the "class" type.
argnames = argspec.args[1:]
args = []
if isinstance(input_types, (list, tuple)):
for i in range(len(input_types)):
argname = argnames[i] if i < len(argnames) else ("arg%d" % i)
argtype = input_types[i]
args.append((argname, argtype))
else:
for name in argnames:
if name not in input_types:
raise ValueError("Missing type for argument: " + name)
args.append((name, input_types[name]))
# Create the func_def object.
temp_graph = ops.Graph()
with temp_graph.as_default():
# List of placeholders for the function_def.
inputs = []
# Arglist to call 'func'
kwargs = {}
for (argname, argtype) in args:
argholder = array_ops.placeholder(argtype, name=argname)
inputs.append(argholder)
kwargs[argname] = argholder
# Call func and gather the output tensors.
if isinstance(input_types, (list, tuple)):
outputs = func(*inputs)
else:
outputs = func(**kwargs)
if not isinstance(outputs, ops.Tensor) and not outputs:
raise ValueError("Function must return at least one tensor")
# Convenience: if func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
# Build the FunctionDef
func_def = graph_to_function_def(temp_graph, func_name, inputs, outputs)
g = ops.get_default_graph()
# pylint: disable=protected-access
g._add_function(func_def, grad_func_name, python_grad_func=python_grad_func)
# pylint: enable=protected-access
return func_def
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function it will add `call` ops to the graph.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
  a = tf.constant([1.0])
  b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
@@__init__
"""
def __init__(self, *input_type_list, **input_types):
"""Create a `Defun` decorator.
Args:
*input_type_list: A list of `tf.DType`
**input_types: Dict mapping string with `tf.DType`
One key for each argument of the function to decorate.
Note that these optional keyword arguments are also accepted:
func_name - (optional). A python string, the name to use to declare
this `Function` in the graph.
grad_func - (optional). A function implementing the gradient of the
function-to-register. This is usually a previously
`Defun`-registered Python callable.
python_grad_func - (optional). A function implementing the gradient of
the function python-side. This function must take the current op and
the gradients w.r.t. its outputs, and return the gradients w.r.t. the
inputs (identical to the interface expected by
`tf.RegisterGradient`).
"""
self._func_name = input_types.pop("func_name", None)
self._grad_func = input_types.pop("grad_func", None)
self._python_grad_func = input_types.pop("python_grad_func", None)
assert not input_type_list or not input_types, (
"Can't specify both *input_type_list and **input_types")
self._input_types = input_types
self._input_type_list = input_type_list
def __call__(self, f):
if self._input_types:
func_def = define_function(
f, self._input_types,
func_name=self._func_name, grad_func=self._grad_func,
python_grad_func=self._python_grad_func)
else:
func_def = define_function(
f, self._input_type_list,
func_name=self._func_name, grad_func=self._grad_func,
python_grad_func=self._python_grad_func)
return _DefinedFunction(definition=func_def)
class _DefinedFunction(object):
"""Class to store the name and definition of the function defined by Defun.
This object implements a callable interface that runs `call_function`, and
provides a `name` property to look up the name of the `Function`.
An instance of `_DefinedFunction` may be passed to the `grad_func` parameter
of `define_function` and `Defun`.
"""
def __init__(self, definition):
self._definition = definition
@property
def name(self):
return self._definition.signature.name
def __call__(self, *args, **kwargs):
return call_function(self._definition, *args, **kwargs)
|
|
import json
import textwrap
import unittest
import pytest
from conans.test.utils.tools import TestClient, GenConanfile
from conans.util.env_reader import get_env
class LockRecipeTest(unittest.TestCase):
def test_error_pass_base(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . pkg/0.1@")
client.save({"conanfile.py": GenConanfile().with_require("pkg/0.1")})
client.run("lock create conanfile.py --base --lockfile-out=conan.lock")
client.run("install . --lockfile=conan.lock", assert_error=True)
self.assertIn("Lockfiles with --base do not contain profile information, "
"cannot be used. Create a full lockfile", client.out)
def test_lock_recipe(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile().with_setting("os")})
client.run("create . pkg/0.1@ -s os=Windows")
self.assertIn("pkg/0.1:3475bd55b91ae904ac96fde0f106a136ab951a5e - Build", client.out)
self.assertIn("pkg/0.1: Created package revision d0f0357277b3417d3984b5a9a85bbab6",
client.out)
client.run("create . pkg/0.1@ -s os=Linux")
self.assertIn("pkg/0.1:cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31 - Build", client.out)
self.assertIn("pkg/0.1: Created package revision 9e99cfd92d0d7df79d687b01512ce844",
client.out)
client.save({"conanfile.py": GenConanfile().with_require("pkg/0.1")})
client.run("lock create conanfile.py --base --lockfile-out=base.lock")
lock = json.loads(client.load("base.lock"))
self.assertEqual(2, len(lock["graph_lock"]["nodes"]))
pkg_node = lock["graph_lock"]["nodes"]["1"]
if client.cache.config.revisions_enabled:
self.assertEqual(pkg_node["ref"], "pkg/0.1#f096d7d54098b7ad7012f9435d9c33f3")
else:
self.assertEqual(pkg_node["ref"], "pkg/0.1")
client.run("lock create conanfile.py -s os=Linux "
"--lockfile-out=linux.lock --lockfile=base.lock")
lock = json.loads(client.load("linux.lock"))
pkg_node = lock["graph_lock"]["nodes"]["1"]
if client.cache.config.revisions_enabled:
self.assertEqual(pkg_node["ref"], "pkg/0.1#f096d7d54098b7ad7012f9435d9c33f3")
self.assertEqual(pkg_node["package_id"], "cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31")
self.assertEqual(pkg_node["prev"], "9e99cfd92d0d7df79d687b01512ce844")
else:
self.assertEqual(pkg_node["ref"], "pkg/0.1")
self.assertEqual(pkg_node["package_id"], "cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31")
self.assertEqual(pkg_node["prev"], "0")
self.assertEqual(pkg_node["options"], "")
self.assertIsNone(pkg_node.get("modified"))
client.run("lock create conanfile.py -s os=Windows "
"--lockfile-out=windows.lock --lockfile=base.lock")
lock = json.loads(client.load("windows.lock"))
pkg_node = lock["graph_lock"]["nodes"]["1"]
if client.cache.config.revisions_enabled:
self.assertEqual(pkg_node["ref"], "pkg/0.1#f096d7d54098b7ad7012f9435d9c33f3")
self.assertEqual(pkg_node["package_id"], "3475bd55b91ae904ac96fde0f106a136ab951a5e")
self.assertEqual(pkg_node["prev"], "d0f0357277b3417d3984b5a9a85bbab6")
else:
self.assertEqual(pkg_node["ref"], "pkg/0.1")
self.assertEqual(pkg_node["package_id"], "3475bd55b91ae904ac96fde0f106a136ab951a5e")
self.assertEqual(pkg_node["prev"], "0")
self.assertEqual(pkg_node["options"], "")
# Now it is possible to obtain the base one again from the full ones
client.run("lock create conanfile.py --base "
"--lockfile-out=windows_base.lock --lockfile=windows.lock")
self.assertEqual(client.load("windows_base.lock"), client.load("base.lock"))
# Now it is possible to obtain the base one again from the full ones
client.run("lock create conanfile.py --base "
"--lockfile-out=linux_base.lock --lockfile=linux.lock")
self.assertEqual(client.load("linux_base.lock"), client.load("base.lock"))
def test_lock_recipe_from_partial(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . LibA/1.0@")
client.save({"conanfile.py": GenConanfile().with_require("LibA/[>=1.0]")})
client.run("create . LibB/1.0@")
client.run("lock create --reference=LibB/1.0 --lockfile-out=base.lock --base")
client.run("lock create --reference=LibB/1.0 --lockfile=base.lock --lockfile-out=libb.lock")
client.save({"conanfile.py": GenConanfile()})
client.run("create . LibA/1.0.1@")
client.save({"conanfile.py": GenConanfile().with_require("LibB/1.0")})
for lock in ("base.lock", "libb.lock"):
client.run("lock create conanfile.py --name=LibC --version=1.0 --lockfile=%s "
"--lockfile-out=full.lock --base" % lock)
self.assertIn("LibA/1.0 from local cache - Cache", client.out)
lock = json.loads(client.load("full.lock"))
for id_, ref, rrev in ("1", "LibB/1.0", "6e5c7369c3d3f7a7a5a60ddec16a941f"), \
("2", "LibA/1.0", "f3367e0e7d170aa12abccb175fee5f97"):
pkg_node = lock["graph_lock"]["nodes"][id_]
if client.cache.config.revisions_enabled:
self.assertEqual(pkg_node["ref"], "%s#%s" % (ref, rrev))
else:
self.assertEqual(pkg_node["ref"], ref)
self.assertIsNone(pkg_node.get("package_id"))
self.assertIsNone(pkg_node.get("prev"))
self.assertIsNone(pkg_node.get("options"))
def test_conditional_lock_recipe(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . common/0.1@")
client.run("create . win/0.1@")
client.run("create . linux/0.1@")
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
settings = "os"
requires = "common/0.1"
def requirements(self):
if self.settings.os == "Windows":
self.requires("win/0.1")
else:
self.requires("linux/0.1")
""")
client.save({"conanfile.py": conanfile})
client.run("lock create conanfile.py --base -s os=Windows --lockfile-out=conan.lock")
lock = json.loads(client.load("conan.lock"))
self.assertEqual(3, len(lock["graph_lock"]["nodes"]))
common = lock["graph_lock"]["nodes"]["1"]
win = lock["graph_lock"]["nodes"]["2"]
if client.cache.config.revisions_enabled:
self.assertEqual(common["ref"], "common/0.1#f3367e0e7d170aa12abccb175fee5f97")
self.assertEqual(win["ref"], "win/0.1#f3367e0e7d170aa12abccb175fee5f97")
else:
self.assertEqual(common["ref"], "common/0.1")
self.assertEqual(win["ref"], "win/0.1")
self.assertIsNone(common.get("package_id"))
self.assertIsNone(common.get("prev"))
self.assertIsNone(common.get("options"))
self.assertIsNone(win.get("package_id"))
self.assertIsNone(win.get("prev"))
self.assertIsNone(win.get("options"))
client.run("lock create conanfile.py -s os=Linux "
"--lockfile-out=linux.lock --lockfile=conan.lock")
lock = json.loads(client.load("linux.lock"))
self.assertEqual(3, len(lock["graph_lock"]["nodes"]))
common = lock["graph_lock"]["nodes"]["1"]
linux = lock["graph_lock"]["nodes"]["3"]
self.assertNotIn("2", lock["graph_lock"]["nodes"])
if client.cache.config.revisions_enabled:
self.assertEqual(common["ref"], "common/0.1#f3367e0e7d170aa12abccb175fee5f97")
self.assertEqual(common["prev"], "83c38d3b4e5f1b8450434436eec31b00")
self.assertEqual(linux["ref"], "linux/0.1#f3367e0e7d170aa12abccb175fee5f97")
self.assertEqual(linux["prev"], "83c38d3b4e5f1b8450434436eec31b00")
else:
self.assertEqual(common["ref"], "common/0.1")
self.assertEqual(common["prev"], "0")
self.assertEqual(linux["ref"], "linux/0.1")
self.assertEqual(linux["prev"], "0")
self.assertEqual(common["options"], "")
self.assertEqual(common["package_id"], "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self.assertEqual(linux["package_id"], "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self.assertEqual(linux["options"], "")
client.run("lock create conanfile.py -s os=Windows "
"--lockfile-out=windows.lock --lockfile=conan.lock")
lock = json.loads(client.load("windows.lock"))
self.assertEqual(3, len(lock["graph_lock"]["nodes"]))
common = lock["graph_lock"]["nodes"]["1"]
win = lock["graph_lock"]["nodes"]["2"]
if client.cache.config.revisions_enabled:
self.assertEqual(common["ref"], "common/0.1#f3367e0e7d170aa12abccb175fee5f97")
self.assertEqual(common["prev"], "83c38d3b4e5f1b8450434436eec31b00")
self.assertEqual(win["ref"], "win/0.1#f3367e0e7d170aa12abccb175fee5f97")
self.assertEqual(win["prev"], "83c38d3b4e5f1b8450434436eec31b00")
else:
self.assertEqual(common["ref"], "common/0.1")
self.assertEqual(common["prev"], "0")
self.assertEqual(win["ref"], "win/0.1")
self.assertEqual(win["prev"], "0")
self.assertEqual(common["package_id"], "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self.assertEqual(common["options"], "")
self.assertEqual(win["package_id"], "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self.assertEqual(win["options"], "")
@pytest.mark.skipif(not get_env("TESTING_REVISIONS_ENABLED", False), reason="Only revisions")
def test_lose_rrev(self):
# https://github.com/conan-io/conan/issues/7595
client = TestClient()
client.run("config set general.default_package_id_mode=full_package_mode")
files = {
"pkga/conanfile.py": GenConanfile(),
"pkgb/conanfile.py": GenConanfile().with_require("liba/[*]"),
}
client.save(files)
client.run("create pkga liba/0.1@")
client.run("lock create pkgb/conanfile.py --name=libb --version=0.1 "
"--lockfile-out=base.lock --base")
client.run("export pkgb libb/0.1@ --lockfile=base.lock --lockfile-out=libb_base.lock")
client.run("lock create --reference=libb/0.1@ --lockfile=libb_base.lock "
"--lockfile-out=libb_release.lock --build=missing")
libb_release = client.load("libb_release.lock")
self.assertIn('"ref": "libb/0.1#c2a641589d4b617387124f011905a97b"', libb_release)
client.run("create pkgb libb/0.1@ --lockfile=libb_release.lock")
self.assertIn("libb/0.1: Created package", client.out)
@pytest.mark.skipif(not get_env("TESTING_REVISIONS_ENABLED", False), reason="Only revisions")
def test_missing_configuration(self):
client = TestClient()
client.run("config set general.default_package_id_mode=package_revision_mode")
client.save({"conanfile.py": GenConanfile().with_setting("os")})
client.run("create . liba/0.1@ -s os=Windows")
self.assertIn("liba/0.1:3475bd55b91ae904ac96fde0f106a136ab951a5e - Build", client.out)
self.assertIn("liba/0.1: Created package revision d0f0357277b3417d3984b5a9a85bbab6",
client.out)
client.save({"conanfile.py": GenConanfile().with_require("liba/0.1")})
client.run("export . libb/0.1@")
client.run("lock create --reference=libb/0.1 --base --lockfile-out=conan.lock -s os=Windows")
client.run("lock create --reference=libb/0.1 -s os=Windows "
"--lockfile-out=windows.lock --lockfile=conan.lock "
"--build=libb/0.1 --build=missing")
self.assertIn("libb/0.1:d9a360017881eddb68099b9a3573a4c0d39f3df5 - Build", client.out)
client.run("lock create --reference=libb/0.1 -s os=Linux "
"--lockfile-out=linux.lock --lockfile=conan.lock "
"--build=libb/0.1 --build=missing")
self.assertIn("libb/0.1:Package_ID_unknown - Unknown", client.out)
@pytest.mark.skipif(not get_env("TESTING_REVISIONS_ENABLED", False), reason="Only revisions")
def test_missing_configuration_build_require(self):
client = TestClient()
client.run("config set general.default_package_id_mode=package_revision_mode")
client.save({"conanfile.py": GenConanfile()})
client.run("create . cmake/1.0@")
client.save({"conanfile.py": GenConanfile().with_setting("os"),
"myprofile": "[build_requires]\ncmake/1.0"})
client.run("create . liba/0.1@ -s os=Windows --profile=myprofile")
self.assertIn("liba/0.1:3475bd55b91ae904ac96fde0f106a136ab951a5e - Build", client.out)
self.assertIn("liba/0.1: Created package revision d0f0357277b3417d3984b5a9a85bbab6",
client.out)
client.save({"conanfile.py": GenConanfile().with_require("liba/0.1")})
client.run("lock create conanfile.py --name=libb --version=0.1 --base "
"--lockfile-out=conan.lock --profile=myprofile -s os=Windows --build")
client.run("export . libb/0.1@ --lockfile=conan.lock --lockfile-out=conan.lock")
client.run("lock create --reference=libb/0.1 -s os=Windows "
"--lockfile-out=windows.lock --lockfile=conan.lock "
"--build=libb/0.1 --build=missing --profile=myprofile")
self.assertIn("liba/0.1:3475bd55b91ae904ac96fde0f106a136ab951a5e - Cache", client.out)
self.assertIn("libb/0.1:d9a360017881eddb68099b9a3573a4c0d39f3df5 - Build", client.out)
self.assertIn("cmake/1.0:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache", client.out)
client.run("lock create --reference=libb/0.1 -s os=Linux "
"--lockfile-out=linux.lock --lockfile=conan.lock "
"--build=libb/0.1 --build=missing --profile=myprofile")
self.assertNotIn("ERROR: No package matching 'libb/0.1' pattern", client.out)
self.assertIn("liba/0.1:cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31 - Build", client.out)
self.assertIn("libb/0.1:Package_ID_unknown - Unknown", client.out)
self.assertIn("cmake/1.0:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache", client.out)
|
|
"""This module contains a prototype implementation of the
TT-cross-based minimization procedure
"""
import numpy as np
import math
import tt
from ..maxvol import maxvol
from ..utils.rect_maxvol import rect_maxvol
def reshape(a, sz):
return np.reshape(a, sz, order='F')
def mkron(a, b):
return np.kron(a, b)
def mysvd(a, full_matrices=False):
    try:
        return np.linalg.svd(a, full_matrices)
    except np.linalg.LinAlgError:
        # Retry with a tiny random perturbation if the SVD fails to converge.
        return np.linalg.svd(a + np.max(np.abs(a).flatten()) * 1e-14 *
                             np.random.randn(a.shape[0], a.shape[1]), full_matrices)
def min_func(fun, bounds_min, bounds_max, d=None, rmax=10,
n0=64, nswp=10, verb=True, smooth_fun=None):
"""Find (approximate) minimal value of the function on a d-dimensional grid."""
if d is None:
d = len(bounds_min)
a = np.asanyarray(bounds_min).copy()
b = np.asanyarray(bounds_max).copy()
else:
a = np.ones(d) * bounds_min
b = np.ones(d) * bounds_max
if smooth_fun is None:
smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam))
#smooth_fun = lambda p, lam: np.exp(-10*(p - lam))
# We do not need to store the cores, only the interfaces!
Rx = [[]] * (d + 1) # Python list for the interfaces
Rx[0] = np.ones((1, 1))
Rx[d] = np.ones((1, 1))
Jy = [np.empty(0)] * (d + 1)
ry = rmax * np.ones(d + 1, dtype=np.int)
ry[0] = 1
ry[d] = 1
n = n0 * np.ones(d, dtype=np.int)
fun_evals = 0
grid = [np.reshape(np.linspace(a[i], b[i], n[i]), (n[i], 1))
for i in range(d)]
for i in range(d - 1):
#cr1 = y[i]
ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
cr1 = np.random.randn(ry[i], n[i], ry[i + 1])
cr1 = reshape(cr1, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cr1)
ind = maxvol(q)
w1 = mkron(np.ones((n[i], 1)), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1)))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
# Jy{i+1} = [kron(ones(n(i),1), Jy{i}), kron((1:n(i))', ones(ry(i),1))];
# Jy{i+1} = Jy{i+1}(ind,:);
swp = 0
dirn = -1
i = d - 1
lm = 999999999999
while swp < nswp:
# Right-to-left sweep
# The idea: compute the current core; compute the function of it;
# Shift locally or globally? Local shift would be the first try
# Compute the current core
if np.size(Jy[i]) == 0:
w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
else:
w1 = mkron(np.ones((n[i] * ry[i + 1], 1)), Jy[i])
w2 = mkron(mkron(np.ones((ry[i + 1], 1)),
grid[i]), np.ones((ry[i], 1)))
if np.size(Jy[i + 1]) == 0:
w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
else:
w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1)))
J = np.hstack((w1, w2, w3))
# Just add some random indices to J, which is rnr x d, need to make rn (r + r0) x add,
# i.e., just generate random r, random n and random multiindex
cry = fun(J)
fun_evals += cry.size
cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
min_cur = np.min(cry.flatten("F"))
ind_cur = np.argmin(cry.flatten("F"))
if lm > min_cur:
lm = min_cur
x_full = J[ind_cur, :]
val = fun(x_full)
if verb:
print('New record:', val, 'Point:', x_full, 'fevals:', fun_evals)
cry = smooth_fun(cry, lm)
if (dirn < 0 and i > 0):
cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
cry = cry.T
#q, r = np.linalg.qr(cry)
u, s, v = mysvd(cry, full_matrices=False)
ry[i] = min(ry[i], rmax)
q = u[:, :ry[i]]
ind = rect_maxvol(q)[0] # maxvol(q)
ry[i] = ind.size
w1 = mkron(np.ones((ry[i + 1], 1)), grid[i])
if np.size(Jy[i + 1]) == 0:
w2 = np.zeros((n[i] * ry[i + 1], 0))
else:
w2 = mkron(Jy[i + 1], np.ones((n[i], 1)))
Jy[i] = np.hstack((w1, w2))
Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
Jy[i] = Jy[i][ind, :]
if (dirn > 0 and i < d - 1):
cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cry)
#ind = maxvol(q)
ind = rect_maxvol(q)[0]
ry[i + 1] = ind.size
w1 = mkron(np.ones((n[i], 1)), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1)))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
i += dirn
if i == d or i == -1:
dirn = -dirn
i += dirn
swp = swp + 1
return val, x_full
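# A minimal usage sketch (illustrative): minimise a smooth function of three
# variables on the default 64-point-per-dimension grid over [-1, 1]^3. The
# callable receives a 2-D array whose rows are candidate grid points, so it
# must be vectorised over the last axis.
#
#   fun = lambda x: np.sum((x - 0.3) ** 2, axis=-1)
#   val, x_min = min_func(fun, [-1.0, -1.0, -1.0], [1.0, 1.0, 1.0],
#                         rmax=5, nswp=5, verb=False)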
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None):
"""Find (approximate) minimal element in a TT-tensor."""
if smooth_fun is None:
smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam))
d = tens.d
Rx = [[]] * (d + 1) # Python list for the interfaces
Rx[0] = np.ones((1, 1))
Rx[d] = np.ones((1, 1))
Jy = [np.empty(0)] * (d + 1)
ry = rmax * np.ones(d + 1, dtype=np.int)
ry[0] = 1
ry[d] = 1
n = tens.n
elements_seen = 0
phi_left = [np.empty(0)] * (d + 1)
phi_left[0] = np.array([1])
phi_right = [np.empty(0)] * (d + 1)
phi_right[d] = np.array([1])
cores = tt.tensor.to_list(tens)
# Fill initial multiindex J randomly.
grid = [np.reshape(list(range(n[i])), (n[i], 1)) for i in range(d)]
for i in range(d - 1):
ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]])
w1 = mkron(np.ones((n[i], 1)), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1)))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
phi_left[i + 1] = phi_left[i + 1][ind, :]
swp = 0
dirn = -1
i = d - 1
lm = 999999999999
while swp < nswp:
# Right-to-left sweep
# The idea: compute the current core; compute the function of it;
# Shift locally or globally? Local shift would be the first try
# Compute the current core
if np.size(Jy[i]) == 0:
w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
else:
w1 = mkron(np.ones((n[i] * ry[i + 1], 1)), Jy[i])
w2 = mkron(mkron(np.ones((ry[i + 1], 1)),
grid[i]), np.ones((ry[i], 1)))
if np.size(Jy[i + 1]) == 0:
w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
else:
w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1)))
J = np.hstack((w1, w2, w3))
phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
cry = np.tensordot(
phi_left[i], np.tensordot(
cores[i], phi_right[
i + 1], 1), 1)
elements_seen += cry.size
cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
min_cur = np.min(cry.flatten("F"))
ind_cur = np.argmin(cry.flatten("F"))
if lm > min_cur:
lm = min_cur
x_full = J[ind_cur, :]
val = tens[x_full]
if verb:
print('New record:', val, 'Point:', x_full, 'elements seen:', elements_seen)
cry = smooth_fun(cry, lm)
if dirn < 0 and i > 0:
cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
cry = cry.T
#q, r = np.linalg.qr(cry)
u, s, v = mysvd(cry, full_matrices=False)
ry[i] = min(ry[i], rmax)
q = u[:, :ry[i]]
ind = rect_maxvol(q)[0] # maxvol(q)
ry[i] = ind.size
w1 = mkron(np.ones((ry[i + 1], 1)), grid[i])
if np.size(Jy[i + 1]) == 0:
w2 = np.zeros((n[i] * ry[i + 1], 0))
else:
w2 = mkron(Jy[i + 1], np.ones((n[i], 1)))
Jy[i] = np.hstack((w1, w2))
Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
Jy[i] = Jy[i][ind, :]
phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
phi_right[i] = phi_right[i][:, ind]
if dirn > 0 and i < d - 1:
cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
q, r = np.linalg.qr(cry)
#ind = maxvol(q)
ind = rect_maxvol(q)[0]
ry[i + 1] = ind.size
phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
phi_left[i + 1] = phi_left[i + 1][ind, :]
w1 = mkron(np.ones((n[i], 1)), Jy[i])
w2 = mkron(grid[i], np.ones((ry[i], 1)))
Jy[i + 1] = np.hstack((w1, w2))
Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
Jy[i + 1] = Jy[i + 1][ind, :]
i += dirn
if i == d or i == -1:
dirn = -dirn
i += dirn
swp = swp + 1
return val, x_full
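# A minimal usage sketch (not part of the original file): exercise min_tens on a
# small random TT tensor.  This assumes ttpy's tt.rand(n, d, r) constructor is
# available through the `tt` module imported by this file; the sizes, ranks and
# sweep counts below are arbitrary illustration values.
if __name__ == "__main__":
    demo = tt.rand(8, 4, 3)  # 4-dimensional random tensor, mode size 8, TT-rank 3
    best_val, best_idx = min_tens(demo, rmax=6, nswp=4, verb=False)
    print("approximate minimum", best_val, "at multi-index", best_idx)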
|
|
import os
import json
import unittest
from unittest import mock
from pythonforandroid.bootstrap import Bootstrap
from pythonforandroid.distribution import Distribution
from pythonforandroid.recipe import Recipe
from pythonforandroid.util import BuildInterruptingException
from pythonforandroid.build import Context
dist_info_data = {
"dist_name": "sdl2_dist",
"bootstrap": "sdl2",
"archs": ["armeabi", "armeabi-v7a", "x86", "x86_64", "arm64-v8a"],
"ndk_api": 21,
"use_setup_py": False,
"recipes": ["hostpython3", "python3", "sdl2", "kivy", "requests"],
"hostpython": "/some/fake/hostpython3",
"python_version": "3.7",
}
class TestDistribution(unittest.TestCase):
"""
    A subclass of `unittest.TestCase` to test the module
    :mod:`~pythonforandroid.distribution`.
"""
TEST_ARCH = 'armeabi-v7a'
def setUp(self):
"""Configure a :class:`~pythonforandroid.build.Context` so we can
perform our unittests"""
self.ctx = Context()
self.ctx.ndk_api = 21
self.ctx.android_api = 27
self.ctx._sdk_dir = "/opt/android/android-sdk"
self.ctx._ndk_dir = "/opt/android/android-ndk"
self.ctx.setup_dirs(os.getcwd())
self.ctx.recipe_build_order = [
"hostpython3",
"python3",
"sdl2",
"kivy",
]
def setUp_distribution_with_bootstrap(self, bs, **kwargs):
"""Extend the setUp by configuring a distribution, because some test
needs a distribution to be set to be properly tested"""
self.ctx.bootstrap = bs
self.ctx.bootstrap.distribution = Distribution.get_distribution(
self.ctx,
name=kwargs.pop("name", "test_prj"),
recipes=kwargs.pop("recipes", ["python3", "kivy"]),
archs=[self.TEST_ARCH],
**kwargs
)
def tearDown(self):
"""Here we make sure that we reset a possible bootstrap created in
`setUp_distribution_with_bootstrap`"""
self.ctx.bootstrap = None
def test_properties(self):
"""Test that some attributes has the expected result (for now, we check
that `__repr__` and `__str__` return the proper values"""
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
distribution = self.ctx.bootstrap.distribution
self.assertEqual(self.ctx, distribution.ctx)
expected_repr = (
"<Distribution: name test_prj with recipes (python3, kivy)>"
)
self.assertEqual(distribution.__str__(), expected_repr)
self.assertEqual(distribution.__repr__(), expected_repr)
@mock.patch("pythonforandroid.distribution.exists")
def test_folder_exist(self, mock_exists):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.folder_exist` is
called once with the proper arguments."""
mock_exists.return_value = False
self.setUp_distribution_with_bootstrap(
Bootstrap.get_bootstrap("sdl2", self.ctx)
)
self.ctx.bootstrap.distribution.folder_exists()
mock_exists.assert_called_with(
self.ctx.bootstrap.distribution.dist_dir
)
@mock.patch("pythonforandroid.distribution.rmtree")
def test_delete(self, mock_rmtree):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.delete` is
called once with the proper arguments."""
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
self.ctx.bootstrap.distribution.delete()
mock_rmtree.assert_called_once_with(
self.ctx.bootstrap.distribution.dist_dir
)
@mock.patch("pythonforandroid.distribution.exists")
def test_get_distribution_no_name(self, mock_exists):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.get_distribution`
        returns the proper result, which should be `unnamed_dist_1`."""
mock_exists.return_value = False
self.ctx.bootstrap = Bootstrap().get_bootstrap("sdl2", self.ctx)
dist = Distribution.get_distribution(self.ctx, archs=[self.TEST_ARCH])
self.assertEqual(dist.name, "unnamed_dist_1")
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.distribution.open", create=True)
def test_save_info(self, mock_open_dist_info, mock_chdir):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.save_info`
is called once with the proper arguments."""
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
self.ctx.hostpython = "/some/fake/hostpython3"
self.ctx.python_recipe = Recipe.get_recipe("python3", self.ctx)
self.ctx.python_modules = ["requests"]
mock_open_dist_info.side_effect = [
mock.mock_open(read_data=json.dumps(dist_info_data)).return_value
]
self.ctx.bootstrap.distribution.save_info("/fake_dir")
mock_open_dist_info.assert_called_once_with("dist_info.json", "w")
mock_open_dist_info.reset_mock()
@mock.patch("pythonforandroid.distribution.open", create=True)
@mock.patch("pythonforandroid.distribution.exists")
@mock.patch("pythonforandroid.distribution.glob.glob")
def test_get_distributions(
self, mock_glob, mock_exists, mock_open_dist_info
):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.get_distributions`
returns some expected values:
- A list of instances of class
          `~pythonforandroid.distribution.Distribution`
- That one of the distributions returned in the result has the
proper values (`name`, `ndk_api` and `recipes`)
"""
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
mock_glob.return_value = ["sdl2-python3"]
mock_open_dist_info.side_effect = [
mock.mock_open(read_data=json.dumps(dist_info_data)).return_value
]
dists = self.ctx.bootstrap.distribution.get_distributions(self.ctx)
self.assertIsInstance(dists, list)
self.assertEqual(len(dists), 1)
self.assertIsInstance(dists[0], Distribution)
self.assertEqual(dists[0].name, "sdl2_dist")
self.assertEqual(dists[0].dist_dir, "sdl2-python3")
self.assertEqual(dists[0].ndk_api, 21)
self.assertEqual(
dists[0].recipes,
["hostpython3", "python3", "sdl2", "kivy", "requests"],
)
mock_open_dist_info.assert_called_with("sdl2-python3/dist_info.json")
mock_open_dist_info.reset_mock()
@mock.patch("pythonforandroid.distribution.open", create=True)
@mock.patch("pythonforandroid.distribution.exists")
@mock.patch("pythonforandroid.distribution.glob.glob")
def test_get_distributions_error_ndk_api(
self, mock_glob, mock_exists, mock_open_dist_info
):
"""Test method
:meth:`~pythonforandroid.distribution.Distribution.get_distributions`
        when `ndk_api` is not set, in which case the distribution's `ndk_api`
        should be `None`.
"""
dist_info_data_no_ndk_api = dist_info_data.copy()
dist_info_data_no_ndk_api.pop("ndk_api")
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
mock_glob.return_value = ["sdl2-python3"]
mock_open_dist_info.side_effect = [
mock.mock_open(
read_data=json.dumps(dist_info_data_no_ndk_api)
).return_value
]
dists = self.ctx.bootstrap.distribution.get_distributions(self.ctx)
self.assertEqual(dists[0].ndk_api, None)
mock_open_dist_info.assert_called_with("sdl2-python3/dist_info.json")
mock_open_dist_info.reset_mock()
@mock.patch("pythonforandroid.distribution.Distribution.get_distributions")
@mock.patch("pythonforandroid.distribution.exists")
@mock.patch("pythonforandroid.distribution.glob.glob")
def test_get_distributions_error_ndk_api_mismatch(
self, mock_glob, mock_exists, mock_get_dists
):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.get_distribution`
        raises an error in case we have some distribution already built,
with a given `name` and `ndk_api`, and we try to get another
distribution with the same `name` but different `ndk_api`.
"""
expected_dist = Distribution.get_distribution(
self.ctx,
name="test_prj",
recipes=["python3", "kivy"],
archs=[self.TEST_ARCH],
)
mock_get_dists.return_value = [expected_dist]
mock_glob.return_value = ["sdl2-python3"]
with self.assertRaises(BuildInterruptingException) as e:
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx),
allow_replace_dist=False,
ndk_api=22,
)
self.assertEqual(
e.exception.args[0],
"Asked for dist with name test_prj with recipes (python3, kivy)"
" and NDK API 22, but a dist with this name already exists and has"
" either incompatible recipes (python3, kivy) or NDK API 21",
)
def test_get_distributions_error_extra_dist_dirs(self):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.get_distributions`
raises an exception of
:class:`~pythonforandroid.util.BuildInterruptingException` in case that
we supply the kwargs `extra_dist_dirs`.
"""
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx)
)
with self.assertRaises(BuildInterruptingException) as e:
self.ctx.bootstrap.distribution.get_distributions(
self.ctx, extra_dist_dirs=["/fake/extra/dist_dirs"]
)
self.assertEqual(
e.exception.args[0],
"extra_dist_dirs argument to get"
"_distributions is not yet implemented",
)
@mock.patch("pythonforandroid.distribution.Distribution.get_distributions")
def test_get_distributions_possible_dists(self, mock_get_dists):
"""Test that method
:meth:`~pythonforandroid.distribution.Distribution.get_distributions`
        returns the proper
        :class:`~pythonforandroid.distribution.Distribution` in case that we
        already have it built and we request the same
        :class:`~pythonforandroid.distribution.Distribution`.
"""
expected_dist = Distribution.get_distribution(
self.ctx,
name="test_prj",
recipes=["python3", "kivy"],
archs=[self.TEST_ARCH],
)
mock_get_dists.return_value = [expected_dist]
self.setUp_distribution_with_bootstrap(
Bootstrap().get_bootstrap("sdl2", self.ctx), name="test_prj"
)
dists = self.ctx.bootstrap.distribution.get_distributions(self.ctx)
self.assertEqual(dists[0], expected_dist)
|
|
from datetime import timedelta, datetime
from django.contrib.auth import get_user_model
from django.utils.timezone import now
from rest_framework.test import APITestCase
from callback_request.api_views import ManagersAvailabilityView, CreateCallbackThrottle
from callback_request.models import CallbackRequest, CallEntry
from callback_schedule.models import CallbackManager, CallbackManagerSchedule, CallbackManagerPhone
class CallbackRequestTest(APITestCase):
def test_api_requests_manager(self):
today = now()
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
CallbackManagerSchedule.objects.create(manager=manager, weekday=(today + timedelta(days=1)).weekday(),
available_from="00:00", available_till="23:59")
response = self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'date': (today + timedelta(days=1)).isoformat(),
'immediate': False,
})
pk = response.data['id']
print('We have created request')
response = self.client.get('/api/callback/manage/requests.json')
self.assertEqual(403, response.status_code)
print('> Unauthorized user can\'t get requests list')
response = self.client.get('/api/callback/manage/requests/{}.json'.format(pk))
self.assertEqual(403, response.status_code)
print('> Unauthorized user can\'t get request')
admin = get_user_model().objects.create_superuser('admin', '[email protected]', 'test')
self.client.force_authenticate(admin)
print('Logged in as superuser')
response = self.client.get('/api/callback/manage/requests.json')
self.assertEqual(200, response.status_code)
print('> Admin can get requests list')
response = self.client.get('/api/callback/manage/requests/{}.json'.format(pk))
self.assertEqual(200, response.status_code)
print('> Admin can get request')
self.client.force_authenticate(None)
def test_callback_request_later(self):
today = now()
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
CallbackManagerSchedule.objects.create(manager=manager, weekday=(today + timedelta(days=1)).weekday(),
available_from="00:00", available_till="23:59")
response = self.client.post('/api/callback/create.json', {
'name': 'Test',
'phone': '+1 (234) 56-78-90',
'immediate': False,
})
self.assertEqual(response.status_code, 400, 'Shouldn\'t succeed without date')
response = self.client.post('/api/callback/create.json', {
'name': 'Test',
'immediate': False,
})
self.assertEqual(response.status_code, 400, 'Shouldn\'t succeed without phone')
response = self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'date': (today + timedelta(days=1)).isoformat(),
'immediate': False,
})
self.assertEqual(response.status_code, 201)
response = self.client.post('/api/callback/create.json', {
'name': 'Test',
'phone': '+1 (234) 56-78-90',
'date': (today + timedelta(days=1)).isoformat(),
'immediate': False,
})
self.assertEqual(response.status_code, 201)
request_id = response.data['id']
request = CallbackRequest.objects.get(pk=request_id)
self.assertEqual(request.right_phone, '+1234567890')
def test_callback_request_now(self):
CreateCallbackThrottle.rate = '100/second'
with self.settings(CALLER_FUNCTION='callback_caller.utils.make_stub_call'):
response = self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'immediate': True,
})
self.assertEqual(response.status_code, 400, 'No free managers, shouldn\'t accept request')
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
today = now()
CallbackManagerSchedule.objects.create(manager=manager, weekday=today.weekday(),
available_from='00:00:00',
available_till='23:59:59')
CallbackManagerPhone.objects.create(manager=manager, phone_type='phone', number='+12345')
response = self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'immediate': True,
})
self.assertEqual(response.status_code, 201)
self.assertEqual(CallEntry.objects.all().count(), 1)
def test_call_entries(self):
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
today = now()
CallbackManagerSchedule.objects.create(manager=manager, weekday=today.weekday(),
available_from='00:00:00',
available_till='23:59:59')
CallbackManagerPhone.objects.create(manager=manager, phone_type='phone', number='+12345')
CallbackManagerPhone.objects.create(manager=manager, phone_type='phone', number='+12346', priority=1)
with self.settings(CALLER_FUNCTION='callback_caller.utils.make_stub_call'):
self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'immediate': True,
})
self.assertEqual(2, CallEntry.objects.all().count())
entry_1 = CallEntry.objects.all()[0]
entry_1.fail()
self.assertEqual('failed', entry_1.state)
self.assertEqual(2, CallEntry.objects.all().count())
entry_2 = CallEntry.objects.get(state='waiting')
entry_2.fail()
self.assertEqual(2, CallEntry.objects.all().count())
self.client.post('/api/callback/create.json', {
'phone': '+1 (234) 56-78-90',
'immediate': True,
})
self.assertEqual(2, CallEntry.objects.filter(state='waiting').count())
entry_3 = CallEntry.objects.filter(state='waiting')[0]
entry_3.success()
self.assertEqual(0, CallEntry.objects.filter(state='processing').count())
def test_real_schedule(self):
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
today = datetime(2016, 11, 23, 12, 0) # Wednesday
CallbackManagerSchedule.objects.create(manager=manager, weekday=0,
available_from='12:00:00', available_till='12:30:00')
CallbackManagerSchedule.objects.create(manager=manager, weekday=3,
available_from='13:00:00', available_till='13:30:00')
CallbackManagerSchedule.objects.create(manager=manager, weekday=3,
available_from='13:00:00', available_till='13:20:00')
CallbackManagerSchedule.objects.create(manager=manager, weekday=2,
available_from='10:00:00', available_till='12:30:00')
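        # The expected list below implies get_real_schedule emits 10-minute
        # slots inside the availability windows, strictly after `today`
        # (Wed 12:00): Wednesday's 10:00-12:30 window yields 12:10 and 12:20,
        # Thursday's two overlapping windows merge into 13:00-13:20, and next
        # Monday's 12:00-12:30 window yields three more slots.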
schedule = ManagersAvailabilityView.get_real_schedule(today)
self.assertEqual(
[
datetime(2016, 11, 23, 12, 10),
datetime(2016, 11, 23, 12, 20),
datetime(2016, 11, 24, 13, 0),
datetime(2016, 11, 24, 13, 10),
datetime(2016, 11, 24, 13, 20),
datetime(2016, 11, 28, 12, 0),
datetime(2016, 11, 28, 12, 10),
datetime(2016, 11, 28, 12, 20),
],
schedule
)
def test_nearest_date(self):
response = self.client.get('/api/callback/availability.json')
self.assertEqual({'available': False, 'nearest': None, 'schedule': []}, response.data)
user = get_user_model().objects.create_user('Manager#1')
manager = CallbackManager.objects.create(user=user)
CallbackManagerPhone.objects.create(manager=manager, phone_type='phone', number='+12345')
today = now().replace(second=0, microsecond=0)
print('TODAY', today)
CallbackManagerSchedule.objects.create(manager=manager, weekday=(today.weekday() + 1) % 7,
available_from='12:00:00',
available_till='12:30:00')
response = self.client.get('/api/callback/availability.json')
self.assertEqual({'available': False,
'nearest': (today.replace(hour=12, minute=0) + timedelta(days=1)).isoformat()[:-6] + 'Z',
'schedule': [
(today.replace(hour=12, minute=0) + timedelta(days=1)).isoformat()[:-6] + 'Z',
(today.replace(hour=12, minute=10) + timedelta(days=1)).isoformat()[:-6] + 'Z',
(today.replace(hour=12, minute=20) + timedelta(days=1)).isoformat()[:-6] + 'Z',
]},
response.data)
CallbackManagerSchedule.objects.create(manager=manager, weekday=today.weekday(),
available_from='00:00:00',
available_till='23:59:59')
response = self.client.get('/api/callback/availability.json')
self.assertEqual(True, response.data['available'])
|
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsHelpers(PostmanTests):
def test_1_basic_auth_plain(self):
basic_auth_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(2)")
basic_auth_selector.click()
username = self.browser.find_element_by_id("request-helper-basicAuth-username")
password = self.browser.find_element_by_id("request-helper-basicAuth-password")
username.clear()
password.clear()
username.send_keys("Aladin")
password.send_keys("sesam open")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-basicAuth .request-helper-submit")
refresh_headers.click()
header_first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key").get_attribute("value")
header_first_value = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value").get_attribute("value")
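        # The helper is expected to add an Authorization header whose value is
        # "Basic " + base64("Aladin:sesam open"), i.e. the constant checked below.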
if header_first_key == "Authorization" and header_first_value == "Basic QWxhZGluOnNlc2FtIG9wZW4=":
return True
else:
return False
def test_2_basic_auth_environment(self):
self.reset_request()
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
time.sleep(0.1)
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:last-child a")
manage_env_link.click()
time.sleep(1)
add_env_button = self.browser.find_element_by_css_selector("#environments-list-wrapper .toolbar .environments-actions-add")
add_env_button.click()
time.sleep(0.3)
environment_name = self.browser.find_element_by_id("environment-editor-name")
environment_name.clear()
environment_name.send_keys("Test basic auth environment")
first_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("basic_key")
first_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("Aladin")
second_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_key.clear()
second_key.send_keys("basic_val")
second_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_val.clear()
second_val.send_keys("sesam open")
submit_button = self.browser.find_element_by_css_selector("#modal-environments .environments-actions-add-submit")
submit_button.click()
time.sleep(0.3)
close_button = self.browser.find_element_by_css_selector("#modal-environments .modal-header .close")
close_button.click()
time.sleep(1)
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
# Select the environment
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:nth-of-type(1) a")
manage_env_link.click()
basic_auth_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(2)")
basic_auth_selector.click()
username = self.browser.find_element_by_id("request-helper-basicAuth-username")
password = self.browser.find_element_by_id("request-helper-basicAuth-password")
username.clear()
password.clear()
username.send_keys("{{basic_key}}")
password.send_keys("{{basic_val}}")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-basicAuth .request-helper-submit")
refresh_headers.click()
header_first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key").get_attribute("value")
header_first_value = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value").get_attribute("value")
if header_first_key == "Authorization" and header_first_value == "Basic QWxhZGluOnNlc2FtIG9wZW4=":
return True
else:
return False
def test_3_oauth1_plain_get(self):
self.reset_request()
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
# From OAuth example
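        # These fixed credentials, nonce and timestamp are the ones from the
        # OAuth Core 1.0 signing example for photos.example.net; its documented
        # HMAC-SHA1 signature is "tR3+Ty81lMeYAr/Fid0kMTYa/WM=", which is the
        # value the check at the end of this test looks for.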
self.set_url_field(self.browser, "http://photos.example.net/photos?size=original&file=vacation.jpg")
consumer_key.clear()
consumer_key.send_keys("dpf43f3p2l4k3l03")
nonce.clear()
nonce.send_keys("kllo9940pd9333jh")
timestamp.clear()
timestamp.send_keys("1191242096")
token.clear()
token.send_keys("nnch734d00sl2jdk")
consumer_secret.clear()
consumer_secret.send_keys("kd94hf93k423kf44")
token_secret.clear()
token_secret.send_keys("pfkkdhi9sl3r4s00")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#url-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("tR3+Ty81lMeYAr/Fid0kMTYa/WM=") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_4_oauth1_formdata_post(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# From OAuth example
self.set_url_field(self.browser, "http://photos.example.net/photos")
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("original")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("vacation.jpg")
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
consumer_key.clear()
consumer_key.send_keys("dpf43f3p2l4k3l03")
nonce.clear()
nonce.send_keys("kllo9940pd9333jh")
timestamp.clear()
timestamp.send_keys("1191242096")
token.clear()
token.send_keys("nnch734d00sl2jdk")
consumer_secret.clear()
consumer_secret.send_keys("kd94hf93k423kf44")
token_secret.clear()
token_secret.send_keys("pfkkdhi9sl3r4s00")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("wPkvxykrw+BTdCcGqKr+3I+PsiM=") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_5_oauth1_formdata_post_missing_http(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# From OAuth example
self.set_url_field(self.browser, "photos.example.net/photos")
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("original")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("vacation.jpg")
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
consumer_key.clear()
consumer_key.send_keys("dpf43f3p2l4k3l03")
nonce.clear()
nonce.send_keys("kllo9940pd9333jh")
timestamp.clear()
timestamp.send_keys("1191242096")
token.clear()
token.send_keys("nnch734d00sl2jdk")
consumer_secret.clear()
consumer_secret.send_keys("kd94hf93k423kf44")
token_secret.clear()
token_secret.send_keys("pfkkdhi9sl3r4s00")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("wPkvxykrw+BTdCcGqKr+3I+PsiM=") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_6_oauth1_urlencoded_post(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# Select urlencoded
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(2)").click()
# From OAuth example
self.set_url_field(self.browser, "http://photos.example.net/photos")
first_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("original")
second_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("vacation.jpg")
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
consumer_key.clear()
consumer_key.send_keys("dpf43f3p2l4k3l03")
nonce.clear()
nonce.send_keys("kllo9940pd9333jh")
timestamp.clear()
timestamp.send_keys("1191242096")
token.clear()
token.send_keys("nnch734d00sl2jdk")
consumer_secret.clear()
consumer_secret.send_keys("kd94hf93k423kf44")
token_secret.clear()
token_secret.send_keys("pfkkdhi9sl3r4s00")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("wPkvxykrw+BTdCcGqKr+3I+PsiM=") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_7_oauth1_post_headers(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# From OAuth example
self.set_url_field(self.browser, "http://photos.example.net/photos")
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("original")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("vacation.jpg")
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
consumer_key.clear()
consumer_key.send_keys("dpf43f3p2l4k3l03")
nonce.clear()
nonce.send_keys("kllo9940pd9333jh")
timestamp.clear()
timestamp.send_keys("1191242096")
token.clear()
token.send_keys("nnch734d00sl2jdk")
consumer_secret.clear()
consumer_secret.send_keys("kd94hf93k423kf44")
token_secret.clear()
token_secret.send_keys("pfkkdhi9sl3r4s00")
add_to_header = self.browser.find_element_by_id("request-helper-oauth1-header")
add_to_header.click()
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("wPkvxykrw%2BBTdCcGqKr%2B3I%2BPsiM%3D") > 0:
if value.find("realm") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_8_oauth1_post_environment(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# From OAuth example
self.set_url_field(self.browser, "{{url}}")
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
time.sleep(0.1)
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:last-child a")
manage_env_link.click()
time.sleep(1)
add_env_button = self.browser.find_element_by_css_selector("#environments-list-wrapper .toolbar .environments-actions-add")
add_env_button.click()
time.sleep(0.3)
environment_name = self.browser.find_element_by_id("environment-editor-name")
environment_name.clear()
environment_name.send_keys("Test oauth 1 environment")
first_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("consumer_key")
first_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("dpf43f3p2l4k3l03")
second_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_key.clear()
second_key.send_keys("consumer_secret")
second_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_val.clear()
second_val.send_keys("kd94hf93k423kf44")
third_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-key")
third_key.clear()
third_key.send_keys("token")
third_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-value")
third_val.clear()
third_val.send_keys("nnch734d00sl2jdk")
fourth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-key")
fourth_key.clear()
fourth_key.send_keys("token_secret")
fourth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-value")
fourth_val.clear()
fourth_val.send_keys("pfkkdhi9sl3r4s00")
fifth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-key")
fifth_key.clear()
fifth_key.send_keys("nonce")
fifth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-value")
fifth_val.clear()
fifth_val.send_keys("kllo9940pd9333jh")
sixth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-key")
sixth_key.clear()
sixth_key.send_keys("timestamp")
sixth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-value")
sixth_val.clear()
sixth_val.send_keys("1191242096")
seventh_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-key")
seventh_key.clear()
seventh_key.send_keys("url")
seventh_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-value")
seventh_val.clear()
seventh_val.send_keys("http://photos.example.net/photos")
eigth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-key")
eigth_key.clear()
eigth_key.send_keys("file")
eigth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-value")
eigth_val.clear()
eigth_val.send_keys("vacation.jpg")
ninth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-key")
ninth_key.clear()
ninth_key.send_keys("size")
ninth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-value")
ninth_val.clear()
ninth_val.send_keys("original")
submit_button = self.browser.find_element_by_css_selector("#modal-environments .environments-actions-add-submit")
submit_button.click()
time.sleep(0.3)
close_button = self.browser.find_element_by_css_selector("#modal-environments .modal-header .close")
close_button.click()
time.sleep(1)
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
# Select the environment
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:nth-of-type(2) a")
manage_env_link.click()
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("{{size}}")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("{{file}}")
oauth1_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(4)")
oauth1_selector.click()
consumer_key = self.browser.find_element_by_id("request-helper-oauth1-consumerKey")
consumer_secret = self.browser.find_element_by_id("request-helper-oauth1-consumerSecret")
token = self.browser.find_element_by_id("request-helper-oauth1-token")
token_secret = self.browser.find_element_by_id("request-helper-oauth1-tokenSecret")
timestamp = self.browser.find_element_by_id("request-helper-oauth1-timestamp")
nonce = self.browser.find_element_by_id("request-helper-oauth1-nonce")
version = self.browser.find_element_by_id("request-helper-oauth1-version")
consumer_key.clear()
consumer_key.send_keys("{{consumer_key}}")
nonce.clear()
nonce.send_keys("{{nonce}}")
timestamp.clear()
timestamp.send_keys("{{timestamp}}")
token.clear()
token.send_keys("{{token}}")
token_secret.clear()
token_secret.send_keys("{{token_secret}}")
consumer_secret.clear()
consumer_secret.send_keys("{{consumer_secret}}")
add_to_header = self.browser.find_element_by_id("request-helper-oauth1-header")
add_to_header.click()
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-oAuth1 .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row")
found_oauth_signature = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("oauth_signature") > 0:
found_oauth_signature = True
if value.find("wPkvxykrw+BTdCcGqKr+3I+PsiM=") > 0:
found_oauth_signature = True
else:
found_oauth_signature = False
if found_oauth_signature is True:
return True
else:
return False
def test_9_digest_get_headers(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("GET")
# Example from the Python requests library
self.set_url_field(self.browser, "http://httpbin.org/digest-auth/auth/user/pass")
digest_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(3)")
digest_selector.click()
username = self.browser.find_element_by_id("request-helper-digestAuth-username")
realm = self.browser.find_element_by_id("request-helper-digestAuth-realm")
password = self.browser.find_element_by_id("request-helper-digestAuth-password")
nonce = self.browser.find_element_by_id("request-helper-digestAuth-nonce")
algorithm = self.browser.find_element_by_id("request-helper-digestAuth-algorithm")
qop = self.browser.find_element_by_id("request-helper-digestAuth-qop")
nonce_count = self.browser.find_element_by_id("request-helper-digestAuth-nonceCount")
client_nonce = self.browser.find_element_by_id("request-helper-digestAuth-clientNonce")
opaque = self.browser.find_element_by_id("request-helper-digestAuth-opaque")
username.clear()
realm.clear()
password.clear()
nonce.clear()
algorithm.clear()
qop.clear()
nonce_count.clear()
client_nonce.clear()
opaque.clear()
username.send_keys("user")
realm.send_keys("[email protected]")
password.send_keys("pass")
nonce.send_keys("59c177ca4c8aa616a0e0007717a2225d")
algorithm.send_keys("MD5")
qop.send_keys("auth")
nonce_count.send_keys("00000002")
client_nonce.send_keys("a621deed62b2ff96")
opaque.send_keys("c68f9b6d2ccdf56c49945e0788fd1017")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-digestAuth .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row")
found_digest_response = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("response") > 0:
found_digest_response = True
if value.find("bf0ed74d6a422565ba9aae6d0e36f7b9") > 0:
if value.find("realm") > 0:
found_digest_response = True
else:
found_digest_response = False
else:
found_digest_response = False
if found_digest_response is True:
return True
else:
return False
def test_10_digest_post_environment(self):
self.reset_request()
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("GET")
# From OAuth example
self.set_url_field(self.browser, "http://httpbin.org/digest-auth/auth/user/pass")
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
time.sleep(0.1)
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:last-child a")
manage_env_link.click()
time.sleep(1)
add_env_button = self.browser.find_element_by_css_selector("#environments-list-wrapper .toolbar .environments-actions-add")
add_env_button.click()
time.sleep(0.3)
environment_name = self.browser.find_element_by_id("environment-editor-name")
environment_name.clear()
environment_name.send_keys("Test digest environment")
first_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("username")
first_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("user")
second_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_key.clear()
second_key.send_keys("realm")
second_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_val.clear()
second_val.send_keys("[email protected]")
third_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-key")
third_key.clear()
third_key.send_keys("password")
third_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-value")
third_val.clear()
third_val.send_keys("pass")
fourth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-key")
fourth_key.clear()
fourth_key.send_keys("nonce")
fourth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-value")
fourth_val.clear()
fourth_val.send_keys("59c177ca4c8aa616a0e0007717a2225d")
fifth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-key")
fifth_key.clear()
fifth_key.send_keys("algorithm")
fifth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-value")
fifth_val.clear()
fifth_val.send_keys("MD5")
sixth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-key")
sixth_key.clear()
sixth_key.send_keys("qop")
sixth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-value")
sixth_val.clear()
sixth_val.send_keys("auth")
seventh_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-key")
seventh_key.clear()
seventh_key.send_keys("nonce_count")
seventh_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-value")
seventh_val.clear()
seventh_val.send_keys("00000002")
eigth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-key")
eigth_key.clear()
eigth_key.send_keys("client_nonce")
eigth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-value")
eigth_val.clear()
eigth_val.send_keys("a621deed62b2ff96")
ninth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-key")
ninth_key.clear()
ninth_key.send_keys("opaque")
ninth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-value")
ninth_val.clear()
ninth_val.send_keys("c68f9b6d2ccdf56c49945e0788fd1017")
submit_button = self.browser.find_element_by_css_selector("#modal-environments .environments-actions-add-submit")
submit_button.click()
time.sleep(0.3)
close_button = self.browser.find_element_by_css_selector("#modal-environments .modal-header .close")
close_button.click()
time.sleep(1)
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
# Select the environment
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:nth-of-type(2) a")
manage_env_link.click()
digest_selector = self.browser.find_element_by_css_selector("#request-types .request-helper-tabs li:nth-of-type(3)")
digest_selector.click()
username = self.browser.find_element_by_id("request-helper-digestAuth-username")
realm = self.browser.find_element_by_id("request-helper-digestAuth-realm")
password = self.browser.find_element_by_id("request-helper-digestAuth-password")
nonce = self.browser.find_element_by_id("request-helper-digestAuth-nonce")
algorithm = self.browser.find_element_by_id("request-helper-digestAuth-algorithm")
qop = self.browser.find_element_by_id("request-helper-digestAuth-qop")
nonce_count = self.browser.find_element_by_id("request-helper-digestAuth-nonceCount")
client_nonce = self.browser.find_element_by_id("request-helper-digestAuth-clientNonce")
opaque = self.browser.find_element_by_id("request-helper-digestAuth-opaque")
username.clear()
realm.clear()
password.clear()
nonce.clear()
algorithm.clear()
qop.clear()
nonce_count.clear()
client_nonce.clear()
opaque.clear()
username.send_keys("{{username}}")
realm.send_keys("{{realm}}")
password.send_keys("{{password}}")
nonce.send_keys("{{nonce}}")
algorithm.send_keys("{{algorithm}}")
qop.send_keys("{{qop}}")
nonce_count.send_keys("{{nonce_count}}")
client_nonce.send_keys("{{client_nonce}}")
opaque.send_keys("{{opaque}}")
refresh_headers = self.browser.find_element_by_css_selector("#request-helper-digestAuth .request-helper-submit")
refresh_headers.click()
input_elements = self.browser.find_elements_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row")
found_digest_response = False
for element in input_elements:
value = self.browser.execute_script("return arguments[0].innerHTML", element)
if value.find("response") > 0:
found_digest_response = True
if value.find("bf0ed74d6a422565ba9aae6d0e36f7b9") > 0:
if value.find("realm") > 0:
found_digest_response = True
else:
found_digest_response = False
else:
found_digest_response = False
if found_digest_response is True:
return True
else:
return False
PostmanTestsHelpers().run()
|
|
# limit memory usage..
import glob
import logging
import os
import cv2
import numpy as np
import pandas
# limit memory usage..
from keras import backend as K
from keras.layers import Input, Convolution3D, MaxPooling3D, Flatten, AveragePooling3D
from keras.metrics import binary_accuracy, binary_crossentropy, mean_absolute_error
from keras.models import Model
from keras.optimizers import SGD
from ...preprocess.lung_segmentation import rescale_patient_images
CUBE_SIZE = 32
CROP_SIZE = 32
MEAN_PIXEL_VALUE = 41
EXTRACTED_IMAGE_DIR = "data/extracted/"
NODULE_DETECTION_DIR = "data/detections/"
K.set_image_dim_ordering("tf")
POS_WEIGHT = 2
NEGS_PER_POS = 20
P_TH = 0.6
LEARN_RATE = 0.001
PREDICT_STEP = 12
BATCH_SIZE = 128
STEP = PREDICT_STEP
def load_patient_images(patient_id, base_dir=EXTRACTED_IMAGE_DIR, wildcard="*.*", exclude_wildcards=None):
exclude_wildcards = exclude_wildcards or []
src_dir = os.path.join(os.getcwd(), base_dir, patient_id)
src_img_paths = glob.glob(os.path.join(src_dir, wildcard))
for exclude_wildcard in exclude_wildcards:
exclude_img_paths = glob.glob(os.path.join(src_dir, exclude_wildcard))
src_img_paths = [im for im in src_img_paths if im not in exclude_img_paths]
src_img_paths.sort()
images = [cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) for img_path in src_img_paths]
images = [im.reshape((1,) + im.shape) for im in images]
res = np.vstack(images)
return res
def prepare_image_for_net3D(img):
img = img.astype(np.float32)
img -= MEAN_PIXEL_VALUE
img /= 255.
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2], 1)
return img
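# Illustrative sketch (not part of the original pipeline; _example_prepare_cube is a
# hypothetical helper): the network defined below expects cubes shaped
# (1, depth, height, width, 1), mean-shifted by MEAN_PIXEL_VALUE and scaled to roughly [0, 1].
def _example_prepare_cube():
    dummy_cube = np.full((CUBE_SIZE, CUBE_SIZE, CUBE_SIZE), MEAN_PIXEL_VALUE, dtype=np.uint8)
    prepared = prepare_image_for_net3D(dummy_cube)
    # mean-subtracted and rescaled; shape is (1, 32, 32, 32, 1)
    assert prepared.shape == (1, CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
    return prepared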
def filter_patient_nodules_predictions(df_nodule_predictions: pandas.DataFrame, patient_id, view_size):
patient_mask = load_patient_images(patient_id, wildcard="*_m.png")
delete_indices = []
for index, row in df_nodule_predictions.iterrows():
z_perc = row["coord_z"]
y_perc = row["coord_y"]
center_x = int(round(row["coord_x"] * patient_mask.shape[2]))
center_y = int(round(y_perc * patient_mask.shape[1]))
center_z = int(round(z_perc * patient_mask.shape[0]))
mal_score = row["diameter_mm"]
start_y = center_y - view_size / 2
start_x = center_x - view_size / 2
nodule_in_mask = False
for z_index in [-1, 0, 1]:
img = patient_mask[z_index + center_z]
start_x = int(start_x)
start_y = int(start_y)
view_size = int(view_size)
img_roi = img[start_y:start_y + view_size, start_x:start_x + view_size]
if img_roi.sum() > 255: # more than 1 pixel of mask.
nodule_in_mask = True
if not nodule_in_mask:
logging.info("Nodule not in mask: ", (center_x, center_y, center_z))
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
else:
if center_z < 30:
logging.info("Z < 30: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if mal_score > 0:
mal_score *= -1
df_nodule_predictions.loc[index, "diameter_mm"] = mal_score
if (z_perc > 0.75 or z_perc < 0.25) and y_perc > 0.85:
logging.info("SUSPICIOUS FALSEPOSITIVE: ", patient_id, " center z:", center_z, " y_perc: ", y_perc)
if center_z < 50 and y_perc < 0.30:
logging.info("SUSPICIOUS FALSEPOSITIVE OUT OF RANGE: ", patient_id, " center z:", center_z, " y_perc: ",
y_perc)
df_nodule_predictions.drop(df_nodule_predictions.index[delete_indices], inplace=True)
return df_nodule_predictions
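# Note on the filter above (descriptive comment added for clarity): rejected candidates are
# not dropped from the data frame; their diameter_mm is negated instead, so downstream code
# can keep only rows with df["diameter_mm"] > 0 while preserving the original magnitude.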
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None) -> Model:
"""Load the pre-trained 3D ConvNet that should be used to predict a nodule and its malignancy.
Args:
input_shape: shape of the input layer. Defaults to (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1).
load_weight_path: path of the trained model weights.
Returns:
keras.models.Model
"""
inputs = Input(shape=input_shape, name="input_1")
x = inputs
x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), padding="same")(x)
x = Convolution3D(64, (3, 3, 3), activation='relu', padding='same', name='conv1', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')(x)
# 2nd layer group
x = Convolution3D(128, (3, 3, 3), activation='relu', padding='same', name='conv2', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')(x)
# 3rd layer group
x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3a', strides=(1, 1, 1))(x)
x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3b', strides=(1, 1, 1))(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')(x)
# 4th layer group
x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4a', strides=(1, 1, 1))(x)
x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4b', strides=(1, 1, 1), )(x)
x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')(x)
last64 = Convolution3D(64, (2, 2, 2), activation="relu", name="last_64")(x)
out_class = Convolution3D(1, (1, 1, 1), activation="sigmoid", name="out_class_last")(last64)
out_class = Flatten(name="out_class")(out_class)
out_malignancy = Convolution3D(1, (1, 1, 1), activation=None, name="out_malignancy_last")(last64)
out_malignancy = Flatten(name="out_malignancy")(out_malignancy)
model = Model(inputs=inputs, outputs=[out_class, out_malignancy])
if load_weight_path is not None:
    model.load_weights(load_weight_path)
model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error},
metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})
return model
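# Minimal usage sketch (assumptions: `model` comes from get_net above and `cube_img` is a
# CUBE_SIZE^3 grayscale array; _example_predict_single_cube is a hypothetical helper, not
# called by the pipeline): the two-headed model returns a nodule probability and a
# malignancy/diameter regression, indexed the same way as in stats_from_batch below.
def _example_predict_single_cube(model, cube_img):
    p = model.predict(prepare_image_for_net3D(cube_img), batch_size=1)
    return float(p[0][0][0]), float(p[1][0][0])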
def prepare_data(patient_id, magnification=1):
"""By a given patient ID prepare_data returns three np.ndarray:
a 3D image array, a mask and a placeholder for a predict values.
Args:
patient_id: SeriesInstanceUID of the patient.
magnification: what magnification for the model to use, one of (1, 1.5, 2).
Returns:
np.ndarray a 3D image array.
np.ndarray a mask with a shape of the 3D image array.
np.ndarray a placeholder for a predict values.
"""
patient_img = load_patient_images(patient_id, wildcard="*_i.png", exclude_wildcards=[])
if magnification != 1:
patient_img = rescale_patient_images(patient_img, (1, 1, 1), magnification)
patient_mask = load_patient_images(patient_id, wildcard="*_m.png", exclude_wildcards=[])
if magnification != 1:
patient_mask = rescale_patient_images(patient_mask, (1, 1, 1), magnification, is_mask_image=True)
predict_volume_shape_list = [0, 0, 0]
for dim in range(3):
dim_indent = 0
while dim_indent + CROP_SIZE < patient_img.shape[dim]:
predict_volume_shape_list[dim] += 1
dim_indent += STEP
predict_volume_shape = (predict_volume_shape_list[0],
predict_volume_shape_list[1],
predict_volume_shape_list[2])
predict_volume = np.zeros(shape=predict_volume_shape, dtype=float)
return patient_img, patient_mask, predict_volume
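# Worked example (illustrative; _example_predict_volume_shape is a hypothetical helper):
# for a 340 x 320 x 320 patient volume with CROP_SIZE = 32 and STEP = 12, the loop above
# counts how many strides fit along each axis, giving a predict_volume of shape (26, 24, 24).
def _example_predict_volume_shape(shape=(340, 320, 320)):
    counts = []
    for dim_size in shape:
        n, indent = 0, 0
        while indent + CROP_SIZE < dim_size:
            n += 1
            indent += STEP
        counts.append(n)
    return tuple(counts)  # -> (26, 24, 24) for the default shape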
def predict_cubes(model_path, patient_id, magnification=1, ext_name=""):
"""Return a DataFrame including position, diameter and chance of abnormal tissue to be a nodule.
Args:
model_path: path to the pre-trained model that should be used for the prediction
patient_id: SeriesInstanceUID of the patient
magnification: what magnification for the model to use, one of (1, 1.5, 2)
ext_name: external name of the model, one of ("luna16_fs", "luna_posnegndsb_v")
Returns:
dict: a dictionary containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance, diameter_mm
of each found nodule for each patient, of the form::
{
patient_id (str): pandas.DataFrame,
..
}
"""
dst_dir = NODULE_DETECTION_DIR
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
dst_dir = os.path.join(dst_dir, "predictions" + str(int(magnification * 10)) + "_" + ext_name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
model = get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1),
load_weight_path=model_path)
patients_dfs = {}
patient_ids = [patient_id]
# In the original Julian de Wit implementation `os.listdir` was used to extract
# all subdirectories from `EXTRACTED_IMAGE_DIR`. The order wasn't important there,
# since each `base_name` represents a different patient directory.
# In the adapted version (see PR #118), however, the `return df` statement returned a
# data frame only for the last patient. Since that did not match the original behaviour,
# it was corrected in PR #172 to store every patient's data frame in the `patients_dfs`
# dictionary that is returned.
for base_name in os.listdir(EXTRACTED_IMAGE_DIR):
if os.path.isdir(os.path.join(EXTRACTED_IMAGE_DIR, base_name)):
patient_ids.append(base_name)
for patient_index, patient_id in enumerate(reversed(patient_ids)):
logging.info("%s: %s", patient_index, patient_id)
patient_img, patient_mask, predict_volume = prepare_data(patient_id, magnification)
patient_predictions_csv = annotate(model, predict_volume, patient_img, patient_mask)
df = pandas.DataFrame(patient_predictions_csv,
columns=["anno_index", "coord_x", "coord_y", "coord_z",
"diameter", "nodule_chance", "diameter_mm"])
filter_patient_nodules_predictions(df, patient_id, CROP_SIZE * magnification)
patients_dfs[patient_id] = df
return patients_dfs
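# Usage sketch (assumptions: model_path and patient_id are placeholders for a real weights
# file and a SeriesInstanceUID present under EXTRACTED_IMAGE_DIR; _example_detect_nodules is
# a hypothetical wrapper, not part of the original code):
def _example_detect_nodules(model_path, patient_id):
    patient_dfs = predict_cubes(model_path, patient_id, magnification=1, ext_name="luna16_fs")
    return patient_dfs[patient_id]  # one row per nodule candidate for this patient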
def annotate(model, predict_volume, patient_img, patient_mask):
"""Return a DataFrame including position, diameter and chance of abnormal tissue to be a nodule.
By a given model and a volumetric data.
Args:
model: 3D ConvNet that should be used to predict a nodule and its malignancy.
predict_volume:
patient_img:
patient_mask:
Returns:
list of rows, each containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance
and diameter_mm for a found nodule.
"""
done_count = 0
skipped_count = 0
annotation_index = 0
batch_list = []
batch_list_coords = []
patient_predictions_csv = []
logging.info("Predicted Volume Shape:" + str(predict_volume.shape))
for z, y, x in np.ndindex(predict_volume.shape[:3]):
# if cube_img is None:
cube_img = patient_img[z * STEP: z * STEP + CROP_SIZE,
y * STEP: y * STEP + CROP_SIZE,
x * STEP: x * STEP + CROP_SIZE]
cube_mask = patient_mask[z * STEP: z * STEP + CROP_SIZE,
y * STEP: y * STEP + CROP_SIZE,
x * STEP: x * STEP + CROP_SIZE]
done_count += 1
if done_count % 10000 == 0:
logging.info("Done: ", done_count, " skipped:", skipped_count)
if cube_mask.sum() < 2000:
skipped_count += 1
continue
if CROP_SIZE != CUBE_SIZE:
cube_img = rescale_patient_images(cube_img, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))
# if you want to consider CROP_SIZE != CUBE_SIZE, see PR #147 for rescale_patient_images2 which
# rescales input images to support this case
batch_list_coords.append((z, y, x))
img_prep = prepare_image_for_net3D(cube_img)
batch_list.append(img_prep)
if len(batch_list) % BATCH_SIZE == 0:
batch_data = np.vstack(batch_list)
p = model.predict(batch_data, batch_size=BATCH_SIZE)
ppc, annotation_index = stats_from_batch(p, patient_img.shape, predict_volume,
batch_list_coords, annotation_index)
patient_predictions_csv.extend(ppc)
batch_list[:] = []
batch_list_coords[:] = []
return patient_predictions_csv
def stats_from_batch(p, p_shape, predict_volume, batch_list_coords, annotation_index):
"""Return a list of DataFrame including position, diameter and chance of abnormal tissue to be a nodule
for each nodule in a batch.
Args:
p : an output from th 3D ConvNet, length of p[0] is equal to a batch size.
p_shape (list[int]): a shape of the patient 3D image.
predict_volume (np.ndarray): a volumetric placeholder for nodule probability storage.
batch_list_coords (list[list[int]]): list of corresponding coordinates for each sample of a batch, in zyx order.
annotation_index (int): index in the general sequence.
Returns:
list[pandas.DataFrame] containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance, diameter_mm
of s in a batch.
"""
patient_predictions_csv = []
for i in range(len(p[0])):
p_coord = np.array(batch_list_coords[i])
nodule_chance = p[0][i][0]
predict_volume[tuple(p_coord)] = nodule_chance
if nodule_chance > P_TH:
p_coord = p_coord * STEP + CROP_SIZE / 2
p_perc = np.round(p_coord / np.array(p_shape), 4)
diameter_mm = round(p[1][i][0], 4)
diameter_perc = round(diameter_mm / p_shape[2], 4)
nodule_chance = round(nodule_chance, 4)
patient_predictions_csv_line = [annotation_index, p_perc[0], p_perc[1], p_perc[2],
diameter_perc, nodule_chance, diameter_mm]
patient_predictions_csv.append(patient_predictions_csv_line)
annotation_index += 1
return patient_predictions_csv, annotation_index
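# Worked example of the coordinate conversion above (illustrative): a batch entry at grid
# coordinate (z, y, x) = (5, 10, 10) with STEP = 12 and CROP_SIZE = 32 maps to the voxel
# centre (76, 136, 136); dividing by a patient shape of (340, 320, 320) gives the relative
# coordinates (0.2235, 0.425, 0.425) that are stored in the prediction rows.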
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from fairseq import utils
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
class Dictionary(object):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
pad="<pad>",
eos="</s>",
unk="<unk>",
bos="<s>",
extra_special_symbols=None,
):
self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(t, bpe_symbol, escape_unk, extra_symbols_to_ignore)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = " ".join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.process_bpe_symbol(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with PathManager.open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file."
.format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
):
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(
filename, tokenize, eos_word, worker_id=0, num_workers=1
):
counter = Counter()
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
if f.tell() > end:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
filename, tokenize, dict.eos_word
)
)
class TruncatedDictionary(object):
def __init__(self, wrapped_dict, length):
self.__class__ = type(
wrapped_dict.__class__.__name__,
(self.__class__, wrapped_dict.__class__),
{},
)
self.__dict__ = wrapped_dict.__dict__
self.wrapped_dict = wrapped_dict
self.length = min(len(self.wrapped_dict), length)
def __len__(self):
return self.length
def __getitem__(self, i):
if i < self.length:
return self.wrapped_dict[i]
return self.wrapped_dict.unk()
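# Minimal usage sketch (assumption: run ad hoc, not part of fairseq itself;
# _example_dictionary_roundtrip is a hypothetical helper): build a Dictionary from two
# sentences, finalize it, then round-trip a line through encode_line() and string().
def _example_dictionary_roundtrip():
    d = Dictionary()
    for line in ["hello world", "hello fairseq"]:
        d.encode_line(line, add_if_not_exist=True, append_eos=True)
    d.finalize(padding_factor=1)
    ids = d.encode_line("hello world", add_if_not_exist=False, append_eos=True)
    return d.string(ids)  # -> "hello world" (the trailing eos is ignored by string())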
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import socket
import mock
import testtools
import webob
from neutron.agent.metadata import agent
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
admin_user = 'neutron'
admin_password = 'password'
admin_tenant_name = 'tenant'
auth_url = 'http://127.0.0.1'
auth_strategy = 'keystone'
auth_region = 'region'
endpoint_type = 'adminURL'
nova_metadata_ip = '9.9.9.9'
nova_metadata_port = 8775
metadata_proxy_shared_secret = 'secret'
class TestMetadataProxyHandler(base.BaseTestCase):
def setUp(self):
super(TestMetadataProxyHandler, self).setUp()
self.qclient_p = mock.patch('neutronclient.v2_0.client.Client')
self.qclient = self.qclient_p.start()
self.addCleanup(self.qclient_p.stop)
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.addCleanup(self.log_p.stop)
self.handler = agent.MetadataProxyHandler(FakeConf)
def test_call(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = ('instance_id', 'tenant_id')
with mock.patch.object(self.handler, '_proxy_request') as proxy:
proxy.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
def test_call_no_instance_match(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = None, None
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_call_internal_server_error(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
networks=None, router_id=None):
headers['X-Forwarded-For'] = '192.168.1.1'
req = mock.Mock(headers=headers)
def mock_list_ports(*args, **kwargs):
return {'ports': list_ports_retval.pop(0)}
self.qclient.return_value.list_ports.side_effect = mock_list_ports
instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
expected = [
mock.call(
username=FakeConf.admin_user,
tenant_name=FakeConf.admin_tenant_name,
region_name=FakeConf.auth_region,
auth_url=FakeConf.auth_url,
password=FakeConf.admin_password,
auth_strategy=FakeConf.auth_strategy,
auth_token=None,
endpoint_url=None,
endpoint_type=FakeConf.endpoint_type)
]
if router_id:
expected.append(
mock.call().list_ports(
device_id=router_id,
device_owner='network:router_interface'
)
)
expected.append(
mock.call().list_ports(
network_id=networks or [],
fixed_ips=['ip_address=192.168.1.1'])
)
self.qclient.assert_has_calls(expected)
return (instance_id, tenant_id)
def test_get_instance_id_router_id(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ['net1', 'net2']
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[{'device_id': 'device_id', 'tenant_id': 'tenant_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
('device_id', 'tenant_id')
)
def test_get_instance_id_router_id_no_match(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ['net1', 'net2']
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
(None, None)
)
def test_get_instance_id_network_id(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [
[{'device_id': 'device_id',
'tenant_id': 'tenant_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=['the_id']),
('device_id', 'tenant_id')
)
def test_get_instance_id_network_id_no_match(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [[]]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=['the_id']),
(None, None)
)
def _proxy_request_test_helper(self, response_code=200, method='GET'):
hdrs = {'X-Forwarded-For': '8.8.8.8'}
body = 'body'
req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
method=method, body=body)
resp = mock.Mock(status=response_code)
with mock.patch.object(self.handler, '_sign_instance_id') as sign:
sign.return_value = 'signed'
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('the_id', 'tenant_id',
req)
mock_http.assert_has_calls([
mock.call().request(
'http://9.9.9.9:8775/the_path',
method=method,
headers={
'X-Forwarded-For': '8.8.8.8',
'X-Instance-ID-Signature': 'signed',
'X-Instance-ID': 'the_id',
'X-Tenant-ID': 'tenant_id'
},
body=body
)]
)
return retval
def test_proxy_request_post(self):
self.assertEqual('content',
self._proxy_request_test_helper(method='POST'))
def test_proxy_request_200(self):
self.assertEqual('content', self._proxy_request_test_helper(200))
def test_proxy_request_403(self):
self.assertIsInstance(self._proxy_request_test_helper(403),
webob.exc.HTTPForbidden)
def test_proxy_request_404(self):
self.assertIsInstance(self._proxy_request_test_helper(404),
webob.exc.HTTPNotFound)
def test_proxy_request_409(self):
self.assertIsInstance(self._proxy_request_test_helper(409),
webob.exc.HTTPConflict)
def test_proxy_request_500(self):
self.assertIsInstance(self._proxy_request_test_helper(500),
webob.exc.HTTPInternalServerError)
def test_proxy_request_other_code(self):
with testtools.ExpectedException(Exception):
self._proxy_request_test_helper(302)
def test_sign_instance_id(self):
self.assertEqual(
self.handler._sign_instance_id('foo'),
'773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
)
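# Illustrative note (assumption, not verified by this test file beyond the digest above):
# the handler is expected to sign the instance id with an HMAC-SHA256 keyed by
# metadata_proxy_shared_secret, roughly:
#   hmac.new(b'secret', b'foo', hashlib.sha256).hexdigest()
# which should reproduce the expected hex digest if that is indeed the signing scheme.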
class TestUnixDomainHttpProtocol(base.BaseTestCase):
def test_init_empty_client(self):
u = agent.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
self.assertEqual(u.client_address, ('<local>', 0))
def test_init_with_client(self):
u = agent.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
self.assertEqual(u.client_address, 'foo')
class TestUnixDomainWSGIServer(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainWSGIServer, self).setUp()
self.eventlet_p = mock.patch.object(agent, 'eventlet')
self.eventlet = self.eventlet_p.start()
self.addCleanup(self.eventlet_p.stop)
self.server = agent.UnixDomainWSGIServer('test')
def test_start(self):
mock_app = mock.Mock()
with mock.patch.object(self.server, 'pool') as pool:
self.server.start(mock_app, '/the/path')
self.eventlet.assert_has_calls([
mock.call.listen(
'/the/path',
family=socket.AF_UNIX,
backlog=128
)]
)
pool.spawn_n.assert_called_once_with(
self.server._run,
mock_app,
self.eventlet.listen.return_value
)
def test_run(self):
with mock.patch.object(agent, 'logging') as logging:
self.server._run('app', 'sock')
self.eventlet.wsgi.server.called_once_with(
'sock',
'app',
self.server.pool,
agent.UnixDomainHttpProtocol,
mock.ANY
)
self.assertTrue(len(logging.mock_calls))
class TestUnixDomainMetadataProxy(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainMetadataProxy, self).setUp()
self.cfg_p = mock.patch.object(agent, 'cfg')
self.cfg = self.cfg_p.start()
looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_mock = looping_call_p.start()
self.addCleanup(mock.patch.stopall)
self.cfg.CONF.metadata_proxy_socket = '/the/path'
def test_init_doesnot_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.makedirs') as makedirs:
isdir.return_value = False
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
makedirs.assert_called_once_with('/the', 0o755)
def test_init_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
isdir.return_value = True
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_no_file(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = False
unlink.side_effect = OSError
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
exists.assert_called_once_with('/the/path')
def test_init_exists_unlink_fails_file_still_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = True
unlink.side_effect = OSError
with testtools.ExpectedException(OSError):
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
exists.assert_called_once_with('/the/path')
def test_run(self):
with mock.patch.object(agent, 'MetadataProxyHandler') as handler:
with mock.patch.object(agent, 'UnixDomainWSGIServer') as server:
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.makedirs') as makedirs:
isdir.return_value = False
p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
p.run()
isdir.assert_called_once_with('/the')
makedirs.assert_called_once_with('/the', 0o755)
server.assert_has_calls([
mock.call('neutron-metadata-agent'),
mock.call().start(handler.return_value,
'/the/path'),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
with mock.patch('eventlet.monkey_patch') as eventlet:
with mock.patch.object(agent, 'config') as config:
with mock.patch.object(agent, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg'):
agent.main()
self.assertTrue(eventlet.called)
self.assertTrue(config.setup_logging.called)
proxy.assert_has_calls([
mock.call(cfg.CONF),
mock.call().run()]
)
def test_init_state_reporting(self):
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.looping_mock.assert_called_once_with(proxy._report_state)
self.looping_mock.return_value.start.assert_called_once_with(
interval=mock.ANY)
def test_report_state(self):
with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.assertTrue(proxy.agent_state['start_flag'])
proxy._report_state()
self.assertNotIn('start_flag', proxy.agent_state)
state_api_inst = state_api.return_value
state_api_inst.report_state.assert_called_once_with(
proxy.context, proxy.agent_state, use_call=True)
|
|
"""
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
#(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
try:
mapped_exception = _exception_map[(exc.__class__, circumstance)]
mapped_exception.java_exception = exc
return mapped_exception
except KeyError:
return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
POLLIN = 1
POLLOUT = 2
# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
try:
channel = selectable_object.getchannel()
except:
try:
channel = selectable_object.fileno().getChannel()
except:
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
return channel
class poll:
def __init__(self):
self.selector = java.nio.channels.Selector.open()
self.chanmap = {}
self.unconnected_sockets = []
def _register_channel(self, socket_object, channel, mask):
jmask = 0
if mask & POLLIN:
# Note that OP_READ is NOT a valid event on server socket channels.
if channel.validOps() & OP_ACCEPT:
jmask = OP_ACCEPT
else:
jmask = OP_READ
if mask & POLLOUT:
if channel.validOps() & OP_WRITE:
jmask |= OP_WRITE
if channel.validOps() & OP_CONNECT:
jmask |= OP_CONNECT
selectionkey = channel.register(self.selector, jmask)
self.chanmap[channel] = (socket_object, selectionkey)
def _check_unconnected_sockets(self):
temp_list = []
for socket_object, mask in self.unconnected_sockets:
channel = _getselectable(socket_object)
if channel is not None:
self._register_channel(socket_object, channel, mask)
else:
temp_list.append( (socket_object, mask) )
self.unconnected_sockets = temp_list
def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
try:
channel = _getselectable(socket_object)
if channel is None:
# The socket is not yet connected, and thus has no channel
# Add it to a pending list, and return
self.unconnected_sockets.append( (socket_object, mask) )
return
self._register_channel(socket_object, channel, mask)
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def unregister(self, socket_object):
try:
channel = _getselectable(socket_object)
self.chanmap[channel][1].cancel()
del self.chanmap[channel]
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _dopoll(self, timeout):
if timeout is None or timeout < 0:
self.selector.select()
else:
try:
timeout = int(timeout)
if not timeout:
self.selector.selectNow()
else:
# No multiplication required: both cpython and java use millisecond timeouts
self.selector.select(timeout)
except ValueError as vx:
raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
# The returned selectedKeys cannot be used from multiple threads!
return self.selector.selectedKeys()
def poll(self, timeout=None):
try:
self._check_unconnected_sockets()
selectedkeys = self._dopoll(timeout)
results = []
for k in selectedkeys.iterator():
jmask = k.readyOps()
pymask = 0
if jmask & OP_READ: pymask |= POLLIN
if jmask & OP_WRITE: pymask |= POLLOUT
if jmask & OP_ACCEPT: pymask |= POLLIN
if jmask & OP_CONNECT: pymask |= POLLOUT
# Now return the original userobject, and the return event mask
results.append( (self.chanmap[k.channel()][0], pymask) )
return results
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _deregister_all(self):
try:
for k in self.selector.keys():
k.cancel()
# Keys are not actually removed from the selector until the next select operation.
self.selector.selectNow()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
self._deregister_all()
self.selector.close()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except Exception as x:
raise TypeError("Select timeout value must be a number or None")
if value < 0:
raise error("Select timeout value cannot be negative", errno.EINVAL)
if floatvalue < 0.000001:
return 0
return int(floatvalue * 1000) # Convert to milliseconds
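# Worked example (illustrative): a timeout of 0.5 seconds becomes int(0.5 * 1000) = 500 ms
# for the underlying java selector; values below one microsecond map to a non-blocking
# selectNow(), and None keeps the fully blocking select().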
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
def __init__(self):
self.is_windows = os.name == 'nt'
if self.is_windows:
self.poll_object_queue = queue.Queue()
import atexit
atexit.register(self.finalize)
def get_poll_object(self):
if not self.is_windows:
return poll()
try:
return self.poll_object_queue.get(False)
except queue.Empty:
return poll()
def release_poll_object(self, pobj):
if self.is_windows:
pobj._deregister_all()
self.poll_object_queue.put(pobj)
else:
pobj.close()
def finalize(self):
if self.is_windows:
while True:
try:
p = self.poll_object_queue.get(False)
p.close()
except queue.Empty:
return
_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
timeout = _calcselecttimeoutvalue(timeout)
# First create a poll object to do the actual watching.
pobj = _poll_object_cache.get_poll_object()
try:
registered_for_read = {}
# Check the read list
for fd in read_fd_list:
pobj.register(fd, POLLIN)
registered_for_read[fd] = 1
# And now the write list
for fd in write_fd_list:
if fd in registered_for_read:
# registering a second time overwrites the first
pobj.register(fd, POLLIN|POLLOUT)
else:
pobj.register(fd, POLLOUT)
results = pobj.poll(timeout)
# Now start preparing the results
read_ready_list, write_ready_list, oob_ready_list = [], [], []
for fd, mask in results:
if mask & POLLIN:
read_ready_list.append(fd)
if mask & POLLOUT:
write_ready_list.append(fd)
return read_ready_list, write_ready_list, oob_ready_list
finally:
_poll_object_cache.release_poll_object(pobj)
select = native_select
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
# First turn all sockets to non-blocking
# keeping track of which ones have changed
modified_channels = []
try:
for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
for s in socket_list:
channel = _getselectable(s)
if channel.isBlocking():
modified_channels.append(channel)
channel.configureBlocking(0)
return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
finally:
for channel in modified_channels:
channel.configureBlocking(1)
|
|
import binascii
import socket as syssock
import struct
import sys
import time
import random
# encryption libraries
import nacl.utils
import nacl.secret
import nacl.utils
from nacl.public import PrivateKey, Box
# the public and private keychains in hex format
global publicKeysHex
global privateKeysHex
# the public and private keychains in binary format
global publicKeys
global privateKeys
# the encryption flag
global ENCRYPT
publicKeysHex = {}
privateKeysHex = {}
publicKeys = {}
privateKeys = {}
# this is 0xEC
ENCRYPT = 236
# Define the header fields that stay constant across packets.
# The header length is 40 bytes: the field widths in bits are
# 8 + 8 + 8 + 8 + 16 + 16 + 32 + 32 + 64 + 64 + 32 + 32 = 320 bits, and 320 / 8 = 40 bytes.
SOCK352_SYN = 0x01
SOCK352_FIN = 0x02
SOCK352_ACK = 0x04
SOCK352_RESET = 0x08
SOCK352_HAS_OPT = 0xA0
version = 1
opt_ptr = 0
protocol = 0
header_len = 40
checksum = 0
source_port = 0
dest_port = 0
window = 0
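# Sanity-check sketch (added for illustration; this assert is not part of the original
# protocol code): the '!BBBBHHLLQQLL' pack format used throughout should agree with the
# 40-byte header length computed above (4*1 + 2*2 + 2*4 + 2*8 + 2*4 = 40 bytes).
assert struct.calcsize('!BBBBHHLLQQLL') == header_len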
# these functions are global to the class and
# define the UDP ports all messages are sent
# and received from
def init(UDPportTx, UDPportRx): # initialize your UDP socket here
global Tx, Rx, s, connections
Tx = int(UDPportTx)
Rx = int(UDPportRx)
s = syssock.socket(syssock.AF_INET, syssock.SOCK_DGRAM)
s.bind(('localhost', Rx))
connections = []
return
# read the keyfile. The result should be a private key and a keychain of
# public keys
def readKeyChain(filename):
global publicKeysHex
global privateKeysHex
global publicKeys
global privateKeys
if (filename):
try:
keyfile_fd = open(filename, "r")
for line in keyfile_fd:
words = line.split()
# skip comment lines: keep only lines with at least 4 words
# whose first word does not contain a '#'
if ((len(words) >= 4) and (words[0].find("#") == -1)):
host = words[1]
port = words[2]
print((host, port))
keyInHex = words[3]
if (words[0] == "private"):
privateKeysHex[(host, port)] = keyInHex
privateKeys[(host, port)] = nacl.public.PrivateKey(keyInHex, nacl.encoding.HexEncoder)
elif (words[0] == "public"):
publicKeysHex[(host, port)] = keyInHex
publicKeys[(host, port)] = nacl.public.PublicKey(keyInHex, nacl.encoding.HexEncoder)
except Exception as e:
print("error: opening keychain file: %s %s" % (filename, repr(e)))
else:
print("error: No filename presented")
return (publicKeys, privateKeys)
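# Usage sketch (assumption: "keychain.txt" is a hypothetical file whose non-comment lines
# look like "<private|public> <host> <port> <key-in-hex>", matching the parser above; the
# caller's own private key is expected under host/port "* *", as looked up in connect()):
# publicKeys, privateKeys = readKeyChain("keychain.txt")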
class socket:
def __init__(self):
self.receivedACK = []
self.receivedSeq_no = []
self.encrypt = False
self.fragments = {}  # cleared when a FIN is handled in __sock352_get_packet()
return
def bind(self, address):
return
def connect(self, *args):
# Check for encryption
global ENCRYPT, privateKeys, publicKeys
if (len(args) >= 1):
address = args[0]
if (len(args) >= 2):
if (args[1] == ENCRYPT):
self.encrypt = True
self.box = Box(privateKeys[('*', '*')], publicKeys[(address[0], str(Tx))])
# Fill in header values
global version, opt_ptr, protocol, header_len, checksum, source_port, dest_port, window
flags = SOCK352_SYN
sequence_no = random.randint(1, 2 ** 32)  # must be an integer for the 'Q' struct field
ack_no = 0
payload_len = 0
# Pack the data
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Connect with the server
destination = address[0]
s.connect((destination, Tx))
print('Trying to connect w/ server..')
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
# Send SYN flagged header to server and receive the server's response and check to see if SYN/ACK
# If the server had another response, resend the packet
while flags != SOCK352_SYN + SOCK352_ACK:
s.send(header)
receivedHeader = ''
# Check for encryption before receiving
if self.encrypt:
receivedHeader = s.recv(header_len + 40)
receivedHeader = self.box.decrypt(receivedHeader)
else:
receivedHeader = s.recv(header_len)
(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len) = udpPkt_hdr_data.unpack(receivedHeader)
# Record received ACK/seq_no
self.receivedACK.append(ack_no)
self.receivedSeq_no.append(sequence_no)
# After receiving SYN/ACK from server, send ACK
flags = SOCK352_ACK
temp = sequence_no
sequence_no = ack_no
ack_no = temp + 1
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
return
def listen(self, backlog):
return
def accept(self, *args):
# Wait to receive header data
print('waiting for connection...')
# Check for encryption
if (len(args) >= 1):
if (args[0] == ENCRYPT):
self.encrypt = True
(clientsocket, address) = self.__sock352_get_packet()
print(address)
return (clientsocket, address)
def close(self): # fill in your code here
# Fill in header values, make sure flags is FIN
global version, opt_ptr, protocol, header_len, checksum, source_port, dest_port, window
flags = SOCK352_FIN
sequence_no = 0
ack_no = 0
payload_len = 0
# Pack the data
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
# Send header and close the socket
s.send(header)
s.close()
return
def send(self, buffer): # fill in your code here
# Fill in header values: sequence_no will be the last receivedACK, and ack_no
# will be the last received sequence_no + 1
global version, opt_ptr, protocol, header_len, checksum, source_port, dest_port, window
flags = SOCK352_ACK
sequence_no = self.receivedACK[-1]
ack_no = self.receivedSeq_no[-1] + 1
# Hard-coded fragmentsize because can't handle it dynamically :(
# This is the max # of bytes that the server can receive
# as defined in server1
index = 0
FRAGMENTSIZE = 4096
fragment = ''
while (index != len(buffer)):
if (len(buffer) - index > FRAGMENTSIZE):
payload_len = FRAGMENTSIZE
# Pack the header data and send it to server
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
# Send fragment to server
fragment = buffer[index:(index + FRAGMENTSIZE)]
# Encrypt fragment if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
fragment = self.box.encrypt(fragment, nonce)
# Set timeout and send fragment
try:
s.settimeout(.2)
s.send(fragment)
except syssock.timeout:
s.send(fragment)
finally:
s.settimeout(None)
print('sent packet: %d bytes' % len(fragment))
# TODO: receive ACK?
# Check for encryption before receiving
if self.encrypt:
receivedHeader = s.recv(header_len + 40)
receivedHeader = self.box.decrypt(receivedHeader)
else:
receivedHeader = s.recv(header_len)
# Increment index
index += FRAGMENTSIZE
else:
payload_len = len(buffer) - index
# Pack the header data
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
# Send fragment to server
fragment = buffer[index:len(buffer)]
# Encrypt fragment if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
fragment = self.box.encrypt(fragment, nonce)
# Set timeout and send fragment
try:
s.settimeout(.2)
s.send(fragment)
except syssock.timeout:
s.send(fragment)
finally:
s.settimeout(None)
print('sent packet: %d bytes' % len(fragment))
# TODO: receive ACK?
# Check for encryption before receiving
if self.encrypt:
receivedHeader = s.recv(header_len + 40)
receivedHeader = self.box.decrypt(receivedHeader)
else:
receivedHeader = s.recv(header_len)
# Increment index
index = payload_len
break
return len(buffer)
def recv(self, nbytes):
# Fill in header values
global version, opt_ptr, protocol, header_len, checksum, source_port, dest_port, window
# Receive and unpack header data from client
receivedHeader = ''
# Check for encryption before receiving
if self.encrypt:
receivedHeader = s.recv(header_len + 40)
receivedHeader = self.box.decrypt(receivedHeader)
else:
receivedHeader = s.recv(header_len)
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len) = udpPkt_hdr_data.unpack(receivedHeader)
# Receive the bytes dictated by the payload_len
# Check for encryption before receiving
if self.encrypt:
bytesreceived = s.recv(payload_len + 40)
bytesreceived = self.box.decrypt(bytesreceived)
else:
bytesreceived = s.recv(payload_len)
# Give ack_no the value of the next sequence number the client should send over
# And give sequence_no the value of what the client is asking for
temp = ack_no
ack_no = sequence_no + payload_len + 1
sequence_no = temp
flags = SOCK352_ACK
# Pack and send the ACK to the client
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
print('received %d bytes' % len(bytesreceived))
return bytesreceived
def __sock352_get_packet(self):
global version, opt_ptr, protocol, header_len, checksum, source_port, dest_port, window
# Receive and unpack the data
receivedHeader = ''
addr = (1, 1)
# If encryption is enabled, receive a longer encrypted message and decrypt it
if self.encrypt:
(receivedHeader, addr) = s.recvfrom(header_len + 40)
if addr[0] == '127.0.0.1':
self.box = Box(privateKeys[('*', '*')], publicKeys[('localhost', str(Tx))])
else:
self.box = Box(privateKeys[('*', '*')], publicKeys[(addr[0], str(Tx))])
receivedHeader = self.box.decrypt(receivedHeader)
else:
(receivedHeader, addr) = s.recvfrom(header_len)
sock352PktHdrData = '!BBBBHHLLQQLL'
udpPkt_hdr_data = struct.Struct(sock352PktHdrData)
(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len) = udpPkt_hdr_data.unpack(receivedHeader)
# If the header flag was SYN
if flags == SOCK352_SYN:
# Check to see if the address is in the list of connections
# and if it's not, send back a random sequence_no and a
# and set the ack_no to the incoming sequence_no + 1
# Also instantiate a second socket to communicate w/ client
if addr not in connections:
connections.append(addr)
ack_no = sequence_no + 1
sequence_no = random.randint(1, 2 ** 32)  # must be an integer for the 'Q' struct field
flags = SOCK352_SYN + SOCK352_ACK
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
s.connect(addr)
# Check for encryption before sending
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
# Send out the SYN/ACK flagged header, and wait for
# ACK response from client
while flags != SOCK352_ACK:
s.send(header)
# Check for encryption before receiving
if self.encrypt:
receivedHeader = s.recv(header_len + 40)
receivedHeader = self.box.decrypt(receivedHeader)
else:
receivedHeader = s.recv(header_len)
(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len) = udpPkt_hdr_data.unpack(receivedHeader)
print('Connected to:')
return (self, addr)
# If it is in the list, the connection is reset
else:
sequence_no = sequence_no + 1
flags = SOCK352_RESET
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
s.connect(addr)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
return
else:
# If the header flag is FIN, send back a FIN and remove the addr
# from connections and clear the fragments
if flags == SOCK352_FIN:
flags = SOCK352_FIN
header = udpPkt_hdr_data.pack(version, flags, opt_ptr, protocol, header_len, checksum,
source_port, dest_port, sequence_no, ack_no, window,
payload_len)
connections.remove(addr)
self.fragments.clear()
s.connect(addr)
# Encrypt message if encrypt is enabled
if self.encrypt:
nonce = nacl.utils.random(Box.NONCE_SIZE)
header = self.box.encrypt(header, nonce)
s.send(header)
print('closing connection')
|
|
import os
import textwrap
import time
import unittest
import pytest
from parameterized import parameterized
from conans.model.ref import ConanFileReference
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient, TestServer, \
NO_SETTINGS_PACKAGE_ID, GenConanfile
from conans.test.utils.scm import create_local_git_repo
class PythonExtendTest(unittest.TestCase):
def test_with_python_requires(self):
# https://github.com/conan-io/conan/issues/5140
client = TestClient()
client.save({"conanfile.py": GenConanfile().with_name("dep").with_version("0.1")})
client.run("export . user/testing")
conanfile = textwrap.dedent("""
from conans import ConanFile, python_requires
p = python_requires("dep/0.1@user/testing")
class APck(ConanFile):
pass
""")
client.save({"conanfile.py": conanfile})
client.run('editable add . pkg/0.1@user/testing')
self.assertIn("Reference 'pkg/0.1@user/testing' in editable mode", client.out)
client.run("editable remove pkg/0.1@user/testing")
client.run("create . pkg/0.1@user/testing")
client.run("copy pkg/0.1@user/testing company/stable")
self.assertIn("Copied pkg/0.1@user/testing to pkg/0.1@company/stable", client.out)
# imports should not raise either
client.run("install .")
client.run("imports .")
def _define_base(self, client):
conanfile = """from conans import ConanFile
class MyConanfileBase(ConanFile):
def source(self):
self.output.info("My cool source!")
def build(self):
self.output.info("My cool build!")
def package(self):
self.output.info("My cool package!")
def package_info(self):
self.output.info("My cool package_info!")
"""
client.save({"conanfile.py": conanfile})
client.run("export . MyConanfileBase/1.1@lasote/testing")
def test_with_alias(self):
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
self._define_base(client)
client.run("alias MyConanfileBase/LATEST@lasote/testing MyConanfileBase/1.1@lasote/testing")
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/LATEST@lasote/testing")
class PkgTest(base.MyConanfileBase):
pass
"""
client.save({"conanfile.py": reuse}, clean_first=True)
client.run("create . Pkg/0.1@lasote/testing")
def test_reuse(self):
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
self._define_base(client)
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/1.1@lasote/testing")
class PkgTest(base.MyConanfileBase):
pass
"""
client.save({"conanfile.py": reuse}, clean_first=True)
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: My cool source!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool build!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
client.run("upload * --all --confirm")
client.run("remove * -f")
client.run("install Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
client.run("remove * -f")
client.run("download Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Package installed %s" % NO_SETTINGS_PACKAGE_ID,
client.out)
def test_reuse_version_ranges(self):
client = TestClient()
self._define_base(client)
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/[>1.0,<1.2]@lasote/testing")
class PkgTest(base.MyConanfileBase):
pass
"""
client.save({"conanfile.py": reuse}, clean_first=True)
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Python requires", str(client.out).splitlines())
self.assertIn(" MyConanfileBase/1.1@lasote/testing", str(client.out).splitlines())
self.assertIn("Pkg/0.1@lasote/testing: My cool source!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool build!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
def test_invalid(self):
client = TestClient()
reuse = """from conans import ConanFile, python_requires
class PkgTest(ConanFile):
def source(self):
base = python_requires("MyConanfileBase/1.0@lasote/testing")
"""
client.save({"conanfile.py": reuse})
client.run("create . Pkg/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Pkg/0.1@lasote/testing: Error in source() method, line 4", client.out)
self.assertIn('base = python_requires("MyConanfileBase/1.0@lasote/testing', client.out)
self.assertIn("ConanException: Invalid use of python_requires"
"(MyConanfileBase/1.0@lasote/testing)", client.out)
def test_invalid2(self):
client = TestClient()
reuse = """import conans
class PkgTest(conans.ConanFile):
def source(self):
base = conans.python_requires("MyConanfileBase/1.0@lasote/testing")
"""
client.save({"conanfile.py": reuse})
client.run("create . Pkg/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Pkg/0.1@lasote/testing: Error in source() method, line 4",
client.out)
self.assertIn('base = conans.python_requires("MyConanfileBase/1.0@lasote/testing',
client.out)
self.assertIn("ConanException: Invalid use of python_requires"
"(MyConanfileBase/1.0@lasote/testing)", client.out)
def test_invalid3(self):
client = TestClient()
reuse = """from conans import ConanFile
from helpers import my_print
class PkgTest(ConanFile):
exports = "helpers.py"
def source(self):
my_print()
"""
base = """from conans import python_requires
def my_print():
base = python_requires("MyConanfileBase/1.0@lasote/testing")
"""
client.save({"conanfile.py": reuse, "helpers.py": base})
client.run("create . Pkg/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Pkg/0.1@lasote/testing: Error in source() method, line 7",
client.out)
self.assertIn('my_print()', client.out)
self.assertIn("ConanException: Invalid use of python_requires"
"(MyConanfileBase/1.0@lasote/testing)", client.out)
def test_invalid4(self):
client = TestClient()
reuse = """from conans import ConanFile
from helpers import my_print
class PkgTest(ConanFile):
exports = "helpers.py"
def source(self):
my_print()
"""
base = """import conans
def my_print():
base = conans.python_requires("MyConanfileBase/1.0@lasote/testing")
"""
client.save({"conanfile.py": reuse, "helpers.py": base})
client.run("create . Pkg/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Pkg/0.1@lasote/testing: Error in source() method, line 7",
client.out)
self.assertIn('my_print()', client.out)
self.assertIn("ConanException: Invalid use of python_requires"
"(MyConanfileBase/1.0@lasote/testing)", client.out)
def test_multiple_reuse(self):
client = TestClient()
conanfile = """from conans import ConanFile
class SourceBuild(ConanFile):
def source(self):
self.output.info("My cool source!")
def build(self):
self.output.info("My cool build!")
"""
client.save({"conanfile.py": conanfile})
client.run("export . SourceBuild/1.0@user/channel")
conanfile = """from conans import ConanFile
class PackageInfo(ConanFile):
def package(self):
self.output.info("My cool package!")
def package_info(self):
self.output.info("My cool package_info!")
"""
client.save({"conanfile.py": conanfile})
client.run("export . PackageInfo/1.0@user/channel")
conanfile = """from conans import ConanFile, python_requires
source = python_requires("SourceBuild/1.0@user/channel")
package = python_requires("PackageInfo/1.0@user/channel")
class MyConanfileBase(source.SourceBuild, package.PackageInfo):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: My cool source!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool build!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
def test_transitive_py_requires(self):
# https://github.com/conan-io/conan/issues/5529
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Base(ConanFile):
pass
""")
client.save({"conanfile.py": conanfile})
client.run("export . base/1.0@user/channel")
conanfile = textwrap.dedent("""
from conans import ConanFile, python_requires
py_req = python_requires("base/1.0@user/channel")
class PackageInfo(ConanFile):
pass
""")
client.save({"conanfile.py": conanfile})
client.run("export . helper/1.0@user/channel")
conanfile = textwrap.dedent("""
from conans import ConanFile, python_requires
source = python_requires("helper/1.0@user/channel")
class MyConanfileBase(ConanFile):
pass
""")
client.save({"conanfile.py": conanfile})
client.run("install . pkg/0.1@user/channel")
lockfile = client.load("conan.lock")
if client.cache.config.revisions_enabled:
self.assertIn("base/1.0@user/channel#e41727b922c6ae54b216a58442893f3a", lockfile)
self.assertIn("helper/1.0@user/channel#98457e1f8d9174ed053747634ce0ea1a", lockfile)
else:
self.assertIn("base/1.0@user/channel", lockfile)
self.assertIn("helper/1.0@user/channel", lockfile)
client.run("source .")
self.assertIn("conanfile.py (pkg/0.1@user/channel): Configuring sources in", client.out)
def test_multiple_requires_error(self):
client = TestClient()
conanfile = """from conans import ConanFile
myvar = 123
def myfunct():
return 123
class Pkg(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("export . Pkg1/1.0@user/channel")
conanfile = """from conans import ConanFile
myvar = 234
def myfunct():
return 234
class Pkg(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("export . Pkg2/1.0@user/channel")
conanfile = """from conans import ConanFile, python_requires
pkg1 = python_requires("Pkg1/1.0@user/channel")
pkg2 = python_requires("Pkg2/1.0@user/channel")
class MyConanfileBase(ConanFile):
def build(self):
self.output.info("PKG1 : %s" % pkg1.myvar)
self.output.info("PKG2 : %s" % pkg2.myvar)
self.output.info("PKG1F : %s" % pkg1.myfunct())
self.output.info("PKG2F : %s" % pkg2.myfunct())
"""
client.save({"conanfile.py": conanfile})
client.run("create . Consumer/0.1@lasote/testing")
self.assertIn("Consumer/0.1@lasote/testing: PKG1 : 123", client.out)
self.assertIn("Consumer/0.1@lasote/testing: PKG2 : 234", client.out)
self.assertIn("Consumer/0.1@lasote/testing: PKG1F : 123", client.out)
self.assertIn("Consumer/0.1@lasote/testing: PKG2F : 234", client.out)
def test_local_import(self):
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
import mydata
class MyConanfileBase(ConanFile):
exports = "*.py"
def source(self):
self.output.info(mydata.src)
def build(self):
self.output.info(mydata.build)
def package(self):
self.output.info(mydata.pkg)
def package_info(self):
self.output.info(mydata.info)
"""
mydata = """src = "My cool source!"
build = "My cool build!"
pkg = "My cool package!"
info = "My cool package_info!"
"""
client.save({"conanfile.py": conanfile,
"mydata.py": mydata})
client.run("export . MyConanfileBase/1.1@lasote/testing")
reuse = """from conans import ConanFile, python_requires
base = python_requires("MyConanfileBase/1.1@lasote/testing")
class PkgTest(base.MyConanfileBase):
pass
"""
client.save({"conanfile.py": reuse}, clean_first=True)
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: My cool source!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool build!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
client.run("upload * --all --confirm")
client.run("remove * -f")
client.run("install Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: My cool package_info!", client.out)
client.run("remove * -f")
client.run("download Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Package installed %s" % NO_SETTINGS_PACKAGE_ID,
client.out)
@pytest.mark.tool_git
def test_reuse_scm(self):
client = TestClient()
conanfile = """from conans import ConanFile
scm = {"type" : "git",
"url" : "somerepo",
"revision" : "auto"}
class MyConanfileBase(ConanFile):
scm = scm
"""
create_local_git_repo({"conanfile.py": conanfile}, branch="my_release",
folder=client.current_folder)
client.run("export . MyConanfileBase/1.1@lasote/testing")
client.run("get MyConanfileBase/1.1@lasote/testing")
# The global scm is left as-is
self.assertIn("""scm = {"type" : "git",
"url" : "somerepo",
"revision" : "auto"}""", client.out)
# but the class one is replaced
self.assertNotIn("scm = scm", client.out)
self.assertIn(' scm = {"revision":', client.out)
self.assertIn('"type": "git",', client.out)
self.assertIn('"url": "somerepo"', client.out)
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/1.1@lasote/testing")
class PkgTest(base.MyConanfileBase):
scm = base.scm
other = 123
def _my_method(self):
pass
"""
client.save({"conanfile.py": reuse})
# Commit the changes so the export captures and replaces the scm data
client.run_command('git add .')
client.run_command('git commit -m "Modified conanfile"')
client.run("export . Pkg/0.1@lasote/testing")
client.run("get Pkg/0.1@lasote/testing")
self.assertNotIn("scm = base.scm", client.out)
self.assertIn('scm = {"revision":', client.out)
self.assertIn('"type": "git",', client.out)
self.assertIn('"url": "somerepo"', client.out)
def test_reuse_class_members(self):
client = TestClient()
conanfile = """from conans import ConanFile
class MyConanfileBase(ConanFile):
license = "MyLicense"
author = "[email protected]"
exports = "*.txt"
exports_sources = "*.h"
short_paths = True
generators = "cmake"
"""
client.save({"conanfile.py": conanfile})
client.run("export . Base/1.1@lasote/testing")
reuse = """from conans import python_requires
import os
base = python_requires("Base/1.1@lasote/testing")
class PkgTest(base.MyConanfileBase):
def build(self):
self.output.info("Exports sources! %s" % self.exports_sources)
self.output.info("Short paths! %s" % self.short_paths)
self.output.info("License! %s" % self.license)
self.output.info("Author! %s" % self.author)
assert os.path.exists("conanbuildinfo.cmake")
"""
client.save({"conanfile.py": reuse,
"file.h": "header",
"other.txt": "text"})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Exports sources! *.h", client.out)
self.assertIn("Pkg/0.1@lasote/testing exports: Copied 1 '.txt' file: other.txt",
client.out)
self.assertIn("Pkg/0.1@lasote/testing exports_sources: Copied 1 '.h' file: file.h",
client.out)
self.assertIn("Pkg/0.1@lasote/testing: Short paths! True", client.out)
self.assertIn("Pkg/0.1@lasote/testing: License! MyLicense", client.out)
self.assertIn("Pkg/0.1@lasote/testing: Author! [email protected]", client.out)
ref = ConanFileReference.loads("Pkg/0.1@lasote/testing")
self.assertTrue(os.path.exists(os.path.join(client.cache.package_layout(ref).export(),
"other.txt")))
def test_reuse_name_version(self):
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class BasePkg(ConanFile):
name = "Base"
version = "1.1"
""")
client.save({"conanfile.py": conanfile})
client.run("export . lasote/testing")
reuse = textwrap.dedent("""
from conans import python_requires
base = python_requires("Base/1.1@lasote/testing")
class PkgTest(base.BasePkg):
pass
""")
client.save({"conanfile.py": reuse})
client.run("create . Pkg/0.1@lasote/testing", assert_error=True)
self.assertIn("ERROR: Package recipe with name Pkg!=Base", client.out)
reuse = textwrap.dedent("""
from conans import python_requires
base = python_requires("Base/1.1@lasote/testing")
class PkgTest(base.BasePkg):
name = "Pkg"
version = "0.1"
""")
client.save({"conanfile.py": reuse})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Created package ", client.out)
client.run("create . lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: Created package ", client.out)
def test_reuse_set_name_set_version(self):
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile, load
class BasePkg(ConanFile):
def set_name(self):
self.name = load("name.txt")
def set_version(self):
self.version = load("version.txt")
""")
client.save({"conanfile.py": conanfile,
"name.txt": "Base",
"version.txt": "1.1"})
client.run("export . user/testing")
reuse = textwrap.dedent("""
from conans import python_requires
base = python_requires("Base/1.1@user/testing")
class PkgTest(base.BasePkg):
pass
""")
client.save({"conanfile.py": reuse,
"name.txt": "Pkg",
"version.txt": "2.3"})
client.run("create . user/testing")
self.assertIn("Pkg/2.3@user/testing: Created package", client.out)
def test_reuse_exports_conflict(self):
conanfile = """from conans import ConanFile
class Base(ConanFile):
exports_sources = "*.h"
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"header.h": "my header Base!!"})
client.run("export . Base/0.1@user/testing")
conanfile = """from conans import python_requires, load
base = python_requires("Base/0.1@user/testing")
class Pkg2(base.Base):
def build(self):
self.output.info("Exports sources: %s" % self.exports_sources)
self.output.info("HEADER CONTENT!: %s" % load("header.h"))
"""
client.save({"conanfile.py": conanfile,
"header.h": "my header Pkg!!"}, clean_first=True)
client.run("create . Pkg/0.1@user/testing")
self.assertIn("Pkg/0.1@user/testing: HEADER CONTENT!: my header Pkg!!", client.out)
def test_transitive_imports_conflicts(self):
# https://github.com/conan-io/conan/issues/3874
client = TestClient()
conanfile = """from conans import ConanFile
import myhelper
class SourceBuild(ConanFile):
exports = "*.py"
"""
helper = """def myhelp(output):
output.info("MyHelperOutput!")
"""
client.save({"conanfile.py": conanfile,
"myhelper.py": helper})
client.run("export . base1/1.0@user/channel")
client.save({"myhelper.py": helper.replace("MyHelperOutput!", "MyOtherHelperOutput!")})
client.run("export . base2/1.0@user/channel")
conanfile = """from conans import ConanFile, python_requires
base2 = python_requires("base2/1.0@user/channel")
base1 = python_requires("base1/1.0@user/channel")
class MyConanfileBase(ConanFile):
def build(self):
base1.myhelper.myhelp(self.output)
base2.myhelper.myhelp(self.output)
"""
# This should work even if there is a local "myhelper.py" file, which could be
# accidentally imported (it used to be, which was a bug)
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: MyHelperOutput!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: MyOtherHelperOutput!", client.out)
# Now, the same, but with "clean_first=True", should keep working
client.save({"conanfile.py": conanfile}, clean_first=True)
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: MyHelperOutput!", client.out)
self.assertIn("Pkg/0.1@lasote/testing: MyOtherHelperOutput!", client.out)
def test_update(self):
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
somevar = 42
class MyConanfileBase(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("export . MyConanfileBase/1.1@lasote/testing")
client.run("upload * --confirm")
client2 = TestClient(servers=client.servers, users={"default": [("lasote", "mypass")]})
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/1.1@lasote/testing")
class PkgTest(base.MyConanfileBase):
def configure(self):
self.output.info("PYTHON REQUIRE VAR %s" % base.somevar)
"""
client2.save({"conanfile.py": reuse})
client2.run("install .")
self.assertIn("conanfile.py: PYTHON REQUIRE VAR 42", client2.out)
client.save({"conanfile.py": conanfile.replace("42", "143")})
time.sleep(1) # guarantee time offset
client.run("export . MyConanfileBase/1.1@lasote/testing")
client.run("upload * --confirm")
client2.run("install . --update")
self.assertIn("conanfile.py: PYTHON REQUIRE VAR 143", client2.out)
def test_update_ranges(self):
# https://github.com/conan-io/conan/issues/4650#issuecomment-497464305
client = TestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
somevar = 42
class MyConanfileBase(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("export . MyConanfileBase/1.1@lasote/testing")
client.run("upload * --confirm")
client2 = TestClient(servers=client.servers, users={"default": [("lasote", "mypass")]})
reuse = """from conans import python_requires
base = python_requires("MyConanfileBase/[>1.0]@lasote/testing")
class PkgTest(base.MyConanfileBase):
def configure(self):
self.output.info("PYTHON REQUIRE VAR %s" % base.somevar)
"""
client2.save({"conanfile.py": reuse})
client2.run("install .")
self.assertIn("conanfile.py: PYTHON REQUIRE VAR 42", client2.out)
client.save({"conanfile.py": conanfile.replace("42", "143")})
# Make sure to bump the version!
client.run("export . MyConanfileBase/1.2@lasote/testing")
client.run("upload * --confirm")
client2.run("install . --update")
self.assertIn("conanfile.py: PYTHON REQUIRE VAR 143", client2.out)
def test_duplicate_pyreq(self):
t = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class PyReq(ConanFile):
pass
""")
t.save({"conanfile.py": conanfile})
t.run("export . pyreq/1.0@user/channel")
t.run("export . pyreq/2.0@user/channel")
conanfile = textwrap.dedent("""
from conans import ConanFile, python_requires
pyreq1 = python_requires("pyreq/1.0@user/channel")
pyreq2 = python_requires("pyreq/2.0@user/channel")
class Lib(ConanFile):
pass
""")
t.save({"conanfile.py": conanfile})
t.run("create . name/version@user/channel", assert_error=True)
self.assertIn("ERROR: Error loading conanfile", t.out)
self.assertIn("Same python_requires with different versions not allowed for a conanfile",
t.out)
def test_short_paths(self):
# https://github.com/conan-io/conan/issues/5814
client = TestClient(default_server_user=True)
conanfile = textwrap.dedent("""
from conans import ConanFile
class MyConanfileBase(ConanFile):
exports = "*.txt"
exports_sources = "*.h"
""")
client.save({"conanfile.py": conanfile,
"file.h": "header",
"other.txt": "text"})
client.run("create . Base/1.2@lasote/testing")
reuse = textwrap.dedent("""
from conans import python_requires
base = python_requires("Base/1.2@lasote/testing")
class PkgTest(base.MyConanfileBase):
short_paths = True
name = "consumer"
version = "1.0.0"
""")
client.save({"conanfile.py": reuse}, clean_first=True)
client.run("create . lasote/testing")
self.assertIn("consumer/1.0.0@lasote/testing: Created package revision", client.out)
class PythonRequiresNestedTest(unittest.TestCase):
@parameterized.expand([(False, False), (True, False), (True, True), ])
def test_python_requires_with_alias(self, use_alias, use_alias_of_alias):
assert use_alias if use_alias_of_alias else True
version_str = "latest2" if use_alias_of_alias else "latest" if use_alias else "1.0"
client = TestClient()
# Create python_requires
client.save({CONANFILE: """
from conans import ConanFile
class PythonRequires0(ConanFile):
def build(self):
super(PythonRequires0, self).build()
self.output.info(">>> PythonRequires0::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("export . python_requires0/1.0@jgsogo/test")
client.run("alias python_requires0/latest@jgsogo/test "
"python_requires0/1.0@jgsogo/test")
client.run("alias python_requires0/latest2@jgsogo/test "
"python_requires0/latest@jgsogo/test")
# Create a python_requires that requires the previous one
client.save({CONANFILE: """
from conans import ConanFile, python_requires
base = python_requires("python_requires0/{v}@jgsogo/test")
class PythonRequires1(base.PythonRequires0):
def build(self):
super(PythonRequires1, self).build()
self.output.info(">>> PythonRequires1::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("export . python_requires1/1.0@jgsogo/test")
client.run("alias python_requires1/latest@jgsogo/test python_requires1/1.0@jgsogo/test")
client.run("alias python_requires1/latest2@jgsogo/test python_requires1/latest@jgsogo/test")
# Create python requires
client.save({CONANFILE: """
from conans import ConanFile, python_requires
class PythonRequires11(ConanFile):
def build(self):
super(PythonRequires11, self).build()
self.output.info(">>> PythonRequires11::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("export . python_requires11/1.0@jgsogo/test")
client.run("alias python_requires11/latest@jgsogo/test python_requires11/1.0@jgsogo/test")
client.run("alias python_requires11/latest2@jgsogo/test "
"python_requires11/latest@jgsogo/test")
# Create a python_requires that requires python_requires0
client.save({CONANFILE: """
from conans import ConanFile, python_requires
base = python_requires("python_requires0/{v}@jgsogo/test")
class PythonRequires22(base.PythonRequires0):
def build(self):
super(PythonRequires22, self).build()
self.output.info(">>> PythonRequires22::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("export . python_requires22/1.0@jgsogo/test")
client.run("alias python_requires22/latest@jgsogo/test python_requires22/1.0@jgsogo/test")
client.run(
"alias python_requires22/latest2@jgsogo/test python_requires22/latest@jgsogo/test")
# Another python_requires that requires python_requires1 and python_requires11
client.save({CONANFILE: """
from conans import ConanFile, python_requires
base_class = python_requires("python_requires1/{v}@jgsogo/test")
base_class2 = python_requires("python_requires11/{v}@jgsogo/test")
class PythonRequires2(base_class.PythonRequires1, base_class2.PythonRequires11):
def build(self):
super(PythonRequires2, self).build()
self.output.info(">>> PythonRequires2::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("export . python_requires2/1.0@jgsogo/test")
client.run("alias python_requires2/latest@jgsogo/test python_requires2/1.0@jgsogo/test")
client.run("alias python_requires2/latest2@jgsogo/test python_requires2/latest@jgsogo/test")
# My project, which consumes the latest python_requires
client.save({CONANFILE: """
from conans import ConanFile, python_requires
base_class = python_requires("python_requires2/{v}@jgsogo/test")
base_class2 = python_requires("python_requires22/{v}@jgsogo/test")
class Project(base_class.PythonRequires2, base_class2.PythonRequires22):
def build(self):
super(Project, self).build()
self.output.info(">>> Project::build (v={{}})".format(self.version))
""".format(v=version_str)})
client.run("create . project/1.0@jgsogo/test --build=missing")
# Check that everything is being built
self.assertIn("project/1.0@jgsogo/test: >>> PythonRequires11::build (v=1.0)", client.out)
self.assertIn("project/1.0@jgsogo/test: >>> PythonRequires0::build (v=1.0)", client.out)
self.assertIn("project/1.0@jgsogo/test: >>> PythonRequires22::build (v=1.0)", client.out)
self.assertIn("project/1.0@jgsogo/test: >>> PythonRequires1::build (v=1.0)", client.out)
self.assertIn("project/1.0@jgsogo/test: >>> PythonRequires2::build (v=1.0)", client.out)
self.assertIn("project/1.0@jgsogo/test: >>> Project::build (v=1.0)", client.out)
# Check that all the graph is printed properly
# - requirements
self.assertIn(" project/1.0@jgsogo/test from local cache - Cache", client.out)
# - python requires
self.assertIn(" python_requires11/1.0@jgsogo/test", client.out)
self.assertIn(" python_requires0/1.0@jgsogo/test", client.out)
self.assertIn(" python_requires22/1.0@jgsogo/test", client.out)
self.assertIn(" python_requires1/1.0@jgsogo/test", client.out)
self.assertIn(" python_requires2/1.0@jgsogo/test", client.out)
# - packages
self.assertIn(" project/1.0@jgsogo/test:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Build",
client.out)
# - no mention of the aliases
self.assertNotIn("alias", client.out)
self.assertNotIn("alias2", client.out)
def test_local_build(self):
client = TestClient()
client.save({"conanfile.py": "var=42\n"+
str(GenConanfile().with_name("Tool").with_version("0.1"))})
client.run("export . Tool/0.1@user/channel")
conanfile = """from conans import ConanFile, python_requires
pkg1 = python_requires("Tool/0.1@user/channel")
class MyConanfileBase(ConanFile):
def source(self):
self.output.info("Pkg1 source: %s" % pkg1.var)
def build(self):
self.output.info("Pkg1 build: %s" % pkg1.var)
def package(self):
self.output.info("Pkg1 package: %s" % pkg1.var)
"""
client.save({"conanfile.py": conanfile})
client.run("source .")
self.assertIn("conanfile.py: Pkg1 source: 42", client.out)
client.run("install .")
client.run("build .")
self.assertIn("conanfile.py: Pkg1 build: 42", client.out)
client.run("package .")
self.assertIn("conanfile.py: Pkg1 package: 42", client.out)
client.run("export-pkg . pkg1/0.1@user/testing")
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
APIRequest class
"""
import datetime
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import log as logging
LOG = logging.getLogger("nova.api.request")
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
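# Illustrative conversions (derived from the helpers above):
#   _camelcase_to_underscore('InstanceId')  -> 'instance_id'
#   _underscore_to_camelcase('instance_id') -> 'InstanceId'
#   _underscore_to_xmlcase('instance_id')   -> 'instanceId'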
def _database_to_isoformat(datetimeobj):
"""Return a xs:dateTime parsable string from datatime"""
return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
=============  =====================================================
When value is  Returns
=============  =====================================================
zero-length    ''
'None'         None
'True'         True
'False'        False
'0', '-0'      0
0xN, -0xN      int from hex (positive) (N is any number)
0bN, -0bN      int from binary (positive) (N is any number)
*              try conversion to int, float, complex; else the value
=============  =====================================================
"""
if len(value) == 0:
return ''
if value == 'None':
return None
if value == 'True':
return True
if value == 'False':
return False
valueneg = value[1:] if value[0] == '-' else value
if valueneg == '0':
return 0
if valueneg == '':
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
return int(value, 16)
elif valueneg[1] in 'bB':
return int(value, 2)
else:
try:
return int(value, 8)
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
try:
return complex(value)
except ValueError:
return value
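# Illustrative results (derived from the rules above):
#   _try_convert('True')  -> True
#   _try_convert('0x1a')  -> 26
#   _try_convert('-3.5')  -> -3.5
#   _try_convert('abc')   -> 'abc'  (unconvertible strings come back unchanged)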
class APIRequest(object):
def __init__(self, controller, action, version, args):
self.controller = controller
self.action = action
self.version = version
self.args = args
def invoke(self, context):
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
controller = self.controller
action = self.action
_error = _('Unsupported API request: controller = %(controller)s,'
' action = %(action)s') % locals()
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
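# Convert EC2-style query arguments: CamelCase keys become snake_case and
# dotted keys such as 'InstanceId.1' are grouped into dicts; numeric
# sub-keys are flattened into lists further below.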
args = {}
for key, value in self.args.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if isinstance(value, str) or isinstance(value, unicode):
# NOTE(vish): Automatically convert strings back
# into their respective values
value = _try_convert(value)
if len(parts) > 1:
d = args.get(key, {})
d[parts[1]] = value
value = d
args[key] = value
for key in args.keys():
# NOTE(vish): Turn numeric dict keys into lists
if isinstance(args[key], dict):
if args[key] != {} and args[key].keys()[0].isdigit():
s = args[key].items()
s.sort()
args[key] = [v for k, v in s]
result = method(context, **args)
return self._render_response(result, context.request_id)
def _render_response(self, response_data, request_id):
xml = minidom.Document()
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
'http://ec2.amazonaws.com/doc/%s/' % self.version)
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
if response_data == True:
self._render_dict(xml, response_el, {'return': 'true'})
else:
self._render_dict(xml, response_el, response_data)
xml.appendChild(response_el)
response = xml.toxml()
xml.unlink()
LOG.debug(response)
return response
def _render_dict(self, xml, el, data):
try:
for key in data.keys():
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
LOG.debug(data)
raise
def _render_data(self, xml, el_name, data):
el_name = _underscore_to_xmlcase(el_name)
data_el = xml.createElement(el_name)
if isinstance(data, list):
for item in data:
data_el.appendChild(self._render_data(xml, 'item', item))
elif isinstance(data, dict):
self._render_dict(xml, data_el, data)
elif hasattr(data, '__dict__'):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif isinstance(data, datetime.datetime):
data_el.appendChild(
xml.createTextNode(_database_to_isoformat(data)))
elif data is not None:
data_el.appendChild(xml.createTextNode(str(data)))
return data_el
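# Illustrative use (the controller object, action, API version and context
# below are assumptions, not defined in this module):
#   request = APIRequest(cloud_controller, 'DescribeRegions', '2010-08-31',
#                        {'RegionName.1': 'us-east-1'})
#   xml_body = request.invoke(context)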
|
|
import os
import app
import unittest
import json
from base64 import b64encode
class AppTestCase(unittest.TestCase):
def setUp(self):
app.app.config['TESTING'] = True
self.test_client = app.app.test_client()
#self.app = app.app
app.init_db()
def tearDown(self):
pass
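# Helper: POST credentials to /authorizations/ and return the user's token,
# or None when authentication fails.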
def login(self, data=None):
rv = self.test_client.post('/authorizations/',data=json.dumps(data), content_type='application/json')
if 'users' in json.loads(rv.data):
return json.loads(rv.data)['users'][0]['token']
else:
return None
def test_table_of_contents(self):
rv = self.test_client.get('/')
assert 'links' in json.loads(rv.data)
#
# Widgets
#
# test getting one widget by id
def test_get_by_id(self):
rv = self.test_client.get('/widgets/widget1')
data = json.loads(rv.data)
assert 'widgets' in data
assert len(data['widgets']) == 1
assert data['widgets'][0]['id'] == "widget1"
# test getting collection of widgets
def test_get_collection(self):
rv = self.test_client.get('/widgets/')
data = json.loads(rv.data)
assert 'widgets' in data
assert len(data['widgets']) >= 1
assert data['widgets'][0]['id'] == "widget1"
# test not found for a widget with an unknown id
def test_get_by_id_not_found(self):
rv = self.test_client.get('/widgets/ansdnfasdfnasdfasdfffff')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) > 0
assert rv.status_code == 404
# delete a widget
def test_delete_widget_yes(self):
rv = self.test_client.delete('/widgets/widget1')
assert '' == rv.data
assert rv.status_code == 204
rv = self.test_client.delete('/widgets/widget1sdffffffsasdfasdf')
assert 'errors' in json.loads(rv.data)
assert 'not found' in json.loads(rv.data)['errors'][0]['message']
assert 404 == rv.status_code
# update a widget
def test_update_widget(self):
data = [{"op":"replace","path":"name","value":"updated name"}]
rv = self.test_client.patch('/widgets/widget1',data=json.dumps(data), content_type='application/json')
assert '' == rv.data
assert 204 == rv.status_code
# fail when updating widget that does not exist
data = [{"op":"replace","path":"name","value":"updated name"}]
rv = self.test_client.patch('/widgets/nohaynadaaqui',data=json.dumps(data), content_type='application/json')
assert 'errors' in json.loads(rv.data)
assert 'not found' in json.loads(rv.data)['errors'][0]['message']
assert 404 == rv.status_code
# create widget
def test_create_widget(self):
data = {"widgets":[{"name":"widget new name"}]}
rv = self.test_client.post('/widgets/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'widgets' in data
assert len(data['widgets']) == 1
assert 201 == rv.status_code
# fail if missing required fields
data = {"widgets":[{"invalidfield":"widget invalid field"}]}
rv = self.test_client.post('/widgets/',data=json.dumps(data), content_type='application/json')
assert 'errors' in json.loads(rv.data)
assert 'name is required' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 422 == rv.status_code
#
# Authorizations
#
# valid login
def test_valid_login(self):
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
rv = self.test_client.post('/login/',data=json.dumps(data), content_type='application/json')
assert 'users' in json.loads(rv.data)
assert '[email protected]' == json.loads(rv.data)['users'][0]['id']
assert 'token' in json.loads(rv.data)['users'][0]
assert json.loads(rv.data)['users'][0]['token'] not in [None,'']
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
rv = self.test_client.post('/authorizations/',data=json.dumps(data), content_type='application/json')
assert 'users' in json.loads(rv.data)
assert '[email protected]' == json.loads(rv.data)['users'][0]['id']
assert 'token' in json.loads(rv.data)['users'][0]
assert json.loads(rv.data)['users'][0]['token'] not in [None,'']
# invalid login
def test_invalid_login(self):
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
rv = self.test_client.post('/login/',data=json.dumps(data), content_type='application/json')
assert 'errors' in json.loads(rv.data)
assert 'Access Denied' in json.loads(rv.data)['errors'][0]['message']
assert 'token' not in json.loads(rv.data)['errors'][0]
assert 401 == rv.status_code
data = {'crazydata':[{"id":"[email protected]","password":"password1"}] }
rv = self.test_client.post('/login/',data=json.dumps(data), content_type='application/json')
assert 'errors' in json.loads(rv.data)
assert 'bad request' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 400 == rv.status_code
#
# Users
#
def test_get_user(self):
#regular user
data = {'authorizations':[{"id":"[email protected]","password":"password2"}] }
user_token = self.login(data)
#admin user
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
admin_token = self.login(data)
#invalid user
data = {'authorizations':[{"id":"[email protected]","password":"invalid"}] }
invalid_token = self.login(data)
assert invalid_token is None
# a bad token should be rejected with a 400 error
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format('badtokenasdf', ''))
}
rv = self.test_client.get('/users/',headers=headers)
data = json.loads(rv.data)
assert 'errors' in data
assert 'bad token' in data['errors'][0]['message']
assert 'token' not in data['errors'][0]
assert 400 == rv.status_code
# admin user should see full list of users
# no password fields should be listed
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format(admin_token, ''))
}
rv = self.test_client.get('/users/',headers=headers)
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) > 1
for u in data['users']:
assert 'password' not in u
# me should just return the current user
rv = self.test_client.get('/me/',headers=headers)
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) == 1
assert data['users'][0]['id'] == '[email protected]'
for u in data['users']:
assert 'password' not in u
# regular user should see just one
# no password fields should be listed
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format(user_token, ''))
}
rv = self.test_client.get('/users/',headers=headers)
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) == 1
for u in data['users']:
assert 'password' not in u
# regular user should not see other users
rv = self.test_client.get('/users/[email protected]',headers=headers)
data = json.loads(rv.data)
assert 'errors' in data
assert 'not found' in data['errors'][0]['message'].lower()
assert 'token' not in data['errors'][0]
assert 404 == rv.status_code
# a bad token should be rejected with a 400 error
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format('badtokenasdf', ''))
}
rv = self.test_client.get('/users/',headers=headers)
data = json.loads(rv.data)
assert 'errors' in data
assert 'bad token' in data['errors'][0]['message']
assert 'token' not in data['errors'][0]
assert 400 == rv.status_code
def test_delete_user(self):
#regular user
data = {'authorizations':[{"id":"[email protected]","password":"password2"}] }
user_token = self.login(data)
#admin user
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
admin_token = self.login(data)
# user cannot delete other users
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format(user_token, ''))
}
rv = self.test_client.delete('/users/[email protected]',headers=headers)
data = json.loads(rv.data)
assert 'errors' in data
assert 'not found' in data['errors'][0]['message'].lower()
assert 'token' not in data['errors'][0]
assert 404 == rv.status_code
# user can delete self only
rv = self.test_client.delete('/users/[email protected]',headers=headers)
#assert '' == rv.data
assert rv.status_code == 204
# admin can delete anyone
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format(admin_token, ''))
}
rv = self.test_client.delete('/users/[email protected]',headers=headers)
#assert '' == rv.data
assert rv.status_code == 204
# users that do not exist
rv = self.test_client.delete('/users/[email protected]',headers=headers)
assert rv.status_code == 404
def test_create_user(self):
# the users attribute with id and password fields is required
data = {"widgets":[{"invalidfield":"widget invalid field"}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
assert 'errors' in json.loads(rv.data)
assert 'bad request' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 400 == rv.status_code
data = {"users":[{"p":"com",'d':'f'}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) >= 1
assert 'id is required' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 'password is required' in json.loads(rv.data)['errors'][1]['message'].lower()
assert 422 == rv.status_code
data = {"users":[{"id":None,'password':None}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) >= 1
assert 'id is required' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 'password is required' in json.loads(rv.data)['errors'][1]['message'].lower()
assert 422 == rv.status_code
data = {"users":[{"id":'','password':''}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) >= 1
assert 'id is required' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 'password is required' in json.loads(rv.data)['errors'][1]['message'].lower()
assert 422 == rv.status_code
# anyone should be able to create an account
data = {"users":[{"id":"[email protected]",'password':'newuser1'}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) == 1
assert 'token' in data['users'][0]
assert 201 == rv.status_code
# error if user id already exists
data = {"users":[{"id":"[email protected]",'password':'newuser1'}]}
rv = self.test_client.post('/users/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) >= 1
assert 'id already exists' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 422 == rv.status_code
# can also use signup url to create an account
data = {"users":[{"id":"[email protected]",'password':'newuser2'}]}
rv = self.test_client.post('/signup/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) == 1
assert 'token' in data['users'][0]
assert 201 == rv.status_code
# regular users cannot assign roles
data = {"users":[{"id":"[email protected]",'password':'newuser3','roles':[1] }]}
rv = self.test_client.post('/signup/',data=json.dumps(data), content_type='application/json')
data = json.loads(rv.data)
assert 'errors' in data
assert len(data['errors']) >= 1
assert 'must be admin' in json.loads(rv.data)['errors'][0]['message'].lower()
assert 403 == rv.status_code
# administrators can assign roles
data = {'authorizations':[{"id":"[email protected]","password":"password1"}] }
admin_token = self.login(data)
headers = {
'Authorization': 'Basic ' + b64encode("{0}:{1}".format(admin_token, ''))
}
data = {"users":[{"id":"[email protected]",'password':'newuser3','roles':[1] }]}
rv = self.test_client.post('/signup/',data=json.dumps(data), content_type='application/json', headers=headers)
data = json.loads(rv.data)
assert 'users' in data
assert len(data['users']) == 1
assert data['users'][0]['roles'] == [1]
assert 'admin users' in data['users'][0]['permissions']
assert 'token' in data['users'][0]
assert 201 == rv.status_code
if __name__ == '__main__':
unittest.main()
|
|
import unittest
import struct
import sys
from test import test_support, string_tests
class StrTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUserStringTest,
string_tests.MixinStrUnicodeTest,
):
type2test = str
# We don't need to propagate to str
def fixtype(self, obj):
return obj
def test_basic_creation(self):
self.assertEqual(str(''), '')
self.assertEqual(str(0), '0')
self.assertEqual(str(0L), '0')
self.assertEqual(str(()), '()')
self.assertEqual(str([]), '[]')
self.assertEqual(str({}), '{}')
a = []
a.append(a)
self.assertEqual(str(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(str(a), '{0: {...}}')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
self.assertRaises(OverflowError, '%c'.__mod__, 0x1234)
@test_support.cpython_only
def test_formatting_huge_precision(self):
from _testcapi import INT_MAX
format_string = "%.{}f".format(INT_MAX + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_formatting_huge_width(self):
format_string = "%{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_conversion(self):
# Make sure __str__() behaves properly
class Foo0:
def __unicode__(self):
return u"foo"
class Foo1:
def __str__(self):
return "foo"
class Foo2(object):
def __str__(self):
return "foo"
class Foo3(object):
def __str__(self):
return u"foo"
class Foo4(unicode):
def __str__(self):
return u"foo"
class Foo5(str):
def __str__(self):
return u"foo"
class Foo6(str):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo7(unicode):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo8(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
class Foo9(str):
def __str__(self):
return "string"
def __unicode__(self):
return "not unicode"
self.assertTrue(str(Foo0()).startswith("<")) # this is different from __unicode__
self.assertEqual(str(Foo1()), "foo")
self.assertEqual(str(Foo2()), "foo")
self.assertEqual(str(Foo3()), "foo")
self.assertEqual(str(Foo4("bar")), "foo")
self.assertEqual(str(Foo5("bar")), "foo")
self.assertEqual(str(Foo6("bar")), "foos")
self.assertEqual(str(Foo7("bar")), "foos")
self.assertEqual(str(Foo8("foo")), "foofoo")
self.assertEqual(str(Foo9("foo")), "string")
self.assertEqual(unicode(Foo9("foo")), u"not unicode")
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
@unittest.skipIf(sys.maxint > (1 << 32) or struct.calcsize('P') != 4,
'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxint)
def test__format__(self):
def test(value, format, expected):
# test both with and without the trailing 's'
self.assertEqual(value.__format__(format), expected)
self.assertEqual(value.__format__(format + 's'), expected)
test('', '', '')
test('abc', '', 'abc')
test('abc', '.3', 'abc')
test('ab', '.3', 'ab')
test('abcdef', '.3', 'abc')
test('abcdef', '.0', '')
test('abc', '3.3', 'abc')
test('abc', '2.3', 'abc')
test('abc', '2.2', 'ab')
test('abc', '3.2', 'ab ')
test('result', 'x<0', 'result')
test('result', 'x<5', 'result')
test('result', 'x<6', 'result')
test('result', 'x<7', 'resultx')
test('result', 'x<8', 'resultxx')
test('result', ' <7', 'result ')
test('result', '<7', 'result ')
test('result', '>7', ' result')
test('result', '>8', ' result')
test('result', '^8', ' result ')
test('result', '^9', ' result ')
test('result', '^10', ' result ')
test('a', '10000', 'a' + ' ' * 9999)
test('', '10000', ' ' * 10000)
test('', '10000000', ' ' * 10000000)
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
# examples from the PEP:
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_specs
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
# class that returns a bad type from __format__
class H:
def __format__(self, format_spec):
return 1.0
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r and !s coercions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0}'.format(E('data')), 'E(data)')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
msg = 'object.__format__ with a non-empty format string is deprecated'
with test_support.check_warnings((msg, PendingDeprecationWarning)):
self.assertEqual('{0:^10}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:^10s}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "{".format)
self.assertRaises(ValueError, "}".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(IndexError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(IndexError, "{:}".format)
self.assertRaises(IndexError, "{:s}".format)
self.assertRaises(IndexError, "{}".format)
# issue 6089
self.assertRaises(ValueError, "{0[0]x}".format, [None])
self.assertRaises(ValueError, "{0[0](10)}".format, [None])
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
self.assertRaises(ValueError, "{0:-s}".format, '')
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
def test_format_huge_precision(self):
format_string = ".{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_width(self):
format_string = "{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_item_number(self):
format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string.format(2.34)
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_buffer_is_readonly(self):
self.assertRaises(TypeError, sys.stdin.readinto, b"")
def test_encode_and_decode_kwargs(self):
self.assertEqual('abcde'.encode('ascii', 'replace'),
'abcde'.encode('ascii', errors='replace'))
self.assertEqual('abcde'.encode('ascii', 'ignore'),
'abcde'.encode(encoding='ascii', errors='ignore'))
self.assertEqual('Andr\202 x'.decode('ascii', 'ignore'),
'Andr\202 x'.decode('ascii', errors='ignore'))
self.assertEqual('Andr\202 x'.decode('ascii', 'replace'),
'Andr\202 x'.decode(encoding='ascii', errors='replace'))
def test_startswith_endswith_errors(self):
with self.assertRaises(UnicodeDecodeError):
'\xff'.startswith(u'x')
with self.assertRaises(UnicodeDecodeError):
'\xff'.endswith(u'x')
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('unicode', exc)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
def test_main():
test_support.run_unittest(StrTest)
if __name__ == "__main__":
test_main()
|
|
#
#
# Copyright (C) 2006, 2007, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for the LUXI protocol
This module implements the local unix socket protocol. You only need
this module and the opcodes module in the client program in order to
communicate with the master.
The module is also used by the master daemon.
"""
from ganeti import constants
from ganeti import pathutils
from ganeti import objects
import ganeti.rpc.client as cl
from ganeti.rpc.errors import RequestError
from ganeti.rpc.transport import Transport
__all__ = [
# classes:
"Client"
]
REQ_SUBMIT_JOB = constants.LUXI_REQ_SUBMIT_JOB
REQ_SUBMIT_JOB_TO_DRAINED_QUEUE = constants.LUXI_REQ_SUBMIT_JOB_TO_DRAINED_QUEUE
REQ_SUBMIT_MANY_JOBS = constants.LUXI_REQ_SUBMIT_MANY_JOBS
REQ_PICKUP_JOB = constants.LUXI_REQ_PICKUP_JOB
REQ_WAIT_FOR_JOB_CHANGE = constants.LUXI_REQ_WAIT_FOR_JOB_CHANGE
REQ_CANCEL_JOB = constants.LUXI_REQ_CANCEL_JOB
REQ_ARCHIVE_JOB = constants.LUXI_REQ_ARCHIVE_JOB
REQ_CHANGE_JOB_PRIORITY = constants.LUXI_REQ_CHANGE_JOB_PRIORITY
REQ_AUTO_ARCHIVE_JOBS = constants.LUXI_REQ_AUTO_ARCHIVE_JOBS
REQ_QUERY = constants.LUXI_REQ_QUERY
REQ_QUERY_FIELDS = constants.LUXI_REQ_QUERY_FIELDS
REQ_QUERY_JOBS = constants.LUXI_REQ_QUERY_JOBS
REQ_QUERY_FILTERS = constants.LUXI_REQ_QUERY_FILTERS
REQ_REPLACE_FILTER = constants.LUXI_REQ_REPLACE_FILTER
REQ_DELETE_FILTER = constants.LUXI_REQ_DELETE_FILTER
REQ_QUERY_INSTANCES = constants.LUXI_REQ_QUERY_INSTANCES
REQ_QUERY_NODES = constants.LUXI_REQ_QUERY_NODES
REQ_QUERY_GROUPS = constants.LUXI_REQ_QUERY_GROUPS
REQ_QUERY_NETWORKS = constants.LUXI_REQ_QUERY_NETWORKS
REQ_QUERY_EXPORTS = constants.LUXI_REQ_QUERY_EXPORTS
REQ_QUERY_CONFIG_VALUES = constants.LUXI_REQ_QUERY_CONFIG_VALUES
REQ_QUERY_CLUSTER_INFO = constants.LUXI_REQ_QUERY_CLUSTER_INFO
REQ_QUERY_TAGS = constants.LUXI_REQ_QUERY_TAGS
REQ_SET_DRAIN_FLAG = constants.LUXI_REQ_SET_DRAIN_FLAG
REQ_SET_WATCHER_PAUSE = constants.LUXI_REQ_SET_WATCHER_PAUSE
REQ_ALL = constants.LUXI_REQ_ALL
DEF_RWTO = constants.LUXI_DEF_RWTO
WFJC_TIMEOUT = constants.LUXI_WFJC_TIMEOUT
class Client(cl.AbstractClient):
"""High-level client implementation.
This uses a backing Transport-like class on top of which it
implements data serialization/deserialization.
"""
def __init__(self, address=None, timeouts=None, transport=Transport):
"""Constructor for the Client class.
Arguments are the same as for L{AbstractClient}.
"""
super(Client, self).__init__(timeouts, transport)
# Override the version of the protocol:
self.version = constants.LUXI_VERSION
# Store the socket address
if address is None:
address = pathutils.QUERY_SOCKET
self.address = address
self._InitTransport()
def _GetAddress(self):
return self.address
def SetQueueDrainFlag(self, drain_flag):
return self.CallMethod(REQ_SET_DRAIN_FLAG, (drain_flag, ))
def SetWatcherPause(self, until):
return self.CallMethod(REQ_SET_WATCHER_PAUSE, (until, ))
def PickupJob(self, job):
return self.CallMethod(REQ_PICKUP_JOB, (job,))
def SubmitJob(self, ops):
ops_state = [op.__getstate__()
if not isinstance(op, objects.ConfigObject)
else op.ToDict(_with_private=True)
for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
def SubmitJobToDrainedQueue(self, ops):
ops_state = [op.__getstate__() for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB_TO_DRAINED_QUEUE, (ops_state, ))
def SubmitManyJobs(self, jobs):
jobs_state = []
for ops in jobs:
jobs_state.append([op.__getstate__() for op in ops])
return self.CallMethod(REQ_SUBMIT_MANY_JOBS, (jobs_state, ))
@staticmethod
def _PrepareJobId(request_name, job_id):
try:
return int(job_id)
except ValueError:
raise RequestError("Invalid parameter passed to %s as job id: "
" expected integer, got value %s" %
(request_name, job_id))
def CancelJob(self, job_id, kill=False):
job_id = Client._PrepareJobId(REQ_CANCEL_JOB, job_id)
return self.CallMethod(REQ_CANCEL_JOB, (job_id, kill))
def ArchiveJob(self, job_id):
job_id = Client._PrepareJobId(REQ_ARCHIVE_JOB, job_id)
return self.CallMethod(REQ_ARCHIVE_JOB, (job_id, ))
def ChangeJobPriority(self, job_id, priority):
job_id = Client._PrepareJobId(REQ_CHANGE_JOB_PRIORITY, job_id)
return self.CallMethod(REQ_CHANGE_JOB_PRIORITY, (job_id, priority))
def AutoArchiveJobs(self, age):
timeout = (DEF_RWTO - 1) / 2
return self.CallMethod(REQ_AUTO_ARCHIVE_JOBS, (age, timeout))
def WaitForJobChangeOnce(self, job_id, fields,
prev_job_info, prev_log_serial,
timeout=WFJC_TIMEOUT):
"""Waits for changes on a job.
@param job_id: Job ID
@type fields: list
@param fields: List of field names to be observed
@type prev_job_info: None or list
@param prev_job_info: Previously received job information
@type prev_log_serial: None or int/long
@param prev_log_serial: Highest log serial number previously received
@type timeout: int/float
@param timeout: Timeout in seconds (values larger than L{WFJC_TIMEOUT} will
be capped to that value)
"""
assert timeout >= 0, "Timeout can not be negative"
return self.CallMethod(REQ_WAIT_FOR_JOB_CHANGE,
(job_id, fields, prev_job_info,
prev_log_serial,
min(WFJC_TIMEOUT, timeout)))
def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
job_id = Client._PrepareJobId(REQ_WAIT_FOR_JOB_CHANGE, job_id)
while True:
result = self.WaitForJobChangeOnce(job_id, fields,
prev_job_info, prev_log_serial)
if result != constants.JOB_NOTCHANGED:
break
return result
def Query(self, what, fields, qfilter):
"""Query for resources/items.
@param what: One of L{constants.QR_VIA_LUXI}
@type fields: List of strings
@param fields: List of requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: L{objects.QueryResponse}
"""
result = self.CallMethod(REQ_QUERY, (what, fields, qfilter))
return objects.QueryResponse.FromDict(result)
def QueryFields(self, what, fields):
"""Query for available fields.
@param what: One of L{constants.QR_VIA_LUXI}
@type fields: None or list of strings
@param fields: List of requested fields
@rtype: L{objects.QueryFieldsResponse}
"""
result = self.CallMethod(REQ_QUERY_FIELDS, (what, fields))
return objects.QueryFieldsResponse.FromDict(result)
def QueryJobs(self, job_ids, fields):
return self.CallMethod(REQ_QUERY_JOBS, (job_ids, fields))
def QueryFilters(self, uuids, fields):
return self.CallMethod(REQ_QUERY_FILTERS, (uuids, fields))
def ReplaceFilter(self, uuid, priority, predicates, action, reason):
return self.CallMethod(REQ_REPLACE_FILTER,
(uuid, priority, predicates, action, reason))
def DeleteFilter(self, uuid):
return self.CallMethod(REQ_DELETE_FILTER, (uuid, ))
def QueryInstances(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_INSTANCES, (names, fields, use_locking))
def QueryNodes(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_NODES, (names, fields, use_locking))
def QueryGroups(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_GROUPS, (names, fields, use_locking))
def QueryNetworks(self, names, fields, use_locking):
return self.CallMethod(REQ_QUERY_NETWORKS, (names, fields, use_locking))
def QueryExports(self, nodes, use_locking):
return self.CallMethod(REQ_QUERY_EXPORTS, (nodes, use_locking))
def QueryClusterInfo(self):
return self.CallMethod(REQ_QUERY_CLUSTER_INFO, ())
def QueryConfigValues(self, fields):
return self.CallMethod(REQ_QUERY_CONFIG_VALUES, (fields, ))
def QueryTags(self, kind, name):
return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
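# Minimal usage sketch (not part of the original module): query cluster
# information over LUXI. This assumes a running Ganeti master daemon listening
# on the default query socket; pass an explicit address to Client() otherwise.
if __name__ == "__main__":
  luxi_client = Client()
  print(luxi_client.QueryClusterInfo())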
|
|
#!/usr/bin/env python
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <[email protected]> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import struct
import math
import sys
import string
import pprint
import types
def hexDump(bytes):
"""Useful utility; prints the string in hexadecimal"""
for i in range(len(bytes)):
sys.stdout.write("%2x " % (ord(bytes[i])))
if (i+1) % 8 == 0:
print repr(bytes[i-7:i+1])
if(len(bytes) % 8 != 0):
print string.rjust("", 11), repr(bytes[i-len(bytes)%8:i+1])
class OSCMessage:
"""Builds typetagged OSC messages."""
def __init__(self):
self.address = ""
self.typetags = ","
self.message = ""
def setAddress(self, address):
self.address = address
def setMessage(self, message):
self.message = message
def setTypetags(self, typetags):
self.typetags = typetags
def clear(self):
self.address = ""
self.clearData()
def clearData(self):
self.typetags = ","
self.message = ""
def append(self, argument, typehint = None):
"""Appends data to the message,
updating the typetags based on
the argument's type.
If the argument is a blob (counted string)
pass in 'b' as typehint."""
if typehint == 'b':
binary = OSCBlob(argument)
else:
binary = OSCArgument(argument)
self.typetags = self.typetags + binary[0]
self.rawAppend(binary[1])
def rawAppend(self, data):
"""Appends raw data to the message. Use append()."""
self.message = self.message + data
def getBinary(self):
"""Returns the binary message (so far) with typetags."""
address = OSCArgument(self.address)[1]
typetags = OSCArgument(self.typetags)[1]
return address + typetags + self.message
def __repr__(self):
return self.getBinary()
def readString(data):
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
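# Worked example (illustrative): an OSC string is null-terminated and padded
# to a multiple of 4 bytes, so readString("hi\0\0rest") returns ("hi", "rest").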
def readBlob(data):
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def readInt(data):
if(len(data)<4):
print "Error: too few bytes for int", data, len(data)
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer."""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def readFloat(data):
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def OSCBlob(next):
"""Convert a string into an OSC Blob,
returning a (typetag, data) tuple."""
if type(next) == type(""):
length = len(next)
padded = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (padded), length, next)
tag = 'b'
else:
tag = ''
binary = ''
return (tag, binary)
def OSCArgument(next):
"""Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple."""
if type(next) == type(""):
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
binary = struct.pack(">%ds" % (OSCstringLength), next)
tag = "s"
elif type(next) == type(42.5):
binary = struct.pack(">f", next)
tag = "f"
elif type(next) == type(13):
binary = struct.pack(">i", next)
tag = "i"
else:
binary = ""
tag = ""
return (tag, binary)
def parseArgs(args):
"""Given a list of strings, produces a list
where those strings have been parsed (where
possible) as floats or integers."""
parsed = []
for arg in args:
print arg
arg = arg.strip()
interpretation = None
try:
interpretation = float(arg)
if string.find(arg, ".") == -1:
interpretation = int(interpretation)
except:
# Oh - it was a string.
interpretation = arg
pass
parsed.append(interpretation)
return parsed
def decodeOSC(data):
"""Converts a typetagged OSC message to a Python list."""
table = {"i":readInt, "f":readFloat, "s":readString, "b":readBlob}
decoded = []
address, rest = readString(data)
typetags = ""
if address == "#bundle":
time, rest = readLong(rest)
# decoded.append(address)
# decoded.append(time)
while len(rest)>0:
length, rest = readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest) > 0:
typetags, rest = readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags[0] == ",":
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
print "Oops, typetag lacks the magic ,"
return decoded
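# Illustrative result shape (not from the original file): for a plain message
# to "/foo" with typetags ",if" and arguments (1, 2.0), decodeOSC() returns
# ["/foo", ",if", 1, 2.0]; bundles instead yield a list of such lists.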
class CallbackManager:
"""This utility class maps OSC addresses to callables.
The CallbackManager calls its callbacks with a list
of decoded OSC arguments, including the address and
the typetags as the first two arguments."""
def __init__(self):
self.callbacks = {}
self.add(self.unbundler, "#bundle")
def handle(self, data, source = None):
"""Given OSC data, tries to call the callback with the
right address."""
decoded = decodeOSC(data)
self.dispatch(decoded, source)
def dispatch(self, message, source = None):
"""Sends decoded OSC data to an appropriate calback"""
msgtype = ""
try:
if type(message[0]) == str:
# got a single message
address = message[0]
self.callbacks[address](message)
elif type(message[0]) == list:
for msg in message:
self.dispatch(msg)
except KeyError, key:
print 'address %s not found, %s: %s' % (address, key, message)
pprint.pprint(message)
except IndexError, e:
print '%s: %s' % (e, message)
pass
except None, e:
print "Exception in", address, "callback :", e
return
def add(self, callback, name):
"""Adds a callback to our set of callbacks,
or removes the callback with name if callback
is None."""
if callback == None:
del self.callbacks[name]
else:
self.callbacks[name] = callback
def unbundler(self, messages):
"""Dispatch the messages in a decoded bundle."""
# first two elements are #bundle and the time tag, rest are messages.
for message in messages[2:]:
self.dispatch(message)
if __name__ == "__main__":
hexDump("Welcome to the OSC testing program.")
print
message = OSCMessage()
message.setAddress("/foo/play")
message.append(44)
message.append(11)
message.append(4.5)
message.append("the white cliffs of dover")
hexDump(message.getBinary())
print "Making and unmaking a message.."
strings = OSCMessage()
strings.append("Mary had a little lamb")
strings.append("its fleece was white as snow")
strings.append("and everywhere that Mary went,")
strings.append("the lamb was sure to go.")
strings.append(14.5)
strings.append(14.5)
strings.append(-400)
raw = strings.getBinary()
hexDump(raw)
print "Retrieving arguments..."
data = raw
for i in range(6):
text, data = readString(data)
print text
number, data = readFloat(data)
print number
number, data = readFloat(data)
print number
number, data = readInt(data)
print number
hexDump(raw)
print decodeOSC(raw)
print decodeOSC(message.getBinary())
print "Testing Blob types."
blob = OSCMessage()
blob.append("","b")
blob.append("b","b")
blob.append("bl","b")
blob.append("blo","b")
blob.append("blob","b")
blob.append("blobs","b")
blob.append(42)
hexDump(blob.getBinary())
print decodeOSC(blob.getBinary())
def printingCallback(*stuff):
sys.stdout.write("Got: ")
for i in stuff:
sys.stdout.write(str(i) + " ")
sys.stdout.write("\n")
print "Testing the callback manager."
c = CallbackManager()
c.add(printingCallback, "/print")
c.handle(message.getBinary())
message.setAddress("/print")
c.handle(message.getBinary())
print1 = OSCMessage()
print1.setAddress("/print")
print1.append("Hey man, that's cool.")
print1.append(42)
print1.append(3.1415926)
c.handle(print1.getBinary())
bundle = OSCMessage()
bundle.setAddress("")
bundle.append("#bundle")
bundle.append(0)
bundle.append(0)
bundle.append(print1.getBinary(), 'b')
bundle.append(print1.getBinary(), 'b')
bundlebinary = bundle.message
print "sending a bundle to the callback manager"
c.handle(bundlebinary)
|
|
#!/usr/bin/python
import sys
import string
import hashlib
import os
import random
import struct
import getpass
import datetime
import json
import requests #pip install requests
import traceback
import subprocess
from datetime import timedelta
from Crypto.Cipher import AES
from pybitcoin import BitcoinPrivateKey, make_op_return_tx, broadcast_transaction, BlockchainInfoClient, send_to_address
from OpenSSL import crypto, SSL
from ecdsa import SigningKey
#For pybitcoin download and install from:
#https://github.com/blockstack/pybitcoin.git
art = r'''
____ _ _ _____ _____ _
| _ \| | | | / ____/ ____| |
| |_) | | ___ ___| | _______| (___| (___ | |
| _ <| |/ _ \ / __| |/ /______\___ \\___ \| |
| |_) | | (_) | (__| < ____) |___) | |____
|____/|_|\___/ \___|_|\_\ |_____/_____/|______|
Block-SSL - SSL/TLS Certificate Authority Replacement
through the BitCoin Blockchain
Thesis Project - Aristotle University of Thessaloniki
By Cr0wTom
------------------------------------------------------
'''
def identityCreation():
print art
print "\nIdentity Creation Script - Block SSL\n"
keybaseCheck()
ans1 = raw_input("Do you own a Keybase.io account? [Y]es [N]o, default: [Y]\n")
if ans1 == "Y" or ans1 == "y" or ans1 == "" or ans1 == " ":
os.system("keybase version") #check for keybase version
print "Checking for Updates...\n"
os.system("echo 3 | keybase update check >/dev/null 2>&1") #check for keybase updates without terminal output
os.system("keybase login") #login to keybase through terminal
else:
os.system("keybase version")
print "Checking for Updates...\n"
os.system('echo 3 | keybase update check >/dev/null 2>&1')
os.system("keybase signup") #signup to keybase through terminal
ex = raw_input("Do you already own a BitCoin address that you want to use? [Y]es [N]o, default: [Y]")
if ex == "Y" or ex == "y" or ex == "" or ex == " ":
gen_priv = raw_input("Which is your private key? (in hexadecimal format)\n") #Private Key of the owner
gen_priv = BitcoinPrivateKey(gen_priv)
print "Saving to Generation_Private.pem file..."
open("Generation_Private.pem", "w").write(gen_priv.to_pem()) #Saving to file
print "Generating \"Generation\" Public Key..."
gen_pub = gen_priv.public_key() #Generate the "Generation" public key from gen_priv
print "Saving to Generation_Public.pem file..."
open("Generation_Public.pem", "w").write(gen_pub.to_pem()) #Saving to file
print "Public/Private key pair creation:"
print "Warning: This is a pseudo-random generation."
print "Warning: If you want complete randomness consider other ways of Public/Private key pair generation."
else:
print "Public/Private key pair creation:"
print "Warning: This is a pseudo-random generation."
print "Warning: If you want complete randomness consider other ways of Public/Private key pair generation.\n"
print "Generating \"Generation\" Private Key..."
gen_priv = BitcoinPrivateKey() #Generate the "Generation" private key
print "Saving to Generation_Private.pem file..."
open("Generation_Private.pem", "w").write(gen_priv.to_pem()) #Saving to file
print "Generating \"Generation\" Public Key..."
gen_pub = gen_priv.public_key() #Generate the "Generation" public key from gen_priv
print "Saving to Generation_Public.pem file..."
open("Generation_Public.pem", "w").write(gen_pub.to_pem()) #Saving to file
print "Generating \"Certificate\" Private Key..."
cert_priv = BitcoinPrivateKey() #Generate the "Certificate" private key
print "Saving to Certificate_Private.pem file..."
open("Certificate_Private.pem", "w").write(cert_priv.to_pem()) #Saving to file
print "Generating \"Certificate\" Public Key..."
cert_pub = cert_priv.public_key() #Generate the "Certificate" public key from cert_priv
print "Saving to Certificate_Public.pem file..."
open("Certificate_Public.pem", "w").write(cert_pub.to_pem()) #Saving to file
print "Generating \"Revocation\" Private Key..."
rev_priv = BitcoinPrivateKey() #Generate the "Revocation" private key
print "Saving to Revocation_Private.pem file..."
open("Revocation_Private.pem", "w").write(rev_priv.to_pem()) #Saving to file
print "Generating \"Revocation\" Public Key..."
rev_pub = rev_priv.public_key() #Generate the "Revocation" public key from rev_priv
print "Saving to Revocation_Public.pem file..."
open("Revocation_Public.pem", "w").write(rev_pub.to_pem()) #Saving to file
open("Gen_Address.txt", "w").write(gen_pub.address()) #save the addresses to make the hash later
open("Rev_Address.txt", "w").write(rev_pub.address())
open("Cert_Address.txt", "w").write(cert_pub.address()) #save it for the transaction
print "\nYour addresses are:"
print "\nGeneration Address: ", gen_pub.address()
print "\nCertificate Address: ", cert_pub.address()
print "\nRevocation Address: ", rev_pub.address()
certadd = cert_pub.address()
os.system("echo 3 | keybase currency add --force " + certadd + " >/dev/null 2>&1") #add cert address to keybase account
print "Certificate address added to your keybase.io account.\n"
print "\nPlease load your Generation and Revocation addresses with some satoshis."
print "\nWarning: Please keep your Revocation address secret!"
ans = raw_input("Do you want to encrypt your private key files? [Y]es [N]o, default: [Y]")
if ans == "Y" or ans == "y" or ans == "" or ans == " ":
password = getpass.getpass("Give a strong password: ") #Ask for encryption password
key = hashlib.sha256(password).digest()
encrypt_file(key, "Generation_Private.pem")
os.remove("Generation_Private.pem")
encrypt_file(key, "Certificate_Private.pem")
os.remove("Certificate_Private.pem")
encrypt_file(key, "Revocation_Private.pem")
os.remove("Revocation_Private.pem")
sys.exit()
else:
sys.exit()
def identityUpdate():
print art
print "\nIdentity Update Script - Block SSL\n"
keybaseCheck()
print "Which way do you want to update your digital Identity - Generation Address? default: [1]\n"
print "\t1. Generate Social Media Proof\n"
print "\t2. DNS Proof\n"
print "\t3. Add PGP Key\n"
print "\t4. Create a new PGP Key\n"
print "\t5. Follow a user\n"
ans1 = raw_input()
if ans1 == "1" or ans1 == "" or ans1 == " ":
print "\nSelect the service you want to Proof:\n"
print "\t1. Facebook\n"
print "\t2. GitHub\n"
print "\t3. Twitter\n"
print "\t4. HackerNews\n"
print "\t5. Reddit\n"
ans2 = raw_input()
if ans2 == "1":
service = "facebook "
username = raw_input("\nGive your Facebook Username: ")
s = True
elif ans2 == "2":
service = "github "
username = raw_input("\nGive your GitHub Username: ")
s = True
elif ans2 == "3":
service = "twitter "
username = raw_input("\nGive your Twitter Username: ")
s = True
elif ans2 == "4":
service = "hackernews "
username = raw_input("\nGive your HackerNews Username: ")
s = True
elif ans2 == "5":
service = "reddit "
username = raw_input("\nGive your Reddit Username: ")
s = True
else:
s = False
if s == True:
command = "keybase prove " + service + username
os.system(command)
else:
print "\nPlease run the script again, with a valid option."
sys.exit()
elif ans1 == "2":
dns = raw_input("\nGive your DNS: ")
dns = "keybase prove dns " + dns
os.system(dns)
elif ans1 == "3":
raw_input("Plase your private pgp key to pgp.txt file, in the scripts directory, and press Enter.")
os.system("keybase pgp import -i pgp.txt")
elif ans1 == "4":
print "\nCreating a new key pair:"
print "Warning: This is a pseudo-random generation.\n"
os.system("keybase pgp gen")
elif ans1 == "5":
user = raw_input("\nGive the users username: ")
user = "keybase track " + user
os.system(user)
else:
print "\nPlease run the script again, with a valid option."
sys.exit()
#generate new proof keybase prove
sys.exit()
def identityCheck():
#checks for the personal keybase identity of the user
print art
keybaseCheck()
print "Keybase Followers: \n"
os.system("keybase list-followers")
print "\nKeybase Following: \n"
os.system("keybase list-following")
print "\nKeybase Devices: \n"
os.system("keybase device list")
print "\nGeneral User Info: \n"
os.system("keybase id")
print "\n"
sys.exit()
def certificateCreation():
print art
print "\nCertificate Creation Script - Block SSL\n"
# create a key pair
print "Creating a new key pair:"
print "Warning: This is a pseudo-random generation.\n"
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
createCert(k, cert)
open("certificate.crt", "wt").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open("keys.key", "wt").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
print "\nCertificate created in file: certificate.crt"
print "\nKeys saved in file: keys.key\n"
ans2 = raw_input("Do you have available satoshis in your Generation address? [Y]es [N]o, default: [Y]")
if ans2 == "Y" or ans2 == "y" or ans2 == "" or ans2 == " ":
#Opening Generation private key from pem file
if os.path.isfile('./Generation_Private.pem'):
print "\nGeneration Private Key file exists."
sk = SigningKey.from_pem(open("Generation_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
elif os.path.isfile('./Generation_Private.pem.enc'):
print "\nGeneration Private Key encoded file exists."
decrypt_file(key, "Generation_Private.pem.enc")
print "\nDecrypting Generation Private Key..."
print "Saving to Generation_Private.pem..."
sk = SigningKey.from_pem(open("Generation_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
else:
print "\nGeneration Private Key does not exist."
print "\nPlease place the file in the script directory or run -i option for a new key pair.\n"
sys.exit()
try:
recipient_address = open("Cert_Address.txt", "rb").read()
blockchain_client = BlockchainInfoClient("dacc6a40-1b8f-4dbb-afc7-bc9657603e83")
send_to_address(recipient_address, 164887, sk, blockchain_client) #make a ~$10 transaction to the cert address
print "\nWait at least 20 minutes, and run the script with option -s to send the certificate to the blockchain."
except Exception:
print "\nNo balance in your Generation address.\n"
print "Please load some bitcoins in order to submit your certificate.\n"
else:
print "Please load your Generation address and run the script again."
sys.exit()
def certificateUpdate():
print art
print "\nCertificate Update Script - Block SSL\n"
# create a key pair or use the old one
ans = raw_input("Do you have your old keys.key file with your key pair? [Y]es [N]o, default: [Y]\n")
if ans == "n" or ans == "N":
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
print "Creating a new key pair:"
print "Warning: This is a pseudo-random generation.\n"
else:
print "Place your keys.key file in the scripts directory.\n"
k = crypto.PKey()
with open("keys.key", "r") as k:
k = crypto.load_privatekey(crypto.FILETYPE_PEM, k.read())
# create a self-signed cert
cert = crypto.X509()
createCert(k, cert)
open("certificate.crt", "wt").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
print "\nCertificate created in file: certificate.crt"
if ans == "n" or ans == "N":
open("keys.key", "wt").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
print "\nKeys saved in file: keys.key\n"
ans2 = raw_input("Do you want to send your certificate to the blockchain? [Y]es [N]o, default: [Y]")
if ans2 == "Y" or ans2 == "y" or ans2 == "" or ans2 == " ":
i = 2
sendCertificate(i)
sys.exit()
def certificateRevocation():
print art
print "\nCertificate Revocation Script - Block SSL\n"
print "In which of your addresses do you still have access? default: [1]\n"
print "\t1. All of the addresses. (Generation, Certificate, Revocation)"
print "\t2. Only Certificate address.\n"
print "\t3. Only Revocation address.\n"
print "\t4. Revocation and Generation addresses.\n"
ans = raw_input()
blockchain_client = BlockchainInfoClient("dacc6a40-1b8f-4dbb-afc7-bc9657603e83")
if ans == "1" or ans == "" or ans == " " or ans == "2":
address = open("Cert_Address.txt", "r").read()
address = address.strip()
url = "https://blockchain.info/balance?format=json&active=" + address
r = requests.get(url)
try:
balance = r.json()[address]
balance = str(balance)
x = 1
i = 19
final_balance = ""
while x == 1:
if balance[i] == ",":
x += 1
else:
final_balance = final_balance + balance[i]
i += 1
print " Your Certificate address balance is: " + final_balance
#Opening Generation private key from pem file
if os.path.isfile('./Certificate_Private.pem'):
print "\nCertificate Private Key file exists."
sk = SigningKey.from_pem(open("Certificate_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
elif os.path.isfile('./Certificate_Private.pem.enc'):
print "\nCertificate Private Key encoded file exists."
decrypt_file(key, "Certificate_Private.pem.enc")
print "\nDecrypting Certificate Private Key..."
print "Saving to Certificate_Private.pem..."
sk = SigningKey.from_pem(open("Certificate_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
else:
print "\nCertificate Private Key does not exist."
print "\nPlease place the .pem file in the script directory.\n"
sys.exit()
except ValueError, e:
raise Exception('Invalid response from blockchain.info.')
if ans == "1" or ans == "" or ans == " ":
recepient_address = open("Gen_Address.txt", "rb").read()
ans3 = raw_input("Which is your revocation reason?\n")
size = len(ans3)
while size > 75:
print "String too long for OP_RETURN transaction, please repeat.\n"
ans3 = raw_input("Which is your revocation reason?\n")
size = len(ans3)
data = "R1: " + ans3
else:
recepient_address = raw_input("Give the address that you want to sent the certificate balance, for revocation purposes:\n")
data = "R2: No access to Generation address"
#todo - check if the address is correct
try:
tx = make_op_return_tx(data, sk, blockchain_client, fee=1000, format='bin')
broadcast_transaction(tx, blockchain_client)
final_balance = int(final_balance) - 1000
send_to_address(recepient_address, final_balance, sk, blockchain_client)
except Exception:
print "\nNo balance in your Certificate address.\n"
print "If the Certificate address has 0 balance, it has been already been revoced.\n"
elif ans == "3" or ans == "4":
if os.path.isfile('./Revocation_Private.pem'):
print "\nRevocation Private Key file exists."
sk = SigningKey.from_pem(open("Revocation_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
elif os.path.isfile('./Revocation_Private.pem.enc'):
print "\nRevocation Private Key encoded file exists."
decrypt_file(key, "Revocation_Private.pem.enc")
print "\nDecrypting Revocation Private Key..."
print "Saving to Revocation_Private.pem..."
sk = SigningKey.from_pem(open("Revocation_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
else:
print "\nRevocation Private Key does not exist."
print "\nPlease place the .pem file in the script directory.\n"
sys.exit()
if os.path.isfile('./Cert_Address.txt'):
recepient_address = open("Cert_Address.txt", "rb").read()
else:
print "\nCert_Address.txt does not exist."
recepient_address = raw_input("Give the Certificate address of your certificate, for the Extreme revocation transaction:\n")
if ans == "3":
data = "ER1: No Access to Generation and Certificate address"
else:
data = "ER2: No Access to Certificate address"
#send all the balance to the given address
print "\nYour revocation reason is: ", data
print "\nAdding revocation reason to OP_RETURN..."
try:
send_to_address(recepient_address, 10000, sk, blockchain_client)
tx = make_op_return_tx(data, sk, blockchain_client, fee=1000, format='bin')
broadcast_transaction(tx, blockchain_client)
except Exception:
print "\nNo balance in your Revocation address.\n"
print "Please load some bitcoins in order to submit your revocation reason.\n"
sys.exit()
def createCert(k, cert):
# create a self-signed cert
country = raw_input("Country Name (2 letter code): ")
cert.get_subject().C = country
state = raw_input("State or Province Name (full name): ")
cert.get_subject().ST = state
local = raw_input("Locality Name (eg, city): ")
cert.get_subject().L = local
org = raw_input("Organization Name (eg, company): ")
cert.get_subject().O = org
orgu = raw_input("Organizational Unit Name (eg, section): ")
cert.get_subject().OU = orgu
cn = raw_input("Common Name (eg, fully qualified host name): ")
cert.get_subject().CN = cn
email = raw_input("email Address: ")
cert.get_subject().emailAddress = email
cert.set_serial_number(1000) #todo - take the last number from merkle tree
cert.gmtime_adj_notBefore(0)
now = datetime.datetime.now() #setting the time right now
tr = 0
while tr == 0:
an = int(raw_input("For how long do you need to update the certificate in days? (maximum: 365)\n"))
if an < 366 and an > 0:
cert.gmtime_adj_notAfter(60*60*24*an)
tr += 1
else:
print "Please give a number smaller than 366.\n"
tr = 0
diff = datetime.timedelta(an)
future = now + diff
print future.strftime("\nYour certificate expires on %m/%d/%Y") #print the expiration date
print "\nAdding the GE and RV signatures to the issuer field..."
message_gen = open("Gen_Address.txt", "rb").read()
m1 = hashlib.sha256()
m1.update(message_gen)
m1 = m1.hexdigest()
message_rev = open("Rev_Address.txt", "rb").read()
m2 = hashlib.sha256()
m2.update(message_rev)
m2 = m2.hexdigest()
cert.get_issuer().CN = m1 #Generation address at the CN issuer field
cert.get_issuer().O = m2 #Revocation address at the O issuer field
cert.set_pubkey(k)
cert.sign(k, 'sha256')
return cert
def sendCertificate(i):
if i == 1:
ans = raw_input("Do you want to Create or Update your certificate? [C]reate [U]pdate, default: [C]")
if ans == "C" or ans == "c" or ans == "" or ans == " ":
mode = "CC: "
else:
mode = "UC: "
elif i == 2:
mode = "UC: "
#Hashing of the certificate
f = open("certificate.crt", "rb") #read file in binary mode
fr = f.read()
cert_hash = hashlib.sha256() #use the SHA256 hashing algorithm
cert_hash.update(fr)
data = cert_hash.hexdigest()
print "\nYour Certificate hash is: ", data
data = mode + data
print "\nAdding to OP_RETURN..."
#Opening Generation private key from pem file
if os.path.isfile('./Certificate_Private.pem'):
print "\nCertificate Private Key file exists."
sk = SigningKey.from_pem(open("Certificate_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
elif os.path.isfile('./Certificate_Private.pem.enc'):
print "\nCertificate Private Key encoded file exists."
decrypt_file(key, "Certificate_Private.pem.enc")
print "\nDecrypting Certificate Private Key..."
print "Saving to Certificate_Private.pem..."
sk = SigningKey.from_pem(open("Certificate_Private.pem").read())
sk_string = sk.to_string()
sk = str(sk_string)
sk = sk.encode("hex")
else:
print "\nCertificate Private Key does not exist."
print "\nPlease place the file in the script directory or run -i option for a new key pair.\n"
sys.exit()
try:
blockchain_client = BlockchainInfoClient("dacc6a40-1b8f-4dbb-afc7-bc9657603e83")
tx = make_op_return_tx(data, sk, blockchain_client, fee=10000, format='bin')
broadcast_transaction(tx, blockchain_client)
except Exception:
print "\nNo balance in your Certificate address.\n"
print "Please first run the -cc or the -u script.\n"
def encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):
#Thanks to Eli Bendersky: https://eli.thegreenplace.net/2010/06/25/aes-encryption-of-files-in-python-with-pycrypto/
if not out_filename:
out_filename = in_filename + '.enc'
iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
encryptor = AES.new(key, AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(struct.pack('<Q', filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += ' ' * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
def decrypt_file(key, in_filename, out_filename=None, chunksize=24*1024):
#Thanks to Eli Bendersky: https://eli.thegreenplace.net/2010/06/25/aes-encryption-of-files-in-python-with-pycrypto/
if not out_filename:
out_filename = os.path.splitext(in_filename)[0]
with open(in_filename, 'rb') as infile:
origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
iv = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, iv)
with open(out_filename, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(origsize)
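# Usage sketch (illustrative, mirroring how this script calls the helpers):
# derive a 32-byte AES key from a password and round-trip a key file.
#
#   key = hashlib.sha256(getpass.getpass("Password: ")).digest()
#   encrypt_file(key, "Generation_Private.pem")      # writes Generation_Private.pem.enc
#   decrypt_file(key, "Generation_Private.pem.enc")  # restores Generation_Private.pem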
def keybaseCheck():
name = "keybase"
try: #check if keybase exists
devnull = open(os.devnull)
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
print "\tKeybase exists.\n"
except OSError as e: #install keybase - os specific
if e.errno == os.errno.ENOENT:
if sys.platform == "linux" or sys.platform == "linux2": #todo - currently only ubuntu based .deb files
print "Downloading Keybase.dmg"
os.system("curl -O https://prerelease.keybase.io/keybase_amd64.deb")
print "Type your SuperUser Password:\n"
os.system("sudo dpkg -i keybase_amd64.deb")
os.system("sudo apt-get install -f")
elif sys.platform == "win32": #all Windows versions - run with powershell
print "Downloading Keybase.exe\n"
#download and run of the installation .exe with powershell
subprocess.call(["C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe", "$down = New-Object System.Net.WebClient; $url = 'https://prerelease.keybase.io/keybase_setup_386.exe'; $file = 'keybase_setup_386.exe'; $down.DownloadFile($url,$file); $exec = New-Object -com shell.application; $exec.shellexecute($file); exit;"])
elif sys.platform == "darwin": #all OSX versions
print "Downloading Keybase.dmg"
os.system("curl -O https://prerelease.keybase.io/Keybase.dmg")
print "Type your SuperUser Password:\n"
os.system("sudo hdiutil attach keybase.dmg")
os.system("sudo cp -ir /Volumes/Keybase/Keybase.app /Applications")
def main(argu):
try:
if argu[1] == "--help" or argu[1] == "-h":
#Option to help with the usage of the script
print art
print "Usage: \"Block_SSL.py <option>\""
print "\nFor a list of options use the --list option."
print "\n"
elif argu[1] == "--list":
#List of available options that the user can use
print art
print "Usage: \"Block_SSL.py <option>\""
print "This is the list of options you can use with Block_SSL. \n"
print "\t -i\t Identity Creation"
print "\t -i\t Identity Update"
print "\t -ii\t Identity Check"
print "\t -cc\t Certificate Creation"
print "\t -u\t Certificate Update"
print "\t -r\t Certificate Revocation"
print "\t -s\t Send Certificate"
print "\t -d\t Decrypt Private Key files"
print "\n"
elif argu[1] == "-i":
#Identity Creation Script
identityCreation()
elif argu[1] == "-iu":
#Identity Update Script
identityUpdate()
elif argu[1] == "-ii":
#Identity Check Script
identityCheck()
elif argu[1] == "-cc":
#Certificate Creation Script
certificateCreation()
elif argu[1] == "-u":
#Certificate Update Script
certificateUpdate()
elif argu[1] == "-r":
#Certificate Revocation Script
certificateRevocation()
elif argu[1] == "-s":
#Send Certificate Script
i = 1
sendCertificate(i)
elif argu[1] == "-d":
print art
#Private Key Decryption Script
password = getpass.getpass("Give your password: ") #Ask for encryption password
key = hashlib.sha256(password).digest()
#Generation Private key decryption
if os.path.isfile('./Generation_Private.pem.enc'):
print "\n Generation Private Key exists."
decrypt_file(key, "Generation_Private.pem.enc")
print "\nDecrypting Generation Private Key..."
print "Saving to Generation_Private.pem..."
else:
print "\n Generation Private Key does not exist."
#Certificate Private key decryption
if os.path.isfile('./Certificate_Private.pem.enc'):
print "\n Certificate Private Key exists."
decrypt_file(key, "Certificate_Private.pem.enc")
print "\nDecrypting Certificate Private Key..."
print "Saving to Certificate_Private.pem..."
else:
print "\n Certificate Private Key does not exist."
#Revocation Private key decryption
if os.path.isfile('./Revocation_Private.pem.enc'):
print "\n Revocation Private Key exists."
decrypt_file(key, "Revocation_Private.pem.enc")
print "\nDecrypting Revocation Private Key..."
print "Saving to Revocation_Private.pem..."
else:
print "\n Revocation Private Key does not exist."
else:
print "\nUsage: \"Block_SSL.py <option>\""
print "\nFor a list of options use the --list option."
print "\nFor help use the --help or -h option."
print "\n"
except IndexError:
print "\nUsage: \"Block_SSL.py <option>\""
print "\nFor a list of options use the --list option."
print "\nFor help use the --help or -h option."
print "\n"
if __name__ == "__main__":
main(sys.argv)
|
|
# jsb.plugs.common/twitter.py
#
#
""" a twitter plugin for the JSONBOT. uses tweepy oauth. """
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.pdol import Pdol
from jsb.utils.textutils import html_unescape
from jsb.utils.generic import waitforqueue, strippedtxt, splittxt
from jsb.lib.persist import PlugPersist
from jsb.utils.twitter import twitterapi, twittertoken
from jsb.lib.datadir import getdatadir
from jsb.lib.jsbimport import _import_byfile
## tweepy imports
from jsb.contrib.tweepy.auth import OAuthHandler
from jsb.contrib.tweepy.api import API
from jsb.contrib.tweepy import oauth
from jsb.contrib.tweepy.error import TweepError
from jsb.contrib.tweepy.models import Status, User
from jsb.contrib import tweepy
go = True
## basic imports
import os
import urllib2
import types
import logging
## credentials
def getcreds(datadir):
try:
mod = _import_byfile("credentials", datadir + os.sep + "config" + os.sep + "credentials.py")
except (IOError, ImportError):
logging.info("the twitter plugin needs the credentials.py file in the %s/config dir. see %s/examples" % (datadir, datadir))
return (None, None)
return mod.CONSUMER_KEY, mod.CONSUMER_SECRET
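# Illustrative credentials.py layout (placeholder values, not real keys); the
# loader above only expects CONSUMER_KEY and CONSUMER_SECRET to be defined:
#
#   CONSUMER_KEY = "your-twitter-consumer-key"
#   CONSUMER_SECRET = "your-twitter-consumer-secret"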
## defines
auth = None
def getauth(datadir):
global auth
if auth: return auth
key, secret = getcreds(datadir)
auth = OAuthHandler(key, secret)
return auth
## functions
def postmsg(username, txt):
try:
result = splittxt(txt, 139)
twitteruser = TwitterUsers("users")
key, secret = getcreds(getdatadir())
token = twittertoken(key, secret, twitteruser, username)
if not token:
raise TweepError("Can't get twitter token")
twitter = twitterapi(key, secret, token)
for txt in result:
status = twitter.update_status(txt)
logging.info("logged %s tweets for %s" % (len(result), username))
except TweepError, ex: logging.error("twitter - error: %s" % str(ex))
return len(result)
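# Behaviour note (illustrative): splittxt() breaks the text into chunks of at
# most 139 characters, each chunk is posted as its own status update, and the
# number of chunks posted is returned to the caller.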
## classes
class TwitterUsers(PlugPersist):
def add(self, user, token):
user = user.strip().lower()
self.data[user] = token
self.save()
def remove(self, user):
user = user.strip().lower()
if user in self.data:
del self.data[user]
self.save()
def size(self):
return len(self.data)
def __contains__(self, user):
user = user.strip().lower()
return user in self.data
## commands
def handle_twitter(bot, ievent):
""" send a twitter message. """
if not go:
ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples")
return
if not ievent.rest:
ievent.missing('<text>')
return
else:
try:
nritems = postmsg(ievent.user.data.name, ievent.rest)
ievent.reply("%s tweet posted" % nritems)
except TweepError, ex:
if "token" in str(ex): ievent.reply("you are not registered yet.. use !twitter-auth")
except (TweepError, urllib2.HTTPError), e:
ievent.reply('twitter failed: %s' % (str(e),))
cmnds.add('twitter', handle_twitter, ['USER', 'GUEST'])
examples.add('twitter', 'adds a message to your twitter account', 'twitter just found the http://gozerbot.org project')
def handle_twittercmnd(bot, ievent):
""" do a twitter API cmommand. """
if not go:
ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data//config dir. see .jsb/data/examples")
return
if not ievent.args:
ievent.missing('<text>')
return
target = strippedtxt(ievent.args[0])
try:
twitteruser = TwitterUsers("users")
token = twitteruser.data.get(ievent.user.data.name)
if not token:
ievent.reply("you are not logged in yet .. run the twitter-auth command.")
return
key, secret = getcreds(getdatadir())
token = oauth.OAuthToken(key, secret).from_string(token)
twitter = twitterapi(key, secret, token)
cmndlist = dir(twitter)
cmnds = []
for cmnd in cmndlist:
if cmnd.startswith("_") or cmnd == "auth":
continue
else:
cmnds.append(cmnd)
if target not in cmnds:
ievent.reply("choose one of: %s" % ", ".join(cmnds))
return
try:
method = getattr(twitter, target)
except AttributeError:
ievent.reply("choose one of: %s" % ", ".join(cmnds))
return
# do the thing
result = method()
res = []
for item in result:
try:
res.append("%s - %s" % (item.screen_name, item.text))
except AttributeError:
try:
res.append("%s - %s" % (item.screen_name, item.description))
except AttributeError:
try:
res.append(unicode(item.__getstate__()))
except AttributeError:
res.append(dir(item))
res.append(unicode(item))
ievent.reply("result of %s: " % target, res)
except KeyError:
#handle_exception()
ievent.reply('you are not logged in yet. see the twitter-auth command.')
except (TweepError, urllib2.HTTPError), e:
ievent.reply('twitter failed: %s' % (str(e),))
cmnds.add('twitter-cmnd', handle_twittercmnd, 'OPER')
examples.add('twitter-cmnd', 'do a cmnd on the twitter API', 'twitter-cmnd home_timeline')
def handle_twitter_confirm(bot, ievent):
""" confirm auth with PIN. """
if not go:
ievent.reply("the twitter plugin needs the credentials.py file in the %s/config dir. see .jsb/data/examples" % getdatadir())
return
pin = ievent.args[0]
if not pin:
ievent.missing("<PIN> .. see the twitter-auth command.")
return
try:
access_token = getauth(getdatadir()).get_access_token(pin)
except (TweepError, urllib2.HTTPError), e:
ievent.reply('twitter failed: %s' % (str(e),))
return
twitteruser = TwitterUsers("users")
twitteruser.add(ievent.user.data.name, access_token.to_string())
ievent.reply("access token saved.")
cmnds.add('twitter-confirm', handle_twitter_confirm, ['USER', 'GUEST'])
examples.add('twitter-confirm', 'confirm your twitter account', '1) twitter-confirm 6992762')
def handle_twitter_auth(bot, ievent):
""" get auth url. """
if not go:
ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples")
return
try:
auth_url = getauth(getdatadir()).get_authorization_url()
except (TweepError, urllib2.HTTPError), e:
ievent.reply('twitter failed: %s' % (str(e),))
return
if bot.type == "irc":
bot.say(ievent.nick, "sign in at %s" % auth_url)
bot.say(ievent.nick, "use the provided code in the twitter-confirm command.")
else:
ievent.reply("sign in at %s" % auth_url)
ievent.reply("use the provided code in the twitter-confirm command.")
cmnds.add('twitter-auth', handle_twitter_auth, ['USER', 'GUEST'])
examples.add('twitter-auth', 'adds your twitter account', '1) twitter-auth')
def handle_twitterfriends(bot, ievent):
""" do a twitter API cmommand. """
if not go:
ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples")
return
try:
twitteruser = TwitterUsers("users")
token = twitteruser.data.get(ievent.user.data.name)
if not token:
ievent.reply("you are not logged in yet .. run the twitter-auth command.")
return
key , secret = getcreds(getdatadir())
token = oauth.OAuthToken(key, secret).from_string(token)
twitter = twitterapi(key, secret, token)
method = getattr(twitter, "friends_timeline")
# do the thing
result = method()
res = []
for item in result:
try:
res.append("%s - %s" % (item.author.screen_name, item.text))
#logging.warn("twitter - %s" % dir(item.author))
#res.append(unicode(item.__getstate__()))
except Exception, ex:
handle_exception()
ievent.reply("results: ", res)
except KeyError:
#handle_exception()
ievent.reply('you are not logged in yet. see the twitter-auth command.')
except (TweepError, urllib2.HTTPError), e:
ievent.reply('twitter failed: %s' % (str(e),))
cmnds.add('twitter-friends', handle_twitterfriends, ['USER', 'GUEST'])
examples.add('twitter-friends', 'show your friends_timeline', 'twitter-friends')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_user
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage local user accounts on Juniper JUNOS devices
description:
- This module manages locally configured user accounts on remote
network devices running the JUNOS operating system. It provides
a set of arguments for creating, removing and updating locally
defined accounts
extends_documentation_fragment: junos
options:
aggregate:
description:
- The C(aggregate) argument defines a list of users to be configured
on the remote device. The list of users will be compared against
the current users and only changes will be added or removed from
the device configuration. This argument is mutually exclusive with
the C(name) argument. Alias: C(users).
version_added: "2.4"
required: False
default: null
name:
description:
- The C(name) argument defines the username of the user to be created
on the system. This argument must follow appropriate usernaming
conventions for the target device running JUNOS. This argument is
mutually exclusive with the C(aggregate) argument.
required: false
default: null
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
required: false
default: null
role:
description:
- The C(role) argument defines the role of the user account on the
remote system. User accounts can have more than one role
configured.
required: false
choices: ['operator', 'read-only', 'super-user', 'unauthorized']
sshkey:
description:
- The C(sshkey) argument defines the public SSH key to be configured
for the user account on the remote system. This argument must
be a valid SSH key
required: false
default: null
purge:
description:
- The C(purge) argument instructs the module to consider the
users definition absolute. It will remove any previously configured
users on the device with the exception of the currently defined
set of users in the aggregate.
required: false
default: false
state:
description:
- The C(state) argument configures the state of the user definitions
as it relates to the device operational configuration. When set
to I(present), the user should be configured in the device active
configuration and when set to I(absent) the user should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether the configuration is active or deactivated.
default: True
choices: [True, False]
version_added: "2.4"
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
"""
EXAMPLES = """
- name: create new user account
junos_user:
name: ansible
role: super-user
sshkey: "{{ lookup('file', '~/.ssh/ansible.pub') }}"
state: present
- name: remove a user account
junos_user:
name: ansible
state: absent
- name: remove all user accounts except ansible
junos_user:
aggregate:
- name: ansible
purge: yes
- name: Create list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: present}
- {name: test_user2, full_name: test_user2, role: read-only, state: present}
- name: Delete list of users
junos_user:
aggregate:
- {name: test_user1, full_name: test_user2, role: operator, state: absent}
- {name: test_user2, full_name: test_user2, role: read-only, state: absent}
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system login]
+ user test-user {
+ uid 2005;
+ class read-only;
+ }
"""
from functools import partial
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.netconf import send_request
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.junos import commit_configuration, discard_changes
from ansible.module_utils.junos import load_config, locked_config
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement, tostring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring
ROLES = ['operator', 'read-only', 'super-user', 'unauthorized']
USE_PERSISTENT_CONNECTION = True
def handle_purge(module, want):
want_users = [item['name'] for item in want]
element = Element('system')
login = SubElement(element, 'login')
reply = send_request(module, Element('get-configuration'), ignore_warning=False)
users = reply.xpath('configuration/system/login/user/name')
if users:
for item in users:
name = item.text
if name not in want_users and name != 'root':
user = SubElement(login, 'user', {'operation': 'delete'})
SubElement(user, 'name').text = name
if element.xpath('/system/login/user/name'):
return element
def map_obj_to_ele(module, want):
element = Element('system')
login = SubElement(element, 'login')
for item in want:
if item['state'] != 'present':
if item['name'] == 'root':
module.fail_json(msg="cannot delete the 'root' account.")
operation = 'delete'
else:
operation = 'merge'
user = SubElement(login, 'user', {'operation': operation})
SubElement(user, 'name').text = item['name']
if operation == 'merge':
if item['active']:
user.set('active', 'active')
else:
user.set('inactive', 'inactive')
if item['role']:
SubElement(user, 'class').text = item['role']
if item.get('full_name'):
SubElement(user, 'full-name').text = item['full_name']
if item.get('sshkey'):
auth = SubElement(user, 'authentication')
ssh_rsa = SubElement(auth, 'ssh-rsa')
key = SubElement(ssh_rsa, 'name').text = item['sshkey']
return element
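# Illustrative shape of the element built above for a single user with
# state=present (placeholder values; indentation added only for readability):
#
#   <system>
#     <login>
#       <user operation="merge" active="active">
#         <name>ansible</name>
#         <class>super-user</class>
#         <full-name>Ansible User</full-name>
#         <authentication>
#           <ssh-rsa>
#             <name>ssh-rsa AAAA... ansible@example</name>
#           </ssh-rsa>
#         </authentication>
#       </user>
#     </login>
#   </system>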
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
aggregate = module.params['aggregate']
if not aggregate:
if not module.params['name'] and module.params['purge']:
return list()
elif not module.params['name']:
module.fail_json(msg='missing required argument: name')
else:
collection = [{'name': module.params['name']}]
else:
collection = list()
for item in aggregate:
if not isinstance(item, dict):
collection.append({'username': item})
elif 'name' not in item:
module.fail_json(msg='missing required argument: name')
else:
collection.append(item)
objects = list()
for item in collection:
get_value = partial(get_param_value, item=item, module=module)
item.update({
'full_name': get_value('full_name'),
'role': get_value('role'),
'sshkey': get_value('sshkey'),
'state': get_value('state'),
'active': get_value('active')
})
for key, value in iteritems(item):
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
objects.append(item)
return objects
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
full_name=dict(),
role=dict(choices=ROLES),
sshkey=dict(),
state=dict(choices=['present', 'absent'], default='present'),
active=dict(type='bool', default=True)
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['collection', 'users']),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
mutually_exclusive = [['aggregate', 'name']]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
ele = map_obj_to_ele(module, want)
purge_request = None
if module.params['purge']:
purge_request = handle_purge(module, want)
with locked_config(module):
if purge_request:
load_config(module, tostring(purge_request), warnings, action='replace')
diff = load_config(module, tostring(ele), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
|
|
from datetime import datetime
import httplib as http
import logging
import json
import time
import urlparse
import httpretty
from nose.tools import * # noqa
import pytz
from oauthlib.oauth2 import OAuth2Error
from framework.auth import authenticate
from framework.exceptions import PermissionsError, HTTPError
from framework.sessions import session
from website.oauth.models import (
ExternalAccount,
ExternalProvider,
OAUTH1,
OAUTH2,
)
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase
from osf_tests.factories import (
AuthUserFactory,
ExternalAccountFactory,
MockOAuth2Provider,
UserFactory,
)
SILENT_LOGGERS = ['oauthlib', 'requests_oauthlib']
for logger in SILENT_LOGGERS:
logging.getLogger(logger).setLevel(logging.ERROR)
class MockOAuth1Provider(ExternalProvider):
_oauth_version = OAUTH1
name = "Mock OAuth 1.0a Provider"
short_name = "mock1a"
client_id = "mock1a_client_id"
client_secret = "mock1a_client_secret"
auth_url_base = "http://mock1a.com/auth"
request_token_url = "http://mock1a.com/request"
callback_url = "http://mock1a.com/callback"
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
def _prepare_mock_oauth2_handshake_response(expires_in=3600):
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body=json.dumps({
'access_token': 'mock_access_token',
'expires_at': time.time() + expires_in,
'expires_in': expires_in,
'refresh_token': 'mock_refresh_token',
'scope': ['all'],
'token_type': 'bearer',
}),
status=200,
content_type='application/json',
)
def _prepare_mock_500_error():
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body='{"error": "not found"}',
status=503,
content_type='application/json',
)
def _prepare_mock_401_error():
httpretty.register_uri(
httpretty.POST,
'https://mock2.com/callback',
body='{"error": "user denied access"}',
status=401,
content_type='application/json',
)
class TestExternalAccount(OsfTestCase):
# Test the ExternalAccount object and associated views.
#
# Functionality not specific to the OAuth version used by the
# ExternalProvider should go here.
def setUp(self):
super(TestExternalAccount, self).setUp()
self.user = AuthUserFactory()
self.provider = MockOAuth2Provider()
def test_disconnect(self):
# Disconnect an external account from a user
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
self.user.external_accounts.add(external_account)
self.user.save()
# If the external account isn't attached, this test has no meaning
assert_equal(ExternalAccount.find().count(), 1)
assert_in(
external_account,
self.user.external_accounts.all(),
)
response = self.app.delete(
api_url_for('oauth_disconnect',
external_account_id=external_account._id),
auth=self.user.auth
)
# Request succeeded
assert_equal(
response.status_code,
http.OK,
)
self.user.reload()
# external_account.reload()
# External account has been disassociated with the user
assert_not_in(
external_account,
self.user.external_accounts.all(),
)
# External account is still in the database
assert_equal(ExternalAccount.find().count(), 1)
def test_disconnect_with_multiple_connected(self):
# Disconnect an account connected to multiple users from one user
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
self.user.external_accounts.add(external_account)
self.user.save()
other_user = UserFactory()
other_user.external_accounts.add(external_account)
other_user.save()
response = self.app.delete(
api_url_for('oauth_disconnect',
external_account_id=external_account._id),
auth=self.user.auth
)
# Request succeeded
assert_equal(
response.status_code,
http.OK,
)
self.user.reload()
# External account has been disassociated with the user
assert_not_in(
external_account,
self.user.external_accounts.all(),
)
# External account is still in the database
assert_equal(ExternalAccount.find().count(), 1)
other_user.reload()
# External account is still associated with the other user
assert_in(
external_account,
other_user.external_accounts.all(),
)
class TestExternalProviderOAuth1(OsfTestCase):
# Test functionality of the ExternalProvider class, for OAuth 1.0a
def setUp(self):
super(TestExternalProviderOAuth1, self).setUp()
self.user = UserFactory()
self.provider = MockOAuth1Provider()
@httpretty.activate
def test_start_flow(self):
# Request temporary credentials from provider, provide auth redirect
httpretty.register_uri(httpretty.POST, 'http://mock1a.com/request',
body='{"oauth_token_secret": "temp_secret", '
'"oauth_token": "temp_token", '
'"oauth_callback_confirmed": "true"}',
status=200,
content_type='application/json')
with self.app.app.test_request_context('/oauth/connect/mock1a/'):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
# auth_url is a property method - it calls out to the external
# service to get a temporary key and secret before returning the
# auth url
url = self.provider.auth_url
# The URL to which the user would be redirected
assert_equal(url, "http://mock1a.com/auth?oauth_token=temp_token")
# Temporary credentials are added to the session
creds = session.data['oauth_states'][self.provider.short_name]
assert_equal(creds['token'], 'temp_token')
assert_equal(creds['secret'], 'temp_secret')
@httpretty.activate
def test_callback(self):
# Exchange temporary credentials for permanent credentials
# mock a successful call to the provider to exchange temp keys for
# permanent keys
httpretty.register_uri(
httpretty.POST,
'http://mock1a.com/callback',
body=(
'oauth_token=perm_token'
'&oauth_token_secret=perm_secret'
'&oauth_callback_confirmed=true'
),
)
user = UserFactory()
# Fake a request context for the callback
ctx = self.app.app.test_request_context(
path='/oauth/callback/mock1a/',
query_string='oauth_token=temp_key&oauth_verifier=mock_verifier',
)
with ctx:
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'token': 'temp_key',
'secret': 'temp_secret',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user)
account = ExternalAccount.objects.first()
assert_equal(account.oauth_key, 'perm_token')
assert_equal(account.oauth_secret, 'perm_secret')
assert_equal(account.provider_id, 'mock_provider_id')
assert_equal(account.provider_name, 'Mock OAuth 1.0a Provider')
@httpretty.activate
def test_callback_wrong_user(self):
# Reject temporary credentials not assigned to the user
#
# This prohibits users from associating their external account with
# another user's OSF account by using XSS or similar attack vector to
# complete the OAuth flow using the logged-in user but their own account
# on the external service.
#
# If the OSF were to allow login via OAuth with the provider in question,
# this would allow attackers to hijack OSF accounts with a simple script
# injection.
# mock a successful call to the provider to exchange temp keys for
# permanent keys
httpretty.register_uri(
httpretty.POST,
'http://mock1a.com/callback',
body='oauth_token=perm_token'
'&oauth_token_secret=perm_secret'
'&oauth_callback_confirmed=true',
)
user = UserFactory()
account = ExternalAccountFactory(
provider="mock1a",
provider_name='Mock 1A',
oauth_key="temp_key",
oauth_secret="temp_secret"
)
account.save()
# associate this ExternalAccount instance with the user
user.external_accounts.add(account)
user.save()
malicious_user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock1a/",
query_string="oauth_token=temp_key&oauth_verifier=mock_verifier"
):
# make sure the user is logged in
authenticate(user=malicious_user, access_token=None, response=None)
with assert_raises(PermissionsError):
# do the key exchange
self.provider.auth_callback(user=malicious_user)
class TestExternalProviderOAuth2(OsfTestCase):
# Test functionality of the ExternalProvider class, for OAuth 2.0
def setUp(self):
super(TestExternalProviderOAuth2, self).setUp()
self.user = UserFactory()
self.provider = MockOAuth2Provider()
def test_oauth_version_default(self):
# OAuth 2.0 is the default version
assert_is(self.provider._oauth_version, OAUTH2)
def test_start_flow(self):
# Generate the appropriate URL and state token
with self.app.app.test_request_context("/oauth/connect/mock2/"):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
# auth_url is a property method - it calls out to the external
# service to get a temporary key and secret before returning the
# auth url
url = self.provider.auth_url
# Temporary credentials are added to the session
creds = session.data['oauth_states'][self.provider.short_name]
assert_in('state', creds)
# The URL to which the user would be redirected
parsed = urlparse.urlparse(url)
params = urlparse.parse_qs(parsed.query)
# check parameters
assert_equal(
params,
{
'state': [creds['state']],
'response_type': ['code'],
'client_id': [self.provider.client_id],
'redirect_uri': [
web_url_for('oauth_callback',
service_name=self.provider.short_name,
_absolute=True)
]
}
)
# check base URL
assert_equal(
url.split("?")[0],
"https://mock2.com/auth",
)
@httpretty.activate
def test_callback(self):
# Exchange temporary credentials for permanent credentials
# Mock the exchange of the code for an access token
_prepare_mock_oauth2_handshake_response()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=self.user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user)
account = ExternalAccount.objects.first()
assert_equal(account.oauth_key, 'mock_access_token')
assert_equal(account.provider_id, 'mock_provider_id')
@httpretty.activate
def test_provider_down(self):
# Create a 500 error
_prepare_mock_500_error()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
with assert_raises(HTTPError) as error_raised:
self.provider.auth_callback(user=user)
assert_equal(
error_raised.exception.code,
503,
)
@httpretty.activate
def test_user_denies_access(self):
# Create a 401 error
_prepare_mock_401_error()
user = UserFactory()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="error=mock_error&code=mock_code&state=mock_state"
):
# make sure the user is logged in
authenticate(user=user, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
assert_false(self.provider.auth_callback(user=user))
@httpretty.activate
def test_multiple_users_associated(self):
# Create only one ExternalAccount for multiple OSF users
#
# For some providers (ex: GitHub), the act of completing the OAuth flow
# revokes previously generated credentials. In addition, there is often no
# way to know the user's id on the external service until after the flow
# has completed.
#
# Having only one ExternalAccount instance per account on the external
# service means that connecting subsequent OSF users to the same external
# account will not invalidate the credentials used by the OSF for users
# already associated.
user_a = UserFactory()
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
)
user_a.external_accounts.add(external_account)
user_a.save()
user_b = UserFactory()
# Mock the exchange of the code for an access token
_prepare_mock_oauth2_handshake_response()
# Fake a request context for the callback
with self.app.app.test_request_context(
path="/oauth/callback/mock2/",
query_string="code=mock_code&state=mock_state"
) as ctx:
# make sure the user is logged in
authenticate(user=user_b, access_token=None, response=None)
session.data['oauth_states'] = {
self.provider.short_name: {
'state': 'mock_state',
},
}
session.save()
# do the key exchange
self.provider.auth_callback(user=user_b)
user_a.reload()
user_b.reload()
external_account.reload()
assert_equal(
list(user_a.external_accounts.values_list('pk', flat=True)),
list(user_b.external_accounts.values_list('pk', flat=True)),
)
assert_equal(
ExternalAccount.find().count(),
1
)
@httpretty.activate
def test_force_refresh_oauth_key(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc)
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'access_token': 'refreshed_access_token',
'expires_in': 3600,
'refresh_token': 'refreshed_refresh_token'
})
)
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=True)
external_account.reload()
assert_equal(external_account.oauth_key, 'refreshed_access_token')
assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
assert_not_equal(external_account.expires_at, old_expiry)
assert_true(external_account.expires_at > old_expiry)
@httpretty.activate
def test_does_need_refresh(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc),
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'access_token': 'refreshed_access_token',
'expires_in': 3600,
'refresh_token': 'refreshed_refresh_token'
})
)
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=False)
external_account.reload()
assert_equal(external_account.oauth_key, 'refreshed_access_token')
assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
assert_not_equal(external_account.expires_at, old_expiry)
assert_true(external_account.expires_at > old_expiry)
@httpretty.activate
def test_does_not_need_refresh(self):
self.provider.refresh_time = 1
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
refresh_token='old_refresh',
expires_at=datetime.utcfromtimestamp(time.time() + 200).replace(tzinfo=pytz.utc),
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
        # .reload() has the side effect of rounding the microseconds down to 3 significant figures
# (e.g. DT(YMDHMS, 365420) becomes DT(YMDHMS, 365000)),
# but must occur after possible refresh to reload tokens.
# Doing so before allows the `old_expiry == EA.expires_at` comparison to work.
external_account.reload()
old_expiry = external_account.expires_at
self.provider.account = external_account
self.provider.refresh_oauth_key(force=False)
external_account.reload()
assert_equal(external_account.oauth_key, 'old_key')
assert_equal(external_account.refresh_token, 'old_refresh')
assert_equal(external_account.expires_at, old_expiry)
@httpretty.activate
def test_refresh_oauth_key_does_not_need_refresh(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() + 9999).replace(tzinfo=pytz.utc)
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
self.provider.account = external_account
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_refresh_with_broken_provider(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc)
)
self.provider.client_id = None
self.provider.client_secret = None
self.provider.account = external_account
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_refresh_without_account_or_refresh_url(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() + 200).replace(tzinfo=pytz.utc)
)
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err_msg': 'Should not be hit'
}),
status=500
)
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_refresh_with_expired_credentials(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 10000).replace(tzinfo=pytz.utc) # Causes has_expired_credentials to be True
)
self.provider.account = external_account
# mock a successful call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'err': 'Should not be hit'
}),
status=500
)
ret = self.provider.refresh_oauth_key(force=False)
assert_false(ret)
@httpretty.activate
def test_force_refresh_with_expired_credentials(self):
external_account = ExternalAccountFactory(
provider='mock2',
provider_id='mock_provider_id',
provider_name='Mock Provider',
oauth_key='old_key',
oauth_secret='old_secret',
expires_at=datetime.utcfromtimestamp(time.time() - 10000).replace(tzinfo=pytz.utc) # Causes has_expired_credentials to be True
)
self.provider.account = external_account
# mock a failing call to the provider to refresh tokens
httpretty.register_uri(
httpretty.POST,
self.provider.auto_refresh_url,
body=json.dumps({
'error': 'invalid_grant',
}),
status=401
)
with assert_raises(OAuth2Error):
self.provider.refresh_oauth_key(force=True)
|
|
import urllib, urllib2, collections, hmac, binascii, time, random
from hashlib import sha1
from flask import Flask, redirect, request, session, render_template
#The 'twitter' helper module is used to simplify posting and fetching tweets (a shortcut taken due to time constraints).
#Alternatively, the same requests could be formed and parsed as raw JSON.
from twitter import *
#Twitter APP Variables
consumer_key = "OUR_CONSUMER_KEY"
consumer_secret = "OUR_CONSUMER_SECRET"
app = Flask(__name__, static_url_path='')
@app.route('/')
def root():
access_token = session.get('access_token')
if access_token is None:
return render_template('index.html')
access_token = access_token[0]
return render_template('loggedin.html')
@app.route('/authenticate')
def authenticate():
#Clear the existing session variables.
session.clear()
session['oauth_secret'] = ''
    requestParams = {
        "oauth_callback": "http://127.0.0.1:5000/authorised",
        "oauth_consumer_key": consumer_key,
        "oauth_nonce": str(random.randint(1, 999999999)),
        "oauth_signature_method": "HMAC-SHA1",
        "oauth_timestamp": int(time.time()),
        "oauth_version": "1.0"
    }
receivedSig = signatureRequest(requestParams, "POST", "https://api.twitter.com/oauth/request_token")
requestParams["oauth_signature"] = receivedSig
request = urllib2.Request("https://api.twitter.com/oauth/request_token", "")
request.add_header("Authorization", formulateOauthHeaders(requestParams))
try:
httpResponse = urllib2.urlopen(request)
except urllib2.HTTPError, e:
return e.read()
responseData = fetchParams(httpResponse.read())
session['oauth_token'] = responseData['oauth_token']
session['oauth_secret'] = responseData['oauth_token_secret']
return redirect("https://api.twitter.com/oauth/authorize?oauth_token=" + session['oauth_token'])
@app.route('/authorised')
def authorised():
if request.args.get('oauth_token', '') == session['oauth_token']:
        verifyRequestParams = {
            "oauth_consumer_key": consumer_key,
            "oauth_nonce": str(random.randint(1, 999999999)),
            "oauth_signature_method": "HMAC-SHA1",
            "oauth_timestamp": int(time.time()),
            "oauth_version": "1.0",
            "oauth_token": session['oauth_token']
        }
signVerification = signatureRequest(verifyRequestParams, "POST", "https://api.twitter.com/oauth/access_token")
verifyRequestParams["oauth_signature"] = signVerification
verifyRequest = urllib2.Request("https://api.twitter.com/oauth/access_token", "oauth_verifier=" + request.args.get('oauth_verifier'))
verifyRequest.add_header("Authorization", formulateOauthHeaders(verifyRequestParams))
try:
httpResponse = urllib2.urlopen(verifyRequest)
except urllib2.HTTPError, e:
return e.read()
responseData = fetchParams(httpResponse.read())
        #TODO: Flash some relevant message if the user denies the request? Currently the sign-in block is shown.
#if responseData is None:
# flash('You denied the request to sign in.')
# return redirect(next_url)
# SAVE some data
session['oauth_token'] = responseData["oauth_token"]
session['oauth_token_secret'] = responseData["oauth_token_secret"]
session['screen_name'] = responseData["screen_name"]
twitter = Twitter(
auth = OAuth(responseData['oauth_token'], responseData['oauth_token_secret'], consumer_key, consumer_secret))
session['results']= twitter.statuses.user_timeline(screen_name = session['screen_name'], count= 10)
session['hasNewTweet'] = 'false'
return render_template('loggedin.html')
@app.route('/tweets/', methods=['POST'])
def tweets():
new_status = request.form['yourstatus']
f_outh_token = request.form['o_t']
f_outh_token_secret = request.form['o_t_s']
f_screen_name = request.form['screen_name']
print("Token:"+f_outh_token+" Message:"+ new_status)
# Let's wrap our request in a try block, to handle 403/401 errors, eg. duplicate tweets that twitter disallows.
# TODO: Handle individual error types and messages accordingly. Currently only generic failure message shown.
try:
twitter = Twitter(
auth = OAuth(f_outh_token, f_outh_token_secret, consumer_key, consumer_secret))
post_response = twitter.statuses.update(status = new_status)
session['hasNewTweet'] = 'true'
except:
session['hasErrors'] = 'true'
#Show results in any case.
#limit tweet count to 10
session['results'] = twitter.statuses.user_timeline(screen_name = f_screen_name , count= 10)
session['screen_name'] = f_screen_name
session['oauth_token'] = f_outh_token
session['oauth_token_secret'] = f_outh_token_secret
return render_template('loggedin.html')
def fetchParams(paramString):
paramString = paramString.split("&")
pDict = {}
for parameter in paramString:
parameter = parameter.split("=")
pDict[parameter[0]] = parameter[1]
return pDict
def signatureRequest(parameters, method, baseURL):
baseURL = urllib.quote(baseURL, '')
p = collections.OrderedDict(sorted(parameters.items(), key=lambda t: t[0]))
requestString = method + "&" + baseURL + "&"
parameterString = ""
for idx, key in enumerate(p.keys()):
paramString = key + "=" + urllib.quote(str(p[key]), '')
if idx < len(p.keys()) - 1:
paramString += "&"
parameterString += paramString
result = requestString + urllib.quote(parameterString, '')
signingKey = consumer_secret + "&" + session['oauth_secret']
hashed = hmac.new(signingKey, result, sha1)
signature = binascii.b2a_base64(hashed.digest())[:-1]
return signature
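# Hedged sketch of what signatureRequest() produces, per OAuth 1.0a
# (values are made up for illustration):
#   base string : "POST&https%3A%2F%2Fapi.twitter.com%2Foauth%2Frequest_token&oauth_callback%3D...%26oauth_consumer_key%3D...%26oauth_nonce%3D..."
#   signing key : consumer_secret + "&" + oauth_secret (the token secret; empty on the request_token leg)
#   signature   : base64(HMAC-SHA1(signing key, base string)) with the trailing newline stripped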
def formulateOauthHeaders(oauthParams):
oauthp = collections.OrderedDict(sorted(oauthParams.items(), key=lambda t: t[0]))
headerString = "OAuth "
for idx, key in enumerate(oauthp):
hString = key + "=\"" + urllib.quote(str(oauthp[key]), '') + "\""
if idx < len(oauthp.keys()) - 1:
hString += ","
headerString += hString
return headerString
if __name__ == '__main__':
app.secret_key = 'I57DOMCRypy08r3ph2cK3yf0R5267o0P'
app.run(host='0.0.0.0', debug=True)
|
|
from __future__ import annotations
from typing import Any, Optional, List, TYPE_CHECKING
import cgi
import collections
import json
import logging
import sys
import irc
import regex as re
import requests
if TYPE_CHECKING:
    from pajbot.bot import Bot
# ScheduleManager is needed at runtime (the MessageAction subclasses below call
# ScheduleManager.execute_now), so it cannot live under TYPE_CHECKING.
from pajbot.managers.schedule import ScheduleManager
log = logging.getLogger(__name__)
class ActionParser:
bot: Optional[Bot] = None
@staticmethod
def parse(raw_data=None, data=None, command=""):
try:
from pajbot.userdispatch import UserDispatch
Dispatch = UserDispatch
except ImportError:
from pajbot.dispatch import Dispatch
except:
log.exception("Something went wrong while attemting to import Dispatch, this should never happen")
sys.exit(1)
if not data:
data = json.loads(raw_data)
if data["type"] == "say":
action = SayAction(data["message"], ActionParser.bot)
elif data["type"] == "me":
action = MeAction(data["message"], ActionParser.bot)
elif data["type"] == "whisper":
action = WhisperAction(data["message"], ActionParser.bot)
elif data["type"] == "reply":
action = ReplyAction(data["message"], ActionParser.bot)
elif data["type"] == "func":
try:
action = FuncAction(getattr(Dispatch, data["cb"]))
except AttributeError as e:
log.error(f'AttributeError caught when parsing action for action "{command}": {e}')
return None
elif data["type"] == "multi":
action = MultiAction(data["args"], data["default"])
else:
raise Exception(f"Unknown action type: {data['type']}")
return action
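# Illustrative only: the raw_data/data payloads ActionParser.parse() expects
# look roughly like this (field names taken from the branches above):
#   {"type": "say", "message": "Hello $(source:name)!"}
#   {"type": "func", "cb": "some_dispatch_method"}
#   {"type": "multi", "default": "help", "args": [...sub-command dicts...]}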
class IfSubstitution:
def __call__(self, key, extra={}):
if self.sub.key is None:
msg = MessageAction.get_argument_value(extra.get("message", ""), self.sub.argument - 1)
if msg:
return self.get_true_response(extra)
return self.get_false_response(extra)
res = self.sub.cb(self.sub.key, extra)
if res:
return self.get_true_response(extra)
return self.get_false_response(extra)
def get_true_response(self, extra):
return apply_substitutions(self.true_response, self.true_subs, self.bot, extra)
def get_false_response(self, extra):
return apply_substitutions(self.false_response, self.false_subs, self.bot, extra)
def __init__(self, key, arguments, bot):
self.bot = bot
subs = get_substitutions(key, bot)
if len(subs) == 1:
self.sub = list(subs.values())[0]
else:
subs = get_argument_substitutions(key)
if len(subs) == 1:
self.sub = subs[0]
else:
self.sub = None
self.true_response = arguments[0][2:-1] if arguments else "Yes"
self.false_response = arguments[1][2:-1] if len(arguments) > 1 else "No"
self.true_subs = get_substitutions(self.true_response, bot)
self.false_subs = get_substitutions(self.false_response, bot)
class SubstitutionFilter:
def __init__(self, name: str, arguments: List[str]):
self.name = name
self.arguments = arguments
class Substitution:
argument_substitution_regex = re.compile(r"\$\((\d+)\)")
substitution_regex = re.compile(
r'\$\(([a-z_]+)(\;[0-9]+)?(\:[\w\.\/ -]+|\:\$\([\w_:;\._\/ -]+\))?(\|[\w]+(\([\w%:/ +-.]+\))?)*(\,[\'"]{1}[\w \|$;_\-:()\.]+[\'"]{1}){0,2}\)'
)
# https://stackoverflow.com/a/7109208
urlfetch_substitution_regex = re.compile(r"\$\(urlfetch ([A-Za-z0-9\-._~:/?#\[\]@!$%&\'()*+,;=]+)\)")
urlfetch_substitution_regex_all = re.compile(r"\$\(urlfetch (.+?)\)")
def __init__(self, cb, needle, key=None, argument=None, filters: List[SubstitutionFilter] = []):
self.cb = cb
self.key = key
self.argument = argument
self.filters = filters
self.needle = needle
class BaseAction:
type: Optional[str] = None
subtype: Optional[str] = None
def reset(self):
pass
class MultiAction(BaseAction):
type = "multi"
def __init__(self, args, default=None, fallback=None):
from pajbot.models.command import Command
self.commands = {}
self.default = default
self.fallback = fallback
for command in args:
cmd = Command.from_json(command)
for alias in command["command"].split("|"):
if alias not in self.commands:
self.commands[alias] = cmd
else:
log.error(f"Alias {alias} for this multiaction is already in use.")
import copy
self.original_commands = copy.copy(self.commands)
def reset(self):
import copy
self.commands = copy.copy(self.original_commands)
def __iadd__(self, other):
if other is not None and other.type == "multi":
self.commands.update(other.commands)
return self
@classmethod
def ready_built(cls, commands, default=None, fallback=None):
"""Useful if you already have a dictionary
with commands pre-built.
"""
multiaction = cls(args=[], default=default, fallback=fallback)
multiaction.commands = commands
import copy
multiaction.original_commands = copy.copy(commands)
return multiaction
def run(self, bot, source, message, event={}, args={}):
"""If there is more text sent to the multicommand after the
initial alias, we _ALWAYS_ assume it's trying the subaction command.
If the extra text was not a valid command, we try to run the fallback command.
In case there's no extra text sent, we will try to run the default command.
"""
cmd = None
if message:
msg_lower_parts = message.lower().split(" ")
command = msg_lower_parts[0]
cmd = self.commands.get(command, None)
extra_msg = " ".join(message.split(" ")[1:])
if cmd is None and self.fallback:
cmd = self.commands.get(self.fallback, None)
extra_msg = message
elif self.default:
command = self.default
cmd = self.commands.get(command, None)
extra_msg = None
if cmd:
if source.level >= cmd.level:
return cmd.run(bot, source, extra_msg, event, args)
log.info(f"User {source} tried running a sub-command he had no access to ({command}).")
return None
class FuncAction(BaseAction):
type = "func"
def __init__(self, cb):
self.cb = cb
def run(self, bot, source, message, event={}, args={}):
try:
return self.cb(bot, source, message, event, args)
except:
log.exception("Uncaught exception in FuncAction")
class RawFuncAction(BaseAction):
type = "rawfunc"
def __init__(self, cb):
self.cb = cb
def run(self, bot, source, message, event={}, args={}):
return self.cb(bot=bot, source=source, message=message, event=event, args=args)
def get_argument_substitutions(string: str) -> List[Substitution]:
"""
Returns a list of `Substitution` objects that are found in the passed `string`.
Will not return multiple `Substitution` objects for the same number.
This means string "$(1) $(1) $(2)" will only return two Substitutions.
"""
argument_substitutions: List[Substitution] = []
for sub_key in Substitution.argument_substitution_regex.finditer(string):
needle = sub_key.group(0)
argument_num = int(sub_key.group(1))
found = False
for sub in argument_substitutions:
if sub.argument == argument_num:
# We already matched this argument variable
found = True
break
if found:
continue
argument_substitutions.append(Substitution(None, needle=needle, argument=argument_num))
return argument_substitutions
def get_substitution_arguments(sub_key):
sub_string = sub_key.group(0)
path = sub_key.group(1)
argument = sub_key.group(2)
if argument is not None:
argument = int(argument[1:])
key = sub_key.group(3)
if key is not None:
key = key[1:]
matched_filters = sub_key.captures(4)
matched_filter_arguments = sub_key.captures(5)
filters: List[SubstitutionFilter] = []
filter_argument_index = 0
for f in matched_filters:
f = f[1:]
filter_arguments: List[str] = []
if "(" in f:
f = f[: -len(matched_filter_arguments[filter_argument_index])]
filter_arguments = [matched_filter_arguments[filter_argument_index][1:-1]]
filter_argument_index += 1
f = SubstitutionFilter(f, filter_arguments)
filters.append(f)
if_arguments = sub_key.captures(6)
return sub_string, path, argument, key, filters, if_arguments
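# Rough sketch (assuming the regex groups above) of how a matched needle is
# decomposed by get_substitution_arguments():
#   "$(source:points|lower)" -> path "source", key "points",
#                               filters [SubstitutionFilter("lower", [])]
#   "$(user;1:points)"       -> path "user", argument 1, key "points"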
def get_substitutions(string: str, bot: Bot) -> dict[str, Substitution]:
"""
    Returns a dictionary of `Substitution` objects that are found in the passed `string`.
Will not return multiple `Substitution` objects for the same string.
This means "You have $(source:points) points xD $(source:points)" only returns one Substitution.
"""
substitutions = collections.OrderedDict()
for sub_key in Substitution.substitution_regex.finditer(string):
sub_string, path, argument, key, filters, if_arguments = get_substitution_arguments(sub_key)
if sub_string in substitutions:
# We already matched this variable
continue
try:
if path == "if":
if if_arguments:
if_substitution = IfSubstitution(key, if_arguments, bot)
if if_substitution.sub is None:
continue
sub = Substitution(if_substitution, needle=sub_string, key=key, argument=argument, filters=filters)
substitutions[sub_string] = sub
except:
log.exception("BabyRage")
method_mapping = {}
try:
method_mapping["kvi"] = bot.get_kvi_value
method_mapping["tb"] = bot.get_value
method_mapping["lasttweet"] = bot.get_last_tweet
# "etm" is legacy
method_mapping["etm"] = bot.get_emote_epm
method_mapping["epm"] = bot.get_emote_epm
method_mapping["etmrecord"] = bot.get_emote_epm_record
method_mapping["epmrecord"] = bot.get_emote_epm_record
method_mapping["ecount"] = bot.get_emote_count
method_mapping["source"] = bot.get_source_value
method_mapping["user"] = bot.get_user_value
method_mapping["usersource"] = bot.get_usersource_value
method_mapping["time"] = bot.get_time_value
method_mapping["date"] = bot.get_date_value
method_mapping["datetimefromisoformat"] = bot.get_datetimefromisoformat_value
method_mapping["datetime"] = bot.get_datetime_value
method_mapping["curdeck"] = bot.decks.action_get_curdeck
method_mapping["stream"] = bot.stream_manager.get_stream_value
method_mapping["current_stream"] = bot.stream_manager.get_current_stream_value
method_mapping["last_stream"] = bot.stream_manager.get_last_stream_value
method_mapping["current_song"] = bot.get_current_song_value
method_mapping["args"] = bot.get_args_value
method_mapping["strictargs"] = bot.get_strictargs_value
method_mapping["command"] = bot.get_command_value
method_mapping["broadcaster"] = bot.get_broadcaster_value
except AttributeError:
pass
for sub_key in Substitution.substitution_regex.finditer(string):
sub_string, path, argument, key, filters, if_arguments = get_substitution_arguments(sub_key)
if sub_string in substitutions:
# We already matched this variable
continue
if path in method_mapping:
sub = Substitution(method_mapping[path], needle=sub_string, key=key, argument=argument, filters=filters)
substitutions[sub_string] = sub
return substitutions
def get_urlfetch_substitutions(string, all=False):
substitutions = {}
if all:
r = Substitution.urlfetch_substitution_regex_all
else:
r = Substitution.urlfetch_substitution_regex
for sub_key in r.finditer(string):
substitutions[sub_key.group(0)] = sub_key.group(1)
return substitutions
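# Illustrative example (URL is made up): for the response string
#   "Weather: $(urlfetch https://example.com/weather)"
# this returns {"$(urlfetch https://example.com/weather)": "https://example.com/weather"}.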
def is_message_good(bot, message, extra):
# this is imported here to avoid circular imports
# (Circular import was command.py importing this file)
from pajbot.modules.ascii import AsciiProtectionModule
checks = {
"banphrase": lambda: bot.banphrase_manager.check_message(message, extra["source"]),
"ascii": lambda: AsciiProtectionModule.check_message(message),
"massping": lambda: bot.module_manager.get_module("massping").check_message(message, extra["source"]),
}
for check_name, check_fn in checks.items():
# Make sure the module is enabled
if check_name not in bot.module_manager:
continue
# apply the check fn
# only if the result is False the check was successful
if check_fn() is not False:
log.info(f'Not sending message "{message}" because check "{check_name}" failed.')
return False
return True
class MessageAction(BaseAction):
type = "message"
def __init__(self, response: str, bot: Optional[Bot]):
self.response = response
self.argument_subs: List[Substitution] = []
self.subs: dict[str, Substitution] = {}
self.num_urlfetch_subs = 0
if bot:
self.argument_subs = get_argument_substitutions(self.response)
self.subs = get_substitutions(self.response, bot)
self.num_urlfetch_subs = len(get_urlfetch_substitutions(self.response, all=True))
@staticmethod
def get_argument_value(message, index):
if not message:
return ""
msg_parts = message.split(" ")
try:
return msg_parts[index]
except:
pass
return ""
def get_response(self, bot: Bot, extra) -> Optional[str]:
resp = self.response
resp = apply_substitutions(resp, self.subs, bot, extra)
if resp is None:
return None
for sub in self.argument_subs:
needle = sub.needle
value = str(MessageAction.get_argument_value(extra["message"], sub.argument - 1))
resp = resp.replace(needle, value)
log.debug(f"Replacing {needle} with {value}")
if "command" in extra and extra["command"].run_through_banphrases is True and "source" in extra:
if not is_message_good(bot, resp, extra):
return None
return resp
@staticmethod
def get_extra_data(source, message, args):
return {"source": source, "message": message, **args}
def run(self, bot, source, message, event={}, args={}):
raise NotImplementedError("Please implement the run method.")
def urlfetch_msg(method, message, num_urlfetch_subs, bot, extra={}, args=[], kwargs={}):
urlfetch_subs = get_urlfetch_substitutions(message)
if len(urlfetch_subs) > num_urlfetch_subs:
log.error(f"HIJACK ATTEMPT {message}")
return False
for needle, url in urlfetch_subs.items():
headers = {
"Accept": "text/plain",
"Accept-Language": "en-US, en;q=0.9, *;q=0.5",
"User-Agent": bot.user_agent,
}
r = requests.get(url, allow_redirects=True, headers=headers)
if r.status_code == requests.codes.ok:
# For "legacy" reasons, we don't check the content type of ok status codes
value = r.text.strip().replace("\n", "").replace("\r", "")[:400]
else:
# An error code was returned, ensure the response is plain text
content_type = r.headers["Content-Type"]
if content_type is not None and cgi.parse_header(content_type)[0] != "text/plain":
# The content type is not plain text, return a generic error showing the status code returned
value = f"urlfetch error {r.status_code}"
else:
value = r.text.strip().replace("\n", "").replace("\r", "")[:400]
message = message.replace(needle, value)
if "command" in extra and extra["command"].run_through_banphrases is True and "source" in extra:
if not is_message_good(bot, message, extra):
return None
args.append(message)
method(*args, **kwargs)
class SayAction(MessageAction):
subtype = "say"
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.say(resp)
return ScheduleManager.execute_now(
urlfetch_msg,
args=[],
kwargs={
"args": [],
"kwargs": {},
"method": bot.say,
"bot": bot,
"extra": extra,
"message": resp,
"num_urlfetch_subs": self.num_urlfetch_subs,
},
)
class MeAction(MessageAction):
subtype = "me"
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.me(resp)
return ScheduleManager.execute_now(
urlfetch_msg,
args=[],
kwargs={
"args": [],
"kwargs": {},
"method": bot.me,
"bot": bot,
"extra": extra,
"message": resp,
"num_urlfetch_subs": self.num_urlfetch_subs,
},
)
class WhisperAction(MessageAction):
subtype = "whisper"
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if self.num_urlfetch_subs == 0:
return bot.whisper(source, resp)
return ScheduleManager.execute_now(
urlfetch_msg,
args=[],
kwargs={
"args": [source],
"kwargs": {},
"method": bot.whisper,
"bot": bot,
"extra": extra,
"message": resp,
"num_urlfetch_subs": self.num_urlfetch_subs,
},
)
class ReplyAction(MessageAction):
subtype = "reply"
def run(self, bot, source, message, event={}, args={}):
extra = self.get_extra_data(source, message, args)
resp = self.get_response(bot, extra)
if not resp:
return False
if irc.client.is_channel(event.target):
if self.num_urlfetch_subs == 0:
return bot.say(resp, channel=event.target)
return ScheduleManager.execute_now(
urlfetch_msg,
args=[],
kwargs={
"args": [],
"kwargs": {"channel": event.target},
"method": bot.say,
"bot": bot,
"extra": extra,
"message": resp,
"num_urlfetch_subs": self.num_urlfetch_subs,
},
)
if self.num_urlfetch_subs == 0:
return bot.whisper(source, resp)
return ScheduleManager.execute_now(
urlfetch_msg,
args=[],
kwargs={
"args": [source],
"kwargs": {},
"method": bot.whisper,
"bot": bot,
"extra": extra,
"message": resp,
"num_urlfetch_subs": self.num_urlfetch_subs,
},
)
def apply_substitutions(text, substitutions: dict[Any, Substitution], bot: Bot, extra):
for needle, sub in substitutions.items():
if sub.key and sub.argument:
param = sub.key
extra["argument"] = MessageAction.get_argument_value(extra["message"], sub.argument - 1)
elif sub.key:
param = sub.key
elif sub.argument:
param = MessageAction.get_argument_value(extra["message"], sub.argument - 1)
else:
log.error("Unknown param for response.")
continue
value: Any = sub.cb(param, extra)
if value is None:
return None
try:
for f in sub.filters:
value = bot.apply_filter(value, f)
except:
log.exception("Exception caught in filter application")
if value is None:
return None
text = text.replace(needle, str(value))
return text
|
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utils for Federated Reconstruction training and evaluation."""
from typing import Callable, Iterable, Optional, Tuple
import attr
import tensorflow as tf
import tensorflow_federated as tff
from reconstruction import reconstruction_model
# Type alias for a function that takes in a TF dataset and tf.int64 round number
# and produces two TF datasets. The first is iterated over during reconstruction
# and the second is iterated over post-reconstruction, for both training and
# evaluation. This can be useful for e.g. splitting the dataset into disjoint
# halves for each stage, doing multiple local epochs of reconstruction/training,
# skipping reconstruction entirely, etc. See `build_dataset_split_fn` for
# a builder, although users can also specify their own `DatasetSplitFn`s (see
# `simple_dataset_split_fn` for an example).
DatasetSplitFn = Callable[[tf.data.Dataset, tf.Tensor], Tuple[tf.data.Dataset,
tf.data.Dataset]]
def simple_dataset_split_fn(
client_dataset: tf.data.Dataset,
round_num: tf.Tensor) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""An example of a `DatasetSplitFn` that returns the original client data.
Both the reconstruction data and post-reconstruction data will result from
iterating over the same tf.data.Dataset. Note that depending on any
preprocessing steps applied to client tf.data.Datasets, this may not produce
exactly the same data in the same order for both reconstruction and
post-reconstruction. For example, if
`client_dataset.shuffle(reshuffle_each_iteration=True)` was applied,
post-reconstruction data will be in a different order than reconstruction
data.
Args:
client_dataset: `tf.data.Dataset` representing client data.
round_num: Scalar tf.int64 tensor representing the 1-indexed round number
during training. During evaluation, this is 0.
Returns:
A tuple of two `tf.data.Datasets`, the first to be used for reconstruction,
the second to be used post-reconstruction.
"""
del round_num
return client_dataset, client_dataset
def build_dataset_split_fn(recon_epochs_max: int = 1,
recon_epochs_constant: bool = True,
recon_steps_max: Optional[int] = None,
post_recon_epochs: int = 1,
post_recon_steps_max: Optional[int] = None,
split_dataset: bool = False) -> DatasetSplitFn:
"""Builds a `DatasetSplitFn` for Federated Reconstruction training/evaluation.
Returned `DatasetSplitFn` parameterizes training and evaluation computations
and enables reconstruction for multiple local epochs (potentially as a
function of the server round number), multiple epochs of post-reconstruction
training, limiting the number of steps for both stages, and splitting client
datasets into disjoint halves for each stage.
Note that the returned function is used during both training and evaluation:
during training, "post-reconstruction" refers to training of global variables
(possibly jointly with local variables), and during evaluation, it refers to
calculation of metrics using reconstructed local variables and fixed global
variables.
Args:
recon_epochs_max: The integer maximum number of iterations over the dataset
to make during reconstruction.
recon_epochs_constant: If True, use `recon_epochs_max` as the constant
number of iterations to make during reconstruction. If False, the number
of iterations is min(round_num, recon_epochs_max).
recon_steps_max: If not None, the integer maximum number of steps (batches)
to iterate through during reconstruction. This maximum number of steps is
across all reconstruction iterations, i.e. it is applied after
`recon_epochs_max` and `recon_epochs_constant`. If None, this has no
effect.
post_recon_epochs: The integer constant number of iterations to make over
client data after reconstruction.
post_recon_steps_max: If not None, the integer maximum number of steps
(batches) to iterate through after reconstruction. This maximum number of
steps is across all post-reconstruction iterations, i.e. it is applied
after `post_recon_epochs`. If None, this has no effect.
split_dataset: If True, splits `client_dataset` in half for each user, using
even-indexed entries in reconstruction and odd-indexed entries after
reconstruction. If False, `client_dataset` is used for both reconstruction
and post-reconstruction, with the above arguments applied. If True,
      splitting requires that multiple iterations through the dataset yield the
same ordering. For example if
`client_dataset.shuffle(reshuffle_each_iteration=True)` has been called,
then the split datasets may have overlap. If True, note that the dataset
should have more than one batch for reasonable results, since the
splitting does not occur within batches.
Returns:
    A `DatasetSplitFn`.
"""
# Functions for splitting dataset if needed.
recon_condition = lambda i, entry: tf.equal(tf.math.floormod(i, 2), 0)
post_recon_condition = lambda i, entry: tf.greater(tf.math.floormod(i, 2), 0)
get_entry = lambda i, entry: entry
def dataset_split_fn(
client_dataset: tf.data.Dataset,
round_num: tf.Tensor) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""A `DatasetSplitFn` built with the given arguments.
Args:
client_dataset: `tf.data.Dataset` representing client data.
round_num: Scalar tf.int64 tensor representing the 1-indexed round number
during training. During evaluation, this is 0.
Returns:
A tuple of two `tf.data.Datasets`, the first to be used for
reconstruction, the second to be used post-reconstruction.
"""
# Split dataset if needed. This assumes the dataset has a consistent
# order across iterations.
if split_dataset:
recon_dataset = client_dataset.enumerate().filter(recon_condition).map(
get_entry)
post_recon_dataset = client_dataset.enumerate().filter(
post_recon_condition).map(get_entry)
else:
recon_dataset = client_dataset
post_recon_dataset = client_dataset
# Number of reconstruction epochs is exactly recon_epochs_max if
# recon_epochs_constant is True, and min(round_num, recon_epochs_max) if
# not.
num_recon_epochs = recon_epochs_max
if not recon_epochs_constant:
num_recon_epochs = tf.math.minimum(round_num, recon_epochs_max)
# Apply `num_recon_epochs` before limiting to a maximum number of batches
# if needed.
recon_dataset = recon_dataset.repeat(num_recon_epochs)
if recon_steps_max is not None:
recon_dataset = recon_dataset.take(recon_steps_max)
# Do the same for post-reconstruction.
post_recon_dataset = post_recon_dataset.repeat(post_recon_epochs)
if post_recon_steps_max is not None:
post_recon_dataset = post_recon_dataset.take(post_recon_steps_max)
return recon_dataset, post_recon_dataset
return dataset_split_fn
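# A minimal usage sketch (parameter values are made up):
#
#   split_fn = build_dataset_split_fn(recon_epochs_max=2, recon_steps_max=10,
#                                     post_recon_epochs=1, split_dataset=True)
#   recon_ds, post_recon_ds = split_fn(client_dataset,
#                                      tf.constant(1, dtype=tf.int64))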
def get_global_variables(
model: reconstruction_model.ReconstructionModel
) -> tff.learning.ModelWeights:
"""Gets global variables from a `ReconstructionModel` as `ModelWeights`."""
return tff.learning.ModelWeights(
trainable=model.global_trainable_variables,
non_trainable=model.global_non_trainable_variables)
def get_local_variables(
model: reconstruction_model.ReconstructionModel
) -> tff.learning.ModelWeights:
"""Gets local variables from a `ReconstructionModel` as `ModelWeights`."""
return tff.learning.ModelWeights(
trainable=model.local_trainable_variables,
non_trainable=model.local_non_trainable_variables)
def has_only_global_variables(
model: reconstruction_model.ReconstructionModel) -> bool:
"""Returns `True` if the model has no local variables."""
local_variables_list = (
list(model.local_trainable_variables) +
list(model.local_non_trainable_variables))
if local_variables_list:
return False
return True
@attr.s(eq=False, frozen=True)
class ServerState(object):
"""Structure for state on the server during training.
Fields:
- `model`: A `tff.learning.ModelWeights` structure of the model's global
variables, both trainable and non_trainable.
- `optimizer_state`: Variables of the server optimizer.
- `round_num`: The integer training round number, 1-indexed.
"""
model = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
aggregator_state = attr.ib()
@attr.s(eq=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during training.
Fields:
- `weights_delta`: A dictionary of updates to the model's global trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when aggregating
`weights_delta`.
- `model_output`: A structure reflecting the losses and metrics produced
during training on the input dataset.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
model_output = attr.ib()
def create_optimizer_vars(
model: reconstruction_model.ReconstructionModel,
optimizer: tf.keras.optimizers.Optimizer) -> Iterable[tf.Variable]:
"""Applies a placeholder update to optimizer to enable getting its variables."""
delta = tf.nest.map_structure(tf.zeros_like,
get_global_variables(model).trainable)
grads_and_vars = tf.nest.map_structure(
lambda x, v: (-1.0 * x, v), tf.nest.flatten(delta),
tf.nest.flatten(get_global_variables(model).trainable))
optimizer.apply_gradients(grads_and_vars, name='server_update')
return optimizer.variables()
|
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for huawei 18000 storage."""
import json
import mock
import os
import shutil
import tempfile
import time
from xml.dom import minidom
from oslo_log import log as logging
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_driver
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import hypermetro
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
LOG = logging.getLogger(__name__)
hypermetro_devices = """{
"remote_device": {
"RestURL": "http://100.115.10.69:8082/deviceManager/rest",
"UserName": "admin",
"UserPassword": "Admin@storage1",
"StoragePool": "StoragePool001",
"domain_name": "hypermetro-domain",
"remote_target_ip": "111.111.101.241"
}
}
"""
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': '11',
}
fake_smartx_value = {'smarttier': 'true',
'smartcache': 'true',
'smartpartition': 'true',
'thin_provisioning_support': 'true',
'thick_provisioning_support': False,
'policy': '2',
'cachename': 'cache-test',
'partitionname': 'partition-test',
}
fake_hypermetro_opts = {'hypermetro': 'true',
'smarttier': False,
'smartcache': False,
'smartpartition': False,
'thin_provisioning_support': False,
'thick_provisioning_support': False,
}
hyper_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool',
'provider_location': '11',
'volume_metadata': [{'key': 'hypermetro_id',
'value': '1'},
{'key': 'remote_lun_id',
'value': '11'}],
}
test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 1,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': '11',
'volume': {"volume_id": '21ec7341-9256-497b-97d9-ef48edcf0635'},
'volume': {'provider_location': '12'},
}
test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool',
'capabilities': {'smartcache': True,
'location_info': '210235G7J20000000000',
'QoS_support': True,
'pool_name': 'OpenStack_Pool',
'timestamp': '2015-07-13T11:41:00.513549',
'smartpartition': True,
'allocated_capacity_gb': 0,
'volume_backend_name': 'Huawei18000FCDriver',
'free_capacity_gb': 20.0,
'driver_version': '1.1.0',
'total_capacity_gb': 20.0,
'smarttier': True,
'hypermetro': True,
'reserved_percentage': 0,
'vendor_name': None,
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'storage_protocol': 'FC',
}
}
test_new_type = {
'name': u'new_type',
'qos_specs_id': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'extra_specs': {
'smarttier': '<is> true',
'smartcache': '<is> true',
'smartpartition': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'smartpartition:partitionname': 'partition-test',
},
'is_public': True,
'deleted_at': None,
'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f',
'description': None,
}
hypermetro_devices = """
{
"remote_device": {
"RestURL": "http://100.115.10.69:8082/deviceManager/rest",
"UserName":"admin",
"UserPassword":"Admin@storage2",
"StoragePool":"StoragePool001",
"domain_name":"hypermetro_test"}
}
"""
FAKE_FIND_POOL_RESPONSE = {'CAPACITY': '985661440',
'ID': '0',
'TOTALCAPACITY': '985661440'}
FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA"}
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'wwpns': ['10000090fa0d6754'],
'wwnns': ['10000090fa0d6755'],
'host': 'ubuntuc',
}
smarttier_opts = {'smarttier': 'true',
'smartpartition': False,
'smartcache': False,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'policy': '3',
'readcachepolicy': '1',
'writecachepolicy': None,
}
fake_fabric_mapping = {
'swd1': {
'target_port_wwn_list': ['2000643e8c4c5f66'],
'initiator_port_wwn_list': ['10000090fa0d6754']
}
}
FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA"}
CHANGE_OPTS = {'policy': ('1', '2'),
'partitionid': (['1', 'partition001'], ['2', 'partition002']),
'cacheid': (['1', 'cache001'], ['2', 'cache002']),
'qos': (['11', {'MAXIOPS': '100', 'IOType': '1'}],
{'MAXIOPS': '100', 'IOType': '2',
'MIN': 1, 'LATENCY': 1}),
'host': ('ubuntu@huawei#OpenStack_Pool',
'ubuntu@huawei#OpenStack_Pool'),
'LUNType': ('0', '1'),
}
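# Canned JSON response bodies, shaped like the array's REST replies; the fake
# REST client below serves them for matching commands.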
# A fake response for creating a host.
FAKE_CREATE_HOST_RESPONSE = """
{
"error": {
"code": 0
},
"data":{"NAME": "ubuntuc001",
"ID": "1"}
}
"""
# A fake generic success response from the storage array.
FAKE_COMMON_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data":{}
}
"""
# A fake response for logging in to the Huawei storage array.
FAKE_GET_LOGIN_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"username": "admin",
"iBaseToken": "2001031430",
"deviceid": "210235G7J20000000000"
}
}
"""
# A fake response for logging out of the Huawei storage array.
FAKE_LOGIN_OUT_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11
}
}
"""
# A fake response for querying storage pool info.
FAKE_STORAGE_POOL_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"USERFREECAPACITY": "985661440",
"ID": "0",
"NAME": "OpenStack_Pool",
"USERTOTALCAPACITY": "985661440"
}]
}
"""
# A fake response for LUN and LUN group requests.
FAKE_LUN_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA"
}
}
"""
FAKE_LUN_GET_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "11",
"IOCLASSID": "11",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"RUNNINGSTATUS": "2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27",
"LUNLIST": "",
"ALLOCTYPE": "1",
"CAPACITY": "2097152",
"WRITEPOLICY": "1",
"MIRRORPOLICY": "0",
"PREFETCHPOLICY": "1",
"PREFETCHVALUE": "20",
"DATATRANSFERPOLICY": "1",
"READCACHEPOLICY": "2",
"WRITECACHEPOLICY": "5",
"OWNINGCONTROLLER": "0B",
"SMARTCACHEPARTITIONID": "",
"CACHEPARTITIONID": ""
}
}
"""
FAKE_QUERY_ALL_LUN_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": "1",
"NAME": "IexzQZJWSXuX2e9I7c8GNQ"
}]
}
"""
FAKE_LUN_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"11"
}]
}
"""
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """
{
"error": {
"code":0
},
"data":[{
"NAME":"OpenStack_LunGroup_1",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}]
}
"""
FAKE_QUERY_LUN_GROUP_RESPONSE = """
{
"error": {
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_LUN_COUNT_RESPONSE = """
{
"data":{
"COUNT":"0"
},
"error":{
"code":0,
"description":"0"
}
}
"""
# A fake response for listing snapshots.
FAKE_SNAPSHOT_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": 11,
"NAME": "wr_LMKAjS7O_VtsEIREGYw"
},
{
"ID": 12,
"NAME": "SDFAJSDFLKJ"
},
{
"ID": 13,
"NAME": "s1Ew5v36To-hR2txJitX5Q"
}]
}
"""
# A fake response for creating a snapshot.
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for querying a snapshot.
FAKE_GET_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for querying iSCSI target ports.
FAKE_GET_ISCSI_INFO_RESPONSE = """
{
"data": [{
"ETHPORTID": "139267",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.101.244",
"TPGT": "8196",
"TYPE": 249
},
{
"ETHPORTID": "139268",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.102.244",
"TPGT": "8196",
"TYPE": 249
}
],
"error": {
"code": 0,
"description": "0"
}
}
"""
# A fake response for querying Ethernet port info.
FAKE_GET_ETH_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "192.168.1.2",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P0",
"MTU": "1500",
"PARENTID": "1.5"
},
{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "192.168.1.1",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P3",
"MTU": "1500",
"PARENTID": "1.5"
}]
}
"""
FAKE_GET_ETH_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"IPV4ADDR": "192.168.1.1",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
},
{
"IPV4ADDR": "192.168.1.2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
}
]
}
"""
# A fake response for querying the iSCSI device name.
FAKE_GET_ISCSI_DEVICE_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:"
}]
}
"""
# A fake response for querying all hosts.
FAKE_GET_ALL_HOST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 245,
"NAME": "ubuntuc",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "1",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
},
{
"PARENTTYPE": 245,
"NAME": "ubuntu",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "2",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
}]
}
"""
# A fake response for querying all host groups.
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
},
{"NAME":"OpenStack_HostGroup_1",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}
]
}
"""
FAKE_GET_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data":{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}
}
"""
# A fake response for LUN copy info.
FAKE_GET_LUN_COPY_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"COPYSTOPTIME": "-1",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "36",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "0",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "-1"
}
}
"""
# A fake response for listing LUN copies.
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"COPYSTOPTIME": "1372209335",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "40",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "100",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "1372209329"
}]
}
"""
# A fake response for querying mapping views.
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"OpenStack_Mapping_View_1",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
},
{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"2",
"INBANDLUNWWN": "",
"TYPE": 245
}]
}
"""
FAKE_GET_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"11",
"INBANDLUNWWN":"",
"TYPE": 245,
"AVAILABLEHOSTLUNIDLIST": ""
}]
}
"""
FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
}
}
"""
FAKE_FC_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6754",
"OPERATIONSYSTEM":"255",
"TYPE":223
},
{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6755",
"OPERATIONSYSTEM":"255",
"TYPE":223
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
}]
}
"""
FAKE_HOST_LINK_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"PARENTTYPE":21,
"TARGET_ID":"0000000000000000",
"INITIATOR_NODE_WWN":"20000090fa0d6754",
"INITIATOR_TYPE":"223",
"RUNNINGSTATUS":"27",
"PARENTNAME":"ubuntuc",
"INITIATOR_ID":"10000090fa0d6754",
"TARGET_PORT_WWN":"24000022a10a2a39",
"HEALTHSTATUS":"1",
"INITIATOR_PORT_WWN":"10000090fa0d6754",
"ID":"010000090fa0d675-0000000000110400",
"TARGET_NODE_WWN":"21000022a10a2a39",
"PARENTID":"1",
"CTRL_ID":"0",
"TYPE":255,
"TARGET_TYPE":"212"
}]
}
"""
FAKE_PORT_GROUP_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":11,
"NAME": "portgroup-test"
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code": 0
},
"data":[{
"CHAPNAME": "mm-user",
"HEALTHSTATUS": "1",
"ID": "iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE": "true",
"MULTIPATHTYPE": "1",
"NAME": "",
"OPERATIONSYSTEM": "255",
"RUNNINGSTATUS": "28",
"TYPE": 222,
"USECHAP": "true"
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
}]
}
"""
FAKE_ERROR_INFO_RESPONSE = """
{
"error":{
"code":31755596
}
}
"""
FAKE_ERROR_CONNECT_RESPONSE = """
{
"error":{
"code":-403
}
}
"""
FAKE_ERROR_LUN_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"IOCLASSID":"11",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"ALLOCTYPE": "0",
"DATATRANSFERPOLICY": "0",
"SMARTCACHEPARTITIONID": "0",
"CACHEPARTITIONID": "0"
}
}
"""
FAKE_GET_FC_INI_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"10000090fa0d6754",
"ISFREE":"true"
}]
}
"""
FAKE_GET_FC_PORT_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"RUNNINGSTATUS":"10",
"WWN":"2000643e8c4c5f66",
"PARENTID":"0A.1"
}]
}
"""
FAKE_SYSTEM_VERSION_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10"
}
}
"""
FAKE_GET_LUN_MIGRATION_RESPONSE = """
{
"data":[{"ENDTIME":"1436816174",
"ID":"9",
"PARENTID":"11",
"PARENTNAME":"xmRBHMlVRruql5vwthpPXQ",
"PROCESS":"-1",
"RUNNINGSTATUS":"76",
"SPEED":"2",
"STARTTIME":"1436816111",
"TARGETLUNID":"1",
"TARGETLUNNAME":"4924891454902893639",
"TYPE":253,
"WORKMODE":"0"
}],
"error":{"code":0,
"description":"0"}
}
"""
FAKE_GET_FC_INI_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"10000090fa0d6754",
"ISFREE":"true"
}]
}
"""
FAKE_HYPERMETRODOMAIN_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10",
"ID": "11",
"NAME": "hypermetro_test",
"RUNNINGSTATUS": "42"
}
}
"""
FAKE_QOS_INFO_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"ID": "11"
}
}
"""
FAKE_GET_FC_PORT_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"RUNNINGSTATUS":"10",
"WWN":"2000643e8c4c5f66",
"PARENTID":"0A.1"
}]
}
"""
FAKE_SMARTCACHEPARTITION_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"NAME":"cache-name"
}
}
"""
FAKE_CONNECT_FC_RESPONCE = {
"driver_volume_type": 'fibre_channel',
"data": {
"target_wwn": ["10000090fa0d6754"],
"target_lun": "1",
"volume_id": "21ec7341-9256-497b-97d9-ef48edcf0635"
}
}
FAKE_METRO_INFO_RESPONCE = {
"error": {
"code": 0
},
"data": {
"PRODUCTVERSION": "V100R001C10",
"ID": "11",
"NAME": "hypermetro_test",
"RUNNINGSTATUS": "42"
}
}
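# MAP_COMMAND_TO_FAKE_RESPONSE maps a command key (the REST path, optionally
# suffixed with '/<HTTP method>') to the canned JSON body that
# Fake18000Client.do_call() returns instead of talking to a real array.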
# mock login info map
MAP_COMMAND_TO_FAKE_RESPONSE = {}
MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = (
FAKE_GET_LOGIN_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/sessions'] = (
FAKE_LOGIN_OUT_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/POST'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION?range=[0-100]/GET'] = (
FAKE_GET_LUN_MIGRATION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock storage info map
MAP_COMMAND_TO_FAKE_RESPONSE['/storagepool'] = (
FAKE_STORAGE_POOL_RESPONSE)
# mock lun info map
MAP_COMMAND_TO_FAKE_RESPONSE['/lun'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun?range=[0-65535]/GET'] = (
FAKE_QUERY_ALL_LUN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=12/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1'
'&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11'
'/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = (
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup'] = (
FAKE_QUERY_LUN_GROUP_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate'] = (
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNGroup/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_COUNT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=12/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot'] = (
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/GET'] = (
FAKE_GET_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/activate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/stop/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?range=[0-32767]/GET'] = (
FAKE_SNAPSHOT_LIST_INFO_RESPONSE)
# mock QoS info map
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/active/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/'] = (
FAKE_QOS_INFO_RESPONSE)
# mock iscsi info map
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_tgt_port/GET'] = (
FAKE_GET_ISCSI_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/GET'] = (
FAKE_GET_ETH_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE'
'=257&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_ETH_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsidevicename'] = (
FAKE_GET_ISCSI_DEVICE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?range=[0-256]/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/POST'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/remove_iscsi_from_host/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/'
'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
# mock host info map
MAP_COMMAND_TO_FAKE_RESPONSE['/host?range=[0-65535]/GET'] = (
FAKE_GET_ALL_HOST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host'] = (
FAKE_CREATE_HOST_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup?range=[0-8191]/GET'] = (
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup'] = (
FAKE_GET_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0'
'&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1'
'/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&'
'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/0/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/associate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock copy info map
MAP_COMMAND_TO_FAKE_RESPONSE['/luncopy'] = (
FAKE_GET_LUN_COPY_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY?range=[0-1023]/GET'] = (
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/start/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/0/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock mapping view info map
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview?range=[0-8191]/GET'] = (
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/1/GET'] = (
FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/lungroup?TYPE=256&'
'ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate?ASSOCIATEOBJTYPE=245&'
'ASSOCIATEOBJID=1&range=[0-8191]/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock FC info map
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&'
'range=[0-8191]/GET'] = (
FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/GET'] = (
FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host_link?INITIATOR_TYPE=223'
'&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = (
FAKE_HOST_LINK_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup?range=[0-8191]&TYPE=257/GET'] = (
FAKE_PORT_GROUP_RESPONSE)
# mock system info map
MAP_COMMAND_TO_FAKE_RESPONSE['/system/'] = (
FAKE_SYSTEM_VERSION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]/GET'] = (
FAKE_GET_FC_INI_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-100]/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition/POST'] = (
FAKE_SYSTEM_VERSION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-100]&PARENTID=1/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/0/GET'] = (
FAKE_SMARTCACHEPARTITION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/REMOVE_ASSOCIATE/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/cachepartition/0/GET'] = (
FAKE_SMARTCACHEPARTITION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroDomain?range=[0-100]/GET'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/POST'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/GET'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/disable_hcpair/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
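# No-op replacement for time.sleep(), stubbed in during setUp() so that the
# driver's wait/retry loops do not slow down the tests.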
def Fake_sleep(time):
pass
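# REST client stub: do_call() answers from MAP_COMMAND_TO_FAKE_RESPONSE, and
# the boolean flags are toggled by individual tests to force error paths.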
class Fake18000Client(rest_client.RestClient):
def __init__(self, configuration):
rest_client.RestClient.__init__(self, configuration)
self.delete_flag = False
self.terminateFlag = False
self.device_id = None
self.test_fail = False
self.test_multi_url_flag = False
self.checkFlag = False
self.remove_chap_flag = False
self.cache_not_exist = False
self.partition_not_exist = False
def _change_file_mode(self, filepath):
pass
def _parse_volume_type(self, volume):
poolinfo = self._find_pool_info()
volume_size = self._get_volume_size(poolinfo, volume)
params = {'LUNType': 0,
'WriteType': '1',
'PrefetchType': '3',
'qos_level': 'Qos-high',
'StripUnitSize': '64',
'PrefetchValue': '0',
'PrefetchTimes': '0',
'qos': 'OpenStack_Qos_High',
'MirrorSwitch': '1',
'tier': 'Tier_high',
}
params['volume_size'] = volume_size
params['pool_id'] = poolinfo['ID']
return params
def _get_snapshotid_by_name(self, snapshot_name):
return "11"
def _check_snapshot_exist(self, snapshot_id):
return True
def get_partition_id_by_name(self, name):
if self.partition_not_exist:
return None
return "11"
def get_cache_id_by_name(self, name):
if self.cache_not_exist:
return None
return "11"
def add_lun_to_cache(self, lunid, cache_id):
pass
def do_call(self, url=False, data=None, method=None, calltimeout=4):
url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
command = url.replace('/210235G7J20000000000/', '')
data = None
if method:
command = command + "/" + method
for item in MAP_COMMAND_TO_FAKE_RESPONSE.keys():
if command == item:
data = MAP_COMMAND_TO_FAKE_RESPONSE[item]
if self.test_fail:
data = FAKE_ERROR_INFO_RESPONSE
if command == 'lun/11/GET':
data = FAKE_ERROR_LUN_INFO_RESPONSE
self.test_fail = False
if self.test_multi_url_flag:
data = FAKE_ERROR_CONNECT_RESPONSE
self.test_multi_url_flag = False
return json.loads(data)
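# Driver subclasses whose do_setup() wires in the fake REST client instead of
# opening a real connection to the array.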
class Fake18000ISCSIStorage(huawei_driver.Huawei18000ISCSIDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self):
self.restclient = Fake18000Client(configuration=self.configuration)
class Fake18000FCStorage(huawei_driver.Huawei18000FCDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.fcsan_lookup_service = None
def do_setup(self):
self.restclient = Fake18000Client(configuration=self.configuration)
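# Test cases for the iSCSI driver: setUp() writes a temporary XML config file
# and plugs the fake REST client into the driver under test.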
class Huawei18000ISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000ISCSIDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.configuration.hypermetro_devices = hypermetro_devices
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000ISCSIStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
self.portgroup = 'portgroup-test'
self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20503:192.168.1.1',
'iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20500:192.168.1.2']
self.target_ips = ['192.168.1.1',
'192.168.1.2']
self.portgroup_id = 11
def test_login_success(self):
device_id = self.driver.restclient.login()
self.assertEqual('210235G7J20000000000', device_id)
def test_create_volume_success(self):
self.driver.restclient.login()
# Have pool info in the volume.
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': '11',
}
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
# No pool info in the volume.
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001',
'provider_location': '11',
}
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_create_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = ''
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = None
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
    def test_create_volume_from_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['ID'])
def test_initialize_connection_success(self):
self.driver.restclient.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.restclient.login()
self.driver.restclient.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.restclient.terminateFlag)
def test_get_volume_status(self):
self.driver.restclient.login()
data = self.driver.get_volume_stats()
self.assertEqual('1.1.1', data['driver_version'])
def test_extend_volume(self):
self.driver.restclient.login()
lun_info = self.driver.extend_volume(test_volume, 3)
self.assertEqual('1', lun_info['provider_location'])
def test_login_fail(self):
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.restclient.login)
def test_create_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_snap)
def test_create_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
def test_delete_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_delete_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
        delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_initialize_connection_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_get_default_timeout(self):
result = huawei_utils.get_default_timeout(self.xml_file_path)
self.assertEqual('43200', result)
def test_get_wait_interval(self):
result = huawei_utils.get_wait_interval(self.xml_file_path,
'LUNReadyWaitInterval')
self.assertEqual(2, result)
def test_lun_is_associated_to_lungroup(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('11', '11')
result = self.driver.restclient._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('12', '12')
self.driver.restclient.remove_lun_from_lungroup('12', '12')
result = self.driver.restclient._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
def test_get_tgtip(self):
self.driver.restclient.login()
portg_id = self.driver.restclient.find_tgt_port_group(self.portgroup)
target_ip = self.driver.restclient._get_tgt_ip_from_portgroup(portg_id)
self.assertEqual(self.target_ips, target_ip)
def test_get_iscsi_params(self):
self.driver.restclient.login()
(iscsi_iqns, target_ips, portgroup_id) = (
self.driver.restclient.get_iscsi_params(self.xml_file_path,
FakeConnector))
self.assertEqual(self.iscsi_iqns, iscsi_iqns)
self.assertEqual(self.target_ips, target_ips)
self.assertEqual(self.portgroup_id, portgroup_id)
def test_get_lun_conf_params(self):
self.driver.restclient.login()
luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
luninfo['pool_id'] = '0'
luninfo['volume_size'] = 2
luninfo['volume_description'] = 'test volume'
luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
luninfo)
self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])
    def test_get_iscsi_conf(self):
self.driver.restclient.login()
iscsiinfo = huawei_utils.get_iscsi_conf(self.xml_file_path)
self.assertEqual('iqn.1993-08.debian:01:ec2bff7ac3a3',
iscsiinfo['Initiator'])
def test_check_conf_file(self):
self.driver.restclient.login()
self.driver.restclient.checkFlag = True
huawei_utils.check_conf_file(self.xml_file_path)
self.assertTrue(self.driver.restclient.checkFlag)
def test_get_conf_host_os_type(self):
self.driver.restclient.login()
host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
self.configuration)
self.assertEqual('0', host_os)
def test_find_chap_info(self):
self.driver.restclient.login()
tmp_dict = {}
iscsi_info = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage'
ini_list = [tmp_dict]
iscsi_info['Initiator'] = ini_list
initiator_name = FakeConnector['initiator']
chapinfo = self.driver.restclient.find_chap_info(iscsi_info,
initiator_name)
chap_username, chap_password = chapinfo.split(';')
self.assertEqual('mm-user', chap_username)
self.assertEqual('mm-user@storage', chap_password)
def test_find_alua_info(self):
self.driver.restclient.login()
tmp_dict = {}
iscsi_info = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['ALUA'] = '1'
ini_list = [tmp_dict]
iscsi_info['Initiator'] = ini_list
initiator_name = FakeConnector['initiator']
type = self.driver.restclient._find_alua_info(iscsi_info,
initiator_name)
self.assertEqual('1', type)
def test_find_pool_info(self):
self.driver.restclient.login()
pools = {
"error": {"code": 0},
"data": [{
"NAME": "test001",
"ID": "0",
"USERFREECAPACITY": "36",
"USERTOTALCAPACITY": "48",
"USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE},
{"NAME": "test002",
"ID": "1",
"USERFREECAPACITY": "37",
"USERTOTALCAPACITY": "49",
"USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE},
{"NAME": "test003",
"ID": "0",
"USERFREECAPACITY": "36",
"DATASPACE": "35",
"USERTOTALCAPACITY": "48",
"USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}]}
pool_name = 'test001'
test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test002'
test_info = {}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test000'
test_info = {}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test003'
test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48'}
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
def test_get_smartx_specs_opts(self):
self.driver.restclient.login()
smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts)
self.assertEqual('3', smartx_opts['policy'])
@mock.patch.object(huawei_utils, 'get_volume_qos',
return_value={'MAXIOPS': '100',
'IOType': '2'})
def test_create_smartqos(self, mock_qos_value):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
@mock.patch.object(huawei_utils, 'get_volume_params',
return_value={'smarttier': 'true',
'smartcache': 'true',
'smartpartition': 'true',
'thin_provisioning_support': 'true',
'thick_provisioning_support': 'false',
'policy': '2',
'cachename': 'cache-test',
'partitionname': 'partition-test'})
    def test_create_smartx(self, mock_volume_types,
                           mock_add_lun_to_partition):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_find_available_qos(self):
self.driver.restclient.login()
qos = {'MAXIOPS': '100', 'IOType': '2'}
fake_qos_info_response_equal = {
"error": {
"code": 0
},
"data": [{
"ID": "11",
"MAXIOPS": "100",
"IOType": "2",
"LUNLIST": u'["1", "2", "3", "4", "5", "6", "7", "8", "9",\
"10", ,"11", "12", "13", "14", "15", "16", "17", "18", "19",\
"20", ,"21", "22", "23", "24", "25", "26", "27", "28", "29",\
"30", ,"31", "32", "33", "34", "35", "36", "37", "38", "39",\
"40", ,"41", "42", "43", "44", "45", "46", "47", "48", "49",\
"50", ,"51", "52", "53", "54", "55", "56", "57", "58", "59",\
"60", ,"61", "62", "63", "64"]'
}]
}
# Number of LUNs in QoS is equal to 64
with mock.patch.object(rest_client.RestClient, 'get_qos',
return_value=fake_qos_info_response_equal):
(qos_id, lun_list) = self.driver.restclient.find_available_qos(qos)
self.assertEqual((None, []), (qos_id, lun_list))
# Number of LUNs in QoS is less than 64
fake_qos_info_response_less = {
"error": {
"code": 0
},
"data": [{
"ID": "11",
"MAXIOPS": "100",
"IOType": "2",
"LUNLIST": u'["0", "1", "2"]'
}]
}
with mock.patch.object(rest_client.RestClient, 'get_qos',
return_value=fake_qos_info_response_less):
(qos_id, lun_list) = self.driver.restclient.find_available_qos(qos)
self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list))
@mock.patch.object(huawei_utils, 'get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'login_with_ip',
return_value='123456789')
@mock.patch.object(rest_client.RestClient, 'find_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'find_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'create_volume',
return_value=FAKE_CREATE_VOLUME_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
@mock.patch.object(hypermetro.HuaweiHyperMetro,
'_create_hypermetro_pair',
return_value={"ID": '11',
"NAME": 'hypermetro-pair'})
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_create_hypermetro_success(self, mock_logout,
mock_hyper_pair_info,
mock_volume_ready,
mock_hyper_domain,
mock_create_volume,
mock_pool_info,
mock_all_pool_info,
mock_login_return,
mock_hypermetro_opts):
self.driver.restclient.login()
metadata = {"hypermetro_id": '11',
"remote_lun_id": '1'}
lun_info = self.driver.create_volume(hyper_volume)
mock_logout.assert_called_with()
self.assertEqual(metadata, lun_info['metadata'])
@mock.patch.object(huawei_utils, 'get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'login_with_ip',
return_value='123456789')
@mock.patch.object(rest_client.RestClient, 'find_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'find_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'create_volume',
return_value=FAKE_CREATE_VOLUME_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
@mock.patch.object(hypermetro.HuaweiHyperMetro,
'_create_hypermetro_pair')
@mock.patch.object(rest_client.RestClient, 'delete_lun',
return_value=None)
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_create_hypermetro_fail(self, mock_logout,
mock_delete_lun,
mock_hyper_pair_info,
mock_volume_ready,
mock_hyper_domain,
mock_create_volume,
mock_pool_info,
mock_all_pool_info,
mock_login_return,
mock_hypermetro_opts):
self.driver.restclient.login()
mock_hyper_pair_info.side_effect = exception.VolumeBackendAPIException(
data='Create hypermetro error.')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, hyper_volume)
mock_delete_lun.assert_called_with('1')
mock_logout.assert_called_with()
@mock.patch.object(rest_client.RestClient, 'login_with_ip',
return_value='123456789')
@mock.patch.object(rest_client.RestClient, 'check_lun_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id',
return_value=FAKE_METRO_INFO_RESPONCE)
@mock.patch.object(rest_client.RestClient, 'delete_hypermetro',
return_value=FAKE_COMMON_SUCCESS_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'delete_lun',
return_value=None)
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_delete_hypermetro_success(self, mock_logout,
mock_delete_lun,
mock_delete_hypermetro,
mock_metro_info,
mock_check_hyermetro,
mock_lun_exit,
mock_login_info):
self.driver.restclient.login()
result = self.driver.delete_volume(hyper_volume)
mock_logout.assert_called_with()
self.assertTrue(result)
@mock.patch.object(rest_client.RestClient, 'login_with_ip',
return_value='123456789')
@mock.patch.object(rest_client.RestClient, 'check_lun_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id',
return_value=FAKE_METRO_INFO_RESPONCE)
@mock.patch.object(rest_client.RestClient, 'delete_hypermetro')
@mock.patch.object(rest_client.RestClient, 'delete_lun',
return_value=None)
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_delete_hypermetro_fail(self, mock_logout,
mock_delete_lun,
mock_delete_hypermetro,
mock_metro_info,
mock_check_hyermetro,
mock_lun_exit,
mock_login_info):
self.driver.restclient.login()
mock_delete_hypermetro.side_effect = (
exception.VolumeBackendAPIException(data='Delete hypermetro '
'error.'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, hyper_volume)
mock_delete_lun.assert_called_with('11')
def create_fake_conf_file(self):
"""Create a fake Config file.
Huawei storage customize a XML configuration file, the configuration
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file.
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
storage.appendChild(storagepool)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.1.2')
initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage')
initiator.setAttribute('ALUA', '1')
initiator.setAttribute('TargetPortGroup', 'portgroup-test')
iscsi.appendChild(initiator)
host = doc.createElement('Host')
host.setAttribute('HostIP', '100.97.10.30')
host.setAttribute('OSType', 'Linux')
config.appendChild(host)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
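# Minimal stand-in for an FC SAN lookup service; it returns a fixed fabric
# mapping for the zoning tests.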
class FCSanLookupService(object):
def get_device_mapping_from_network(self, initiator_list,
target_list):
return fake_fabric_mapping
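# Test cases for the FC driver: these mirror the iSCSI cases and add
# migration, retype and FC zoning coverage.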
class Huawei18000FCDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000FCDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.xml_file_path = self.configuration.cinder_huawei_conf_file
self.configuration.hypermetro_devices = hypermetro_devices
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000FCStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
def test_login_success(self):
device_id = self.driver.restclient.login()
self.assertEqual('210235G7J20000000000', device_id)
def test_create_volume_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_create_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = ''
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = None
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
    def test_create_volume_from_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['ID'])
def test_initialize_connection_success(self):
self.driver.restclient.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.restclient.login()
self.driver.restclient.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.restclient.terminateFlag)
def test_get_volume_status(self):
self.driver.restclient.login()
data = self.driver.get_volume_stats()
self.assertEqual('1.1.1', data['driver_version'])
def test_extend_volume(self):
self.driver.restclient.login()
lun_info = self.driver.extend_volume(test_volume, 3)
self.assertEqual('1', lun_info['provider_location'])
def test_login_fail(self):
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.restclient.login)
def test_create_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_snap)
def test_create_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
def test_delete_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_delete_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_initialize_connection_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_get_default_timeout(self):
result = huawei_utils.get_default_timeout(self.xml_file_path)
self.assertEqual('43200', result)
def test_get_wait_interval(self):
result = huawei_utils.get_wait_interval(self.xml_file_path,
'LUNReadyWaitInterval')
self.assertEqual(2, result)
def test_lun_is_associated_to_lungroup(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('11', '11')
result = self.driver.restclient._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('12', '12')
self.driver.restclient.remove_lun_from_lungroup('12', '12')
result = self.driver.restclient._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
def test_get_lun_conf_params(self):
self.driver.restclient.login()
luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
luninfo['pool_id'] = '0'
luninfo['volume_size'] = 2
luninfo['volume_description'] = 'test volume'
luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
luninfo)
self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])
def test_check_conf_file(self):
self.driver.restclient.login()
self.driver.restclient.checkFlag = True
huawei_utils.check_conf_file(self.xml_file_path)
self.assertTrue(self.driver.restclient.checkFlag)
def test_get_conf_host_os_type(self):
self.driver.restclient.login()
host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
self.configuration)
self.assertEqual('0', host_os)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_migrate_volume_success(self, mock_add_lun_to_partition):
self.driver.restclient.login()
# Migrate volume without new type.
model_update = None
moved = False
empty_dict = {}
moved, model_update = self.driver.migrate_volume(None,
test_volume,
test_host,
None)
self.assertTrue(moved)
self.assertEqual(empty_dict, model_update)
# Migrate volume with new type.
moved = False
empty_dict = {}
new_type = {'extra_specs':
{'smarttier': '<is> true',
'smartcache': '<is> true',
'smartpartition': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'smartpartition:partitionname': 'partition-test'}}
moved, model_update = self.driver.migrate_volume(None,
test_volume,
test_host,
new_type)
self.assertTrue(moved)
self.assertEqual(empty_dict, model_update)
def test_migrate_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
# Migrate volume without new type.
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.migrate_volume, None,
test_volume, test_host, None)
# Migrate volume with new type.
new_type = {'extra_specs':
{'smarttier': '<is> true',
'smartcache': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'partitionname': 'partition-test'}}
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.migrate_volume, None,
test_volume, test_host, new_type)
def test_check_migration_valid(self):
self.driver.restclient.login()
is_valid = self.driver._check_migration_valid(test_host,
test_volume)
self.assertTrue(is_valid)
# No pool_name in capabilities.
invalid_host1 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000000',
'allocated_capacity_gb': 0,
'volume_backend_name': 'Huawei18000FCDriver',
'storage_protocol': 'FC'}}
is_valid = self.driver._check_migration_valid(invalid_host1,
test_volume)
self.assertFalse(is_valid)
# location_info in capabilities is not matched.
invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': 'OpenStack_Pool',
'volume_backend_name': 'Huawei18000FCDriver',
'storage_protocol': 'FC'}}
is_valid = self.driver._check_migration_valid(invalid_host2,
test_volume)
self.assertFalse(is_valid)
        # storage_protocol does not match the current protocol and the
        # volume status is 'in-use'.
volume_in_use = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_attachment': 'in-use',
'provider_location': '11'}
invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': 'OpenStack_Pool',
'volume_backend_name': 'Huawei18000FCDriver',
'storage_protocol': 'iSCSI'}}
is_valid = self.driver._check_migration_valid(invalid_host2,
volume_in_use)
self.assertFalse(is_valid)
# pool_name is empty.
invalid_host3 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': '',
'volume_backend_name': 'Huawei18000FCDriver',
'storage_protocol': 'iSCSI'}}
is_valid = self.driver._check_migration_valid(invalid_host3,
test_volume)
self.assertFalse(is_valid)
@mock.patch.object(rest_client.RestClient, 'rename_lun')
def test_update_migrated_volume_success(self, mock_rename_lun):
self.driver.restclient.login()
original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636'}
model_update = self.driver.update_migrated_volume(None,
original_volume,
current_volume,
'available')
self.assertEqual({'_name_id': None}, model_update)
@mock.patch.object(rest_client.RestClient, 'rename_lun')
def test_update_migrated_volume_fail(self, mock_rename_lun):
self.driver.restclient.login()
mock_rename_lun.side_effect = exception.VolumeBackendAPIException(
data='Error occurred.')
original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636',
'_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'}
model_update = self.driver.update_migrated_volume(None,
original_volume,
current_volume,
'available')
self.assertEqual({'_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'},
model_update)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_retype_volume_success(self, mock_add_lun_to_partition):
self.driver.restclient.login()
retype = self.driver.retype(None, test_volume,
test_new_type, None, test_host)
self.assertTrue(retype)
def test_retype_volume_cache_fail(self):
self.driver.restclient.cache_not_exist = True
self.driver.restclient.login()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.retype, None,
test_volume, test_new_type, None, test_host)
def test_retype_volume_partition_fail(self):
self.driver.restclient.partition_not_exist = True
self.driver.restclient.login()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.retype, None,
test_volume, test_new_type, None, test_host)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_retype_volume_fail(self, mock_add_lun_to_partition):
self.driver.restclient.login()
mock_add_lun_to_partition.side_effect = (
exception.VolumeBackendAPIException(data='Error occurred.'))
retype = self.driver.retype(None, test_volume,
test_new_type, None, test_host)
self.assertFalse(retype)
def test_build_ini_targ_map(self):
self.driver.restclient.login()
fake_lookup_service = FCSanLookupService()
fake_lookup_service.get_device_mapping_from_network = mock.Mock(
return_value=fake_fabric_mapping)
zone_helper = fc_zone_helper.FCZoneHelper(
fake_lookup_service, self.driver.restclient)
(tgt_port_wwns,
init_targ_map) = (zone_helper.build_ini_targ_map(
['10000090fa0d6754']))
target_port_wwns = ['2000643e8c4c5f66']
ini_target_map = {'10000090fa0d6754': ['2000643e8c4c5f66']}
self.assertEqual(target_port_wwns, tgt_port_wwns)
self.assertEqual(ini_target_map, init_targ_map)
def test_filter_port_by_contr(self):
self.driver.restclient.login()
# Six ports in one fabric.
ports_in_fabric = ['1', '2', '3', '4', '5', '6']
# Ports 1, 3, 4 and 7 belong to controller A.
# Ports 2, 5 and 8 belong to controller B.
# Port 6 belongs to controller C.
total_port_contr_map = {'1': 'A', '3': 'A', '4': 'A', '7': 'A',
'2': 'B', '5': 'B', '8': 'B',
'6': 'C'}
zone_helper = fc_zone_helper.FCZoneHelper(None, None)
filtered_ports = zone_helper._filter_port_by_contr(
ports_in_fabric, total_port_contr_map)
expected_filtered_ports = ['1', '3', '2', '5', '6']
self.assertEqual(expected_filtered_ports, filtered_ports)
def test_multi_resturls_success(self):
self.driver.restclient.login()
self.driver.restclient.test_multi_url_flag = True
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_get_id_from_result(self):
self.driver.restclient.login()
result = {}
name = 'test_name'
key = 'NAME'
re = self.driver.restclient._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': {}}
re = self.driver.restclient._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'COUNT': 1, 'ID': '1'},
{'COUNT': 2, 'ID': '2'}]}
re = self.driver.restclient._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'NAME': 'test_name1', 'ID': '1'},
{'NAME': 'test_name2', 'ID': '2'}]}
re = self.driver.restclient._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'NAME': 'test_name', 'ID': '1'},
{'NAME': 'test_name2', 'ID': '2'}]}
re = self.driver.restclient._get_id_from_result(result, name, key)
self.assertEqual('1', re)
@mock.patch.object(rest_client.RestClient, 'find_pool_info',
return_value={'ID': 1,
'CAPACITY': 110362624,
'TOTALCAPACITY': 209715200})
def test_get_capacity(self, mock_find_pool_info):
expected_pool_capacity = {'total_capacity': 100.0,
'free_capacity': 52.625}
pool_capacity = self.driver.restclient._get_capacity(None,
None)
self.assertEqual(expected_pool_capacity, pool_capacity)
@mock.patch.object(huawei_utils, 'get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'login_with_ip',
return_value='123456789')
@mock.patch.object(rest_client.RestClient, 'find_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'find_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'create_volume',
return_value=FAKE_CREATE_VOLUME_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
@mock.patch.object(hypermetro.HuaweiHyperMetro,
'_create_hypermetro_pair',
return_value={"ID": '11',
"NAME": 'hypermetro-pair'})
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_create_hypermetro_success(self, mock_hypermetro_opts,
mock_login_return,
mock_all_pool_info,
mock_pool_info,
mock_create_volume,
mock_hyper_domain,
mock_volume_ready,
mock_pair_info,
mock_logout):
self.driver.restclient.login()
metadata = {"hypermetro_id": '11',
"remote_lun_id": '1'}
lun_info = self.driver.create_volume(hyper_volume)
self.assertEqual(metadata, lun_info['metadata'])
def create_fake_conf_file(self):
"""Create a fake Config file
Huawei storage customize a XML configuration file,
the configuration file is used to set the Huawei storage custom
parameters, therefore, in the UT test we need to simulate such a
configuration file
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
protocol = doc.createElement('Protocol')
protocol_text = doc.createTextNode('FC')
protocol.appendChild(protocol_text)
storage.appendChild(protocol)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
storage.appendChild(storagepool)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
lun_type = doc.createElement('LUNType')
lun_type_text = doc.createTextNode('Thick')
lun_type.appendChild(lun_type_text)
lun.appendChild(lun_type)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.1.2')
iscsi.appendChild(initiator)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
host = doc.createElement('Host')
host.setAttribute('HostIP', '100.97.10.30')
host.setAttribute('OSType', 'Linux')
config.appendChild(host)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
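# For reference, a sketch of the XML layout that create_fake_conf_file()
# writes out (values are the text nodes and attributes built above; the exact
# whitespace produced by toprettyxml may differ):
#   <config>
#     <Storage>
#       <ControllerIP0>10.10.10.1</ControllerIP0>
#       <ControllerIP1>10.10.10.2</ControllerIP1>
#       <UserName>admin</UserName>
#       <UserPassword>Admin@storage</UserPassword>
#       <Protocol>FC</Protocol>
#       <RestURL>http://100.115.10.69:8082/deviceManager/rest/</RestURL>
#       <StoragePool>OpenStack_Pool</StoragePool>
#     </Storage>
#     <LUN>
#       <StoragePool>OpenStack_Pool;OpenStack_Pool2</StoragePool>
#       <LUNType>Thick</LUNType>
#       <Timeout>43200</Timeout>
#       <LUNReadyWaitInterval>2</LUNReadyWaitInterval>
#       <Prefetch Type="1" Value="0"/>
#     </LUN>
#     <iSCSI>
#       <DefaultTargetIP>100.115.10.68</DefaultTargetIP>
#       <Initiator Name="iqn.1993-08.debian:01:ec2bff7ac3a3" TargetIP="192.168.1.2"/>
#     </iSCSI>
#     <Host HostIP="100.97.10.30" OSType="Linux"/>
#   </config>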
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides DockerImage for examining docker_build outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import gzip
import io
import json
import os
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v1 import docker_creds as v1_creds
from containerregistry.client.v1 import docker_http
import httplib2
import six
from six.moves import range # pylint: disable=redefined-builtin
import six.moves.http_client
class DockerImage(six.with_metaclass(abc.ABCMeta, object)):
"""Interface for implementations that interact with Docker images."""
# pytype: disable=bad-return-type
@abc.abstractmethod
def top(self):
"""The layer id of the topmost layer."""
# pytype: enable=bad-return-type
# pytype: disable=bad-return-type
@abc.abstractmethod
def repositories(self):
"""The json blob of tags, loaded as a dict."""
pass
# pytype: enable=bad-return-type
def parent(self, layer_id):
"""The layer of id of the parent of the provided layer, or None.
Args:
layer_id: the id of the layer whose parentage we're asking
Returns:
The identity of the parent layer, or None if the root.
"""
metadata = json.loads(self.json(layer_id))
if 'parent' not in metadata:
return None
return metadata['parent']
# pytype: disable=bad-return-type
@abc.abstractmethod
def json(self, layer_id):
"""The JSON metadata of the provided layer.
Args:
layer_id: the id of the layer whose metadata we're asking
Returns:
The raw json string of the layer.
"""
pass
# pytype: enable=bad-return-type
# pytype: disable=bad-return-type
@abc.abstractmethod
def layer(self, layer_id):
"""The layer.tar.gz blob of the provided layer id.
Args:
layer_id: the id of the layer for whose layer blob we're asking
Returns:
The raw blob string of the layer.
"""
pass
# pytype: enable=bad-return-type
def uncompressed_layer(self, layer_id):
"""Same as layer() but uncompressed."""
zipped = self.layer(layer_id)
buf = io.BytesIO(zipped)
f = gzip.GzipFile(mode='rb', fileobj=buf)
unzipped = f.read()
return unzipped
def diff_id(self, digest):
"""diff_id only exist in schema v22."""
return None
# pytype: disable=bad-return-type
@abc.abstractmethod
def ancestry(self, layer_id):
"""The ancestry of the given layer, base layer first.
Args:
layer_id: the id of the layer whose ancestry we're asking
Returns:
The list of ancestor IDs, layer_id first, base layer last.
"""
pass
# pytype: enable=bad-return-type
# __enter__ and __exit__ allow use as a context manager.
@abc.abstractmethod
def __enter__(self):
pass
@abc.abstractmethod
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
# Gzip injects a timestamp into its output, which makes its output and digest
# non-deterministic. To get reproducible pushes, freeze time.
# This approach is based on the following StackOverflow answer:
# http://stackoverflow.com/
# questions/264224/setting-the-gzip-timestamp-from-python
class _FakeTime(object):
def time(self):
return 1225856967.109
gzip.time = _FakeTime()
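# Illustrative sketch (not part of the original module): with the timestamp
# frozen as above, compressing the same bytes twice yields byte-identical
# output, so layer digests become reproducible. Roughly:
#   buf1, buf2 = io.BytesIO(), io.BytesIO()
#   for buf in (buf1, buf2):
#     with gzip.GzipFile(mode='wb', fileobj=buf) as f:
#       f.write(b'layer contents')
#   assert buf1.getvalue() == buf2.getvalue()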
class FromShardedTarball(DockerImage):
"""This decodes the sharded image tarballs from docker_build."""
def __init__(self,
layer_to_tarball,
top,
name = None,
compresslevel = 9):
self._layer_to_tarball = layer_to_tarball
self._top = top
self._compresslevel = compresslevel
self._memoize = {}
self._lock = threading.Lock()
self._name = name
def _content(self, layer_id, name, memoize = True):
"""Fetches a particular path's contents from the tarball."""
# Check our cache
if memoize:
with self._lock:
if name in self._memoize:
return self._memoize[name]
# tarfile is inherently single-threaded:
# https://mail.python.org/pipermail/python-bugs-list/2015-March/265999.html
# so instead of locking, just open the tarfile for each file
# we want to read.
with tarfile.open(name=self._layer_to_tarball(layer_id), mode='r:') as tar:
try:
content = tar.extractfile(name).read() # pytype: disable=attribute-error
except KeyError:
content = tar.extractfile('./' + name).read() # pytype: disable=attribute-error
# Populate our cache.
if memoize:
with self._lock:
self._memoize[name] = content
return content
def top(self):
"""Override."""
return self._top
def repositories(self):
"""Override."""
return json.loads(self._content(self.top(), 'repositories').decode('utf8'))
def json(self, layer_id):
"""Override."""
return self._content(layer_id, layer_id + '/json').decode('utf8')
# Large, do not memoize.
def uncompressed_layer(self, layer_id):
"""Override."""
return self._content(layer_id, layer_id + '/layer.tar', memoize=False)
# Large, do not memoize.
def layer(self, layer_id):
"""Override."""
unzipped = self.uncompressed_layer(layer_id)
buf = io.BytesIO()
f = gzip.GzipFile(mode='wb', compresslevel=self._compresslevel, fileobj=buf)
try:
f.write(unzipped)
finally:
f.close()
zipped = buf.getvalue()
return zipped
def ancestry(self, layer_id):
"""Override."""
p = self.parent(layer_id)
if not p:
return [layer_id]
return [layer_id] + self.ancestry(p)
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def _get_top(tarball, name = None):
"""Get the topmost layer in the image tarball."""
with tarfile.open(name=tarball, mode='r:') as tar:
reps = tar.extractfile('repositories') or tar.extractfile('./repositories')
if reps is None:
raise ValueError('Tarball must contain a repositories file')
repositories = json.loads(reps.read().decode('utf8'))
if name:
key = str(name.as_repository())
return repositories[key][name.tag]
if len(repositories) != 1:
raise ValueError('Tarball must contain a single repository, '
'or a name must be specified to FromTarball.')
for (unused_repo, tags) in six.iteritems(repositories):
if len(tags) != 1:
raise ValueError('Tarball must contain a single tag, '
'or a name must be specified to FromTarball.')
for (unused_tag, layer_id) in six.iteritems(tags):
return layer_id
raise Exception('Unreachable code in _get_top()')
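# For illustration, an assumed minimal 'repositories' file of the kind this
# function parses (repository name -> {tag: layer_id}):
#   {"gcr.io/some-project/some-image": {"latest": "<64-hex layer id>"}}
# With exactly one repository and one tag, _get_top can return the layer id
# without a `name` argument; otherwise a docker_name.Tag must be supplied.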
class FromTarball(FromShardedTarball):
"""This decodes the image tarball output of docker_build for upload."""
def __init__(self,
tarball,
name = None,
compresslevel = 9):
super(FromTarball, self).__init__(
lambda unused_id: tarball,
_get_top(tarball, name),
name=name,
compresslevel=compresslevel)
class FromRegistry(DockerImage):
"""This accesses a docker image hosted on a registry (non-local)."""
def __init__(
self,
name,
basic_creds,
transport):
self._name = name
self._creds = basic_creds
self._transport = transport
# Set up in __enter__
self._tags = {}
self._response = {}
def top(self):
"""Override."""
assert isinstance(self._name, docker_name.Tag)
return self._tags[self._name.tag]
def repositories(self):
"""Override."""
return {self._name.repository: self._tags}
def tags(self):
"""Lists the tags present in the remote repository."""
return list(self.raw_tags().keys())
def raw_tags(self):
"""Dictionary of tag to image id."""
return self._tags
def _content(self, suffix):
if suffix not in self._response:
_, self._response[suffix] = docker_http.Request(
self._transport, '{scheme}://{endpoint}/v1/images/{suffix}'.format(
scheme=docker_http.Scheme(self._endpoint),
endpoint=self._endpoint,
suffix=suffix), self._creds, [six.moves.http_client.OK])
return self._response[suffix]
def json(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/json
return self._content(layer_id + '/json').decode('utf8')
# Large, do not memoize.
def layer(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/layer
return self._content(layer_id + '/layer')
def ancestry(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/ancestry
return json.loads(self._content(layer_id + '/ancestry').decode('utf8'))
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
# This initiates the pull by issuing:
# GET H:P/v1/repositories/R/images
resp, unused_content = docker_http.Request(
self._transport,
'{scheme}://{registry}/v1/repositories/{repository_name}/images'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry,
repository_name=self._name.repository), self._creds,
[six.moves.http_client.OK])
# The response should have an X-Docker-Token header, which
# we should extract and annotate subsequent requests with:
# Authorization: Token {extracted value}
self._creds = v1_creds.Token(resp['x-docker-token'])
self._endpoint = resp['x-docker-endpoints']
# TODO(user): Consider also supporting cookies, which are
# used by Quay.io for authenticated sessions.
# Next, fetch the set of tags in this repository.
# GET server1/v1/repositories/R/tags
resp, content = docker_http.Request(
self._transport,
'{scheme}://{endpoint}/v1/repositories/{repository_name}/tags'.format(
scheme=docker_http.Scheme(self._endpoint),
endpoint=self._endpoint,
repository_name=self._name.repository), self._creds,
[six.moves.http_client.OK])
self._tags = json.loads(content.decode('utf8'))
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
class Random(DockerImage):
"""This generates an image with Random properties.
We ensure basic consistency of the generated docker
image.
"""
# TODO(b/36589467): Add function arg for creating blob.
def __init__(self,
sample,
num_layers = 5,
layer_byte_size = 64,
blobs = None):
# Generate the image.
self._ancestry = []
self._layers = {}
num_layers = len(blobs) if blobs else num_layers
for i in range(num_layers):
# Avoid repetitions.
while True:
layer_id = self._next_id(sample)
if layer_id not in self._ancestry:
self._ancestry += [layer_id]
blob = blobs[i] if blobs else None
self._layers[layer_id] = self._next_layer(
sample, layer_byte_size, blob)
break
def top(self):
"""Override."""
return self._ancestry[0]
def repositories(self):
"""Override."""
return {'random/image': {'latest': self.top(),}}
def json(self, layer_id):
"""Override."""
metadata = {'id': layer_id}
ancestry = self.ancestry(layer_id)
if len(ancestry) != 1:
metadata['parent'] = ancestry[1]
return json.dumps(metadata, sort_keys=True)
def layer(self, layer_id):
"""Override."""
return self._layers[layer_id]
def ancestry(self, layer_id):
"""Override."""
assert layer_id in self._ancestry
index = self._ancestry.index(layer_id)
return self._ancestry[index:]
def _next_id(self, sample):
return sample(b'0123456789abcdef', 64).decode('utf8')
# pylint: disable=missing-docstring
def _next_layer(self, sample,
layer_byte_size, blob):
buf = io.BytesIO()
# TODO(user): Consider doing something more creative...
with tarfile.open(fileobj=buf, mode='w:gz') as tar:
if blob:
info = tarfile.TarInfo(name='./'+self._next_id(sample))
info.size = len(blob)
tar.addfile(info, fileobj=io.BytesIO(blob))
# Linux optimization, use dd for data file creation.
elif sys.platform.startswith('linux') and layer_byte_size >= 1024 * 1024:
mb = layer_byte_size / (1024 * 1024)
tempdir = tempfile.mkdtemp()
data_filename = os.path.join(tempdir, 'a.bin')
if os.path.exists(data_filename):
os.remove(data_filename)
process = subprocess.Popen([
'dd', 'if=/dev/urandom',
'of=%s' % data_filename, 'bs=1M',
'count=%d' % mb
])
process.wait()
with io.open(data_filename, u'rb') as fd:
info = tar.gettarinfo(name=data_filename)
tar.addfile(info, fileobj=fd)
os.remove(data_filename)
os.rmdir(tempdir)
else:
data = sample(string.printable.encode('utf8'), layer_byte_size)
info = tarfile.TarInfo(name='./' + self._next_id(sample))
info.size = len(data)
tar.addfile(info, fileobj=io.BytesIO(data))
return buf.getvalue()
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
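# Minimal usage sketch (not part of the original module; the tarball path is
# a command-line placeholder and is assumed to point at a docker_build /
# `docker save` style v1 image tarball):
if __name__ == '__main__':
  import argparse
  _parser = argparse.ArgumentParser(description='Inspect a v1 image tarball.')
  _parser.add_argument('tarball', help='path to the image tarball')
  _args = _parser.parse_args()
  with FromTarball(_args.tarball) as img:
    _top = img.top()
    print('top layer: %s' % _top)
    print('ancestry: %s' % img.ancestry(_top))
    print('top json: %s' % img.json(_top))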
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from collections import defaultdict
from operator import mul
from pymatgen.core.periodic_table import Specie, get_el_sp
from monty.design_patterns import cached_class
import itertools
import json
import logging
import math
import os
import six
from six.moves import zip
"""
This module provides classes for representing species substitution
probabilities.
"""
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "[email protected]"
__date__ = "Aug 31, 2012"
@cached_class
class SubstitutionProbability(object):
"""
This class finds substitution probabilities given lists of atoms
to substitute. The inputs make more sense if you look through the
from_defaults static method.
The substitution prediction algorithm is presented in:
Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011)
Data Mined Ionic Substitutions for the Discovery of New Compounds.
Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
Args:
lambda_table:
json table of the weight functions lambda. If None,
the default lambda.json table will be used.
alpha:
weight function for never observed substitutions
"""
def __init__(self, lambda_table=None, alpha=-5):
if lambda_table is not None:
self._lambda_table = lambda_table
else:
module_dir = os.path.dirname(__file__)
json_file = os.path.join(module_dir, 'data', 'lambda.json')
with open(json_file) as f:
self._lambda_table = json.load(f)
# build map of specie pairs to lambdas
self.alpha = alpha
self._l = {}
self.species = set()
for row in self._lambda_table:
if 'D1+' not in row:
s1 = Specie.from_string(row[0])
s2 = Specie.from_string(row[1])
self.species.add(s1)
self.species.add(s2)
self._l[frozenset([s1, s2])] = float(row[2])
# create Z and px
self.Z = 0
self._px = defaultdict(float)
for s1, s2 in itertools.product(self.species, repeat=2):
value = math.exp(self.get_lambda(s1, s2))
self._px[s1] += value / 2
self._px[s2] += value / 2
self.Z += value
def get_lambda(self, s1, s2):
k = frozenset([get_el_sp(s1),
get_el_sp(s2)])
return self._l.get(k, self.alpha)
def get_px(self, sp):
return self._px[get_el_sp(sp)]
def prob(self, s1, s2):
"""
Gets the probability of 2 species substitution. Not used by the
structure predictor.
Returns:
Probability of s1 and s2 substitution.
"""
return math.exp(self.get_lambda(s1, s2)) / self.Z
def cond_prob(self, s1, s2):
"""
Conditional probability of substituting s1 for s2.
Args:
s1:
The *variable* specie
s2:
The *fixed* specie
Returns:
Conditional probability used by structure predictor.
"""
return math.exp(self.get_lambda(s1, s2)) / self.get_px(s2)
def pair_corr(self, s1, s2):
"""
Pair correlation of two species.
Returns:
The pair correlation of 2 species
"""
return math.exp(self.get_lambda(s1, s2)) * \
self.Z / (self.get_px(s1) * self.get_px(s2))
def cond_prob_list(self, l1, l2):
"""
Find the probabilities of 2 lists. These should include ALL species.
This is the probability conditional on l2
Args:
l1, l2:
lists of species
Returns:
The conditional probability (assuming these species are in
l2)
"""
assert len(l1) == len(l2)
p = 1
for s1, s2 in zip(l1, l2):
p *= self.cond_prob(s1, s2)
return p
def as_dict(self):
return {"name": self.__class__.__name__, "version": __version__,
"init_args": {"lambda_table": self._lambda_table,
"alpha": self._alpha},
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return cls(**d['init_args'])
class SubstitutionPredictor(object):
"""
Predicts likely substitutions either to or from a given composition
or species list using the SubstitutionProbability
"""
def __init__(self, lambda_table=None, alpha=-5, threshold=1e-3):
self.p = SubstitutionProbability(lambda_table, alpha)
self.threshold = threshold
def list_prediction(self, species, to_this_composition=True):
"""
Args:
species:
list of species
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
for sp in species:
if get_el_sp(sp) not in self.p.species:
raise ValueError("the species {} is not allowed for the"
"probability model you are using".format(sp))
max_probabilities = []
for s1 in species:
if to_this_composition:
max_p = max([self.p.cond_prob(s2, s1) for s2 in self.p.species])
else:
max_p = max([self.p.cond_prob(s1, s2) for s2 in self.p.species])
max_probabilities.append(max_p)
output = []
def _recurse(output_prob, output_species):
best_case_prob = list(max_probabilities)
best_case_prob[:len(output_prob)] = output_prob
if six.moves.reduce(mul, best_case_prob) > self.threshold:
if len(output_species) == len(species):
odict = {
'probability': six.moves.reduce(mul, best_case_prob)}
if to_this_composition:
odict['substitutions'] = dict(
zip(output_species, species))
else:
odict['substitutions'] = dict(
zip(species, output_species))
if len(output_species) == len(set(output_species)):
output.append(odict)
return
for sp in self.p.species:
i = len(output_prob)
if to_this_composition:
prob = self.p.cond_prob(sp, species[i])
else:
prob = self.p.cond_prob(species[i], sp)
_recurse(output_prob + [prob], output_species + [sp])
_recurse([], [])
logging.info('{} substitutions found'.format(len(output)))
return output
def composition_prediction(self, composition, to_this_composition=True):
"""
Returns charge-balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
preds = self.list_prediction(list(composition.keys()),
to_this_composition)
output = []
for p in preds:
if to_this_composition:
subs = {v: k for k, v in p['substitutions'].items()}
else:
subs = p['substitutions']
charge = 0
for k, v in composition.items():
charge += subs[k].oxi_state * v
if abs(charge) < 1e-8:
output.append(p)
logging.info('{} charge balanced substitutions found'
.format(len(output)))
return output
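# Minimal usage sketch (not part of the original module). It assumes that the
# bundled lambda.json table contains the common ions used below; adjust the
# species strings as needed.
if __name__ == "__main__":
    predictor = SubstitutionPredictor(threshold=1e-3)
    predictions = predictor.list_prediction(["Li+", "O2-"],
                                            to_this_composition=True)
    for pred in predictions[:5]:
        print("{} -> {}".format(pred["probability"], pred["substitutions"]))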
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for constructing vocabulary, converting the examples to integer format and building the required masks for batch computation Author: aneelakantan (Arvind Neelakantan)
"""
import copy
import numbers
import numpy as np
import wiki_data
def return_index(a):
for i in range(len(a)):
if (a[i] == 1.0):
return i
def construct_vocab(data, utility, add_word=False):
ans = []
for example in data:
sent = ""
for word in example.question:
if (not (isinstance(word, numbers.Number))):
sent += word + " "
example.original_nc = copy.deepcopy(example.number_columns)
example.original_wc = copy.deepcopy(example.word_columns)
example.original_nc_names = copy.deepcopy(example.number_column_names)
example.original_wc_names = copy.deepcopy(example.word_column_names)
if (add_word):
continue
number_found = 0
if (not (example.is_bad_example)):
for word in example.question:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
for col_name in example.word_column_names:
for word in col_name:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
for col_name in example.number_column_names:
for word in col_name:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
def word_lookup(word, utility):
if (utility.word_ids.has_key(word)):
return word
else:
return utility.unk_token
def convert_to_int_2d_and_pad(a, utility):
ans = []
#print a
for b in a:
temp = []
if (len(b) > utility.FLAGS.max_entry_length):
b = b[0:utility.FLAGS.max_entry_length]
for remaining in range(len(b), utility.FLAGS.max_entry_length):
b.append(utility.dummy_token)
assert len(b) == utility.FLAGS.max_entry_length
for word in b:
temp.append(utility.word_ids[word_lookup(word, utility)])
ans.append(temp)
#print ans
return ans
def convert_to_bool_and_pad(a, utility):
a = a.tolist()
for i in range(len(a)):
for j in range(len(a[i])):
if (a[i][j] < 1):
a[i][j] = False
else:
a[i][j] = True
a[i] = a[i] + [False] * (utility.FLAGS.max_elements - len(a[i]))
return a
seen_tables = {}
def partial_match(question, table, number):
answer = []
match = {}
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
for j in range(len(table[i])):
for word in question:
if (number):
if (word == table[i][j]):
answer[i][j] = 1.0
match[i] = 1.0
else:
if (word in table[i][j]):
answer[i][j] = 1.0
match[i] = 1.0
return answer, match
def exact_match(question, table, number):
#performs exact match operation
answer = []
match = {}
matched_indices = []
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
for j in range(len(table[i])):
if (number):
for word in question:
if (word == table[i][j]):
match[i] = 1.0
answer[i][j] = 1.0
else:
table_entry = table[i][j]
for k in range(len(question)):
if (k + len(table_entry) <= len(question)):
if (table_entry == question[k:(k + len(table_entry))]):
#if(len(table_entry) == 1):
#print "match: ", table_entry, question
match[i] = 1.0
answer[i][j] = 1.0
matched_indices.append((k, len(table_entry)))
return answer, match, matched_indices
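# Worked example (comments only; values are assumed for illustration). For a
# word table whose cells are lists of tokens, e.g.
#   question = ['new', 'york', 'city']
#   table = [[['new', 'york'], ['london']]]
# exact_match(question, table, number=False) marks table[0][0] because the
# entry ['new', 'york'] equals the question slice question[0:2], giving
#   answer = [[1.0, 0]], match = {0: 1.0}, matched_indices = [(0, 2)]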
def partial_column_match(question, table, number):
answer = []
for i in range(len(table)):
answer.append(0)
for i in range(len(table)):
for word in question:
if (word in table[i]):
answer[i] = 1.0
return answer
def exact_column_match(question, table, number):
#performs exact match on column names
answer = []
matched_indices = []
for i in range(len(table)):
answer.append(0)
for i in range(len(table)):
table_entry = table[i]
for k in range(len(question)):
if (k + len(table_entry) <= len(question)):
if (table_entry == question[k:(k + len(table_entry))]):
answer[i] = 1.0
matched_indices.append((k, len(table_entry)))
return answer, matched_indices
def get_max_entry(a):
e = {}
for w in a:
if (w != "UNK, "):
if (e.has_key(w)):
e[w] += 1
else:
e[w] = 1
if (len(e) > 0):
(key, val) = sorted(e.items(), key=lambda x: -1 * x[1])[0]
if (val > 1):
return key
else:
return -1.0
else:
return -1.0
def list_join(a):
ans = ""
for w in a:
ans += str(w) + ", "
return ans
def group_by_max(table, number):
#computes the most frequently occurring entry in a column
answer = []
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
if (number):
curr = table[i]
else:
curr = [list_join(w) for w in table[i]]
max_entry = get_max_entry(curr)
#print i, max_entry
for j in range(len(curr)):
if (max_entry == curr[j]):
answer[i][j] = 1.0
else:
answer[i][j] = 0.0
return answer
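# Worked example (comments only; values are assumed for illustration). For a
# number table with a single column
#   table = [[2, 7, 2]]
# the most frequent entry in column 0 is 2, so
#   group_by_max(table, number=True) == [[1.0, 0.0, 1.0]]
# Entries that occur only once never win: get_max_entry returns -1.0 when no
# entry occurs more than once, in which case the whole row stays 0.0.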
def pick_one(a):
for i in range(len(a)):
if (1.0 in a[i]):
return True
return False
def check_processed_cols(col, utility):
return True in [
True for y in col
if (y != utility.FLAGS.pad_int and y !=
utility.FLAGS.bad_number_pre_process)
]
def complete_wiki_processing(data, utility, train=True):
#convert to integers and padding
processed_data = []
num_bad_examples = 0
for example in data:
number_found = 0
if (example.is_bad_example):
num_bad_examples += 1
if (not (example.is_bad_example)):
example.string_question = example.question[:]
#entry match
example.processed_number_columns = example.processed_number_columns[:]
example.processed_word_columns = example.processed_word_columns[:]
example.word_exact_match, word_match, matched_indices = exact_match(
example.string_question, example.original_wc, number=False)
example.number_exact_match, number_match, _ = exact_match(
example.string_question, example.original_nc, number=True)
if (not (pick_one(example.word_exact_match)) and not (
pick_one(example.number_exact_match))):
assert len(word_match) == 0
assert len(number_match) == 0
example.word_exact_match, word_match = partial_match(
example.string_question, example.original_wc, number=False)
#group by max
example.word_group_by_max = group_by_max(example.original_wc, False)
example.number_group_by_max = group_by_max(example.original_nc, True)
#column name match
example.word_column_exact_match, wcol_matched_indices = exact_column_match(
example.string_question, example.original_wc_names, number=False)
example.number_column_exact_match, ncol_matched_indices = exact_column_match(
example.string_question, example.original_nc_names, number=False)
if (not (1.0 in example.word_column_exact_match) and not (
1.0 in example.number_column_exact_match)):
example.word_column_exact_match = partial_column_match(
example.string_question, example.original_wc_names, number=False)
example.number_column_exact_match = partial_column_match(
example.string_question, example.original_nc_names, number=False)
if (len(word_match) > 0 or len(number_match) > 0):
example.question.append(utility.entry_match_token)
if (1.0 in example.word_column_exact_match or
1.0 in example.number_column_exact_match):
example.question.append(utility.column_match_token)
example.string_question = example.question[:]
example.number_lookup_matrix = np.transpose(
example.number_lookup_matrix)[:]
example.word_lookup_matrix = np.transpose(example.word_lookup_matrix)[:]
example.columns = example.number_columns[:]
example.word_columns = example.word_columns[:]
example.len_total_cols = len(example.word_column_names) + len(
example.number_column_names)
example.column_names = example.number_column_names[:]
example.word_column_names = example.word_column_names[:]
example.string_column_names = example.number_column_names[:]
example.string_word_column_names = example.word_column_names[:]
example.sorted_number_index = []
example.sorted_word_index = []
example.column_mask = []
example.word_column_mask = []
example.processed_column_mask = []
example.processed_word_column_mask = []
example.word_column_entry_mask = []
example.question_attention_mask = []
example.question_number = example.question_number_1 = -1
example.question_attention_mask = []
example.ordinal_question = []
example.ordinal_question_one = []
new_question = []
if (len(example.number_columns) > 0):
example.len_col = len(example.number_columns[0])
else:
example.len_col = len(example.word_columns[0])
for (start, length) in matched_indices:
for j in range(length):
example.question[start + j] = utility.unk_token
#print example.question
for word in example.question:
if (isinstance(word, numbers.Number) or wiki_data.is_date(word)):
if (not (isinstance(word, numbers.Number)) and
wiki_data.is_date(word)):
word = word.replace("X", "").replace("-", "")
number_found += 1
if (number_found == 1):
example.question_number = word
if (len(example.ordinal_question) > 0):
example.ordinal_question[len(example.ordinal_question) - 1] = 1.0
else:
example.ordinal_question.append(1.0)
elif (number_found == 2):
example.question_number_1 = word
if (len(example.ordinal_question_one) > 0):
example.ordinal_question_one[len(example.ordinal_question_one) -
1] = 1.0
else:
example.ordinal_question_one.append(1.0)
else:
new_question.append(word)
example.ordinal_question.append(0.0)
example.ordinal_question_one.append(0.0)
example.question = [
utility.word_ids[word_lookup(w, utility)] for w in new_question
]
example.question_attention_mask = [0.0] * len(example.question)
#when the first question number occurs before a word
example.ordinal_question = example.ordinal_question[0:len(
example.question)]
example.ordinal_question_one = example.ordinal_question_one[0:len(
example.question)]
#question-padding
example.question = [utility.word_ids[utility.dummy_token]] * (
utility.FLAGS.question_length - len(example.question)
) + example.question
example.question_attention_mask = [-10000.0] * (
utility.FLAGS.question_length - len(example.question_attention_mask)
) + example.question_attention_mask
example.ordinal_question = [0.0] * (utility.FLAGS.question_length -
len(example.ordinal_question)
) + example.ordinal_question
example.ordinal_question_one = [0.0] * (utility.FLAGS.question_length -
len(example.ordinal_question_one)
) + example.ordinal_question_one
if (True):
#number columns and related-padding
num_cols = len(example.columns)
start = 0
for column in example.number_columns:
if (check_processed_cols(example.processed_number_columns[start],
utility)):
example.processed_column_mask.append(0.0)
sorted_index = sorted(
range(len(example.processed_number_columns[start])),
key=lambda k: example.processed_number_columns[start][k],
reverse=True)
sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(sorted_index))
example.sorted_number_index.append(sorted_index)
example.columns[start] = column + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(column))
example.processed_number_columns[start] += [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements -
len(example.processed_number_columns[start]))
start += 1
example.column_mask.append(0.0)
for remaining in range(num_cols, utility.FLAGS.max_number_cols):
example.sorted_number_index.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.processed_number_columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.number_exact_match.append([0.0] *
(utility.FLAGS.max_elements))
example.number_group_by_max.append([0.0] *
(utility.FLAGS.max_elements))
example.column_mask.append(-100000000.0)
example.processed_column_mask.append(-100000000.0)
example.number_column_exact_match.append(0.0)
example.column_names.append([utility.dummy_token])
#word column and related-padding
start = 0
word_num_cols = len(example.word_columns)
for column in example.word_columns:
if (check_processed_cols(example.processed_word_columns[start],
utility)):
example.processed_word_column_mask.append(0.0)
sorted_index = sorted(
range(len(example.processed_word_columns[start])),
key=lambda k: example.processed_word_columns[start][k],
reverse=True)
sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(sorted_index))
example.sorted_word_index.append(sorted_index)
column = convert_to_int_2d_and_pad(column, utility)
example.word_columns[start] = column + [[
utility.word_ids[utility.dummy_token]
] * utility.FLAGS.max_entry_length] * (utility.FLAGS.max_elements -
len(column))
example.processed_word_columns[start] += [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements -
len(example.processed_word_columns[start]))
example.word_column_entry_mask.append([0] * len(column) + [
utility.word_ids[utility.dummy_token]
] * (utility.FLAGS.max_elements - len(column)))
start += 1
example.word_column_mask.append(0.0)
for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
example.sorted_word_index.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.word_columns.append([[utility.word_ids[utility.dummy_token]] *
utility.FLAGS.max_entry_length] *
(utility.FLAGS.max_elements))
example.word_column_entry_mask.append(
[utility.word_ids[utility.dummy_token]] *
(utility.FLAGS.max_elements))
example.word_exact_match.append([0.0] * (utility.FLAGS.max_elements))
example.word_group_by_max.append([0.0] * (utility.FLAGS.max_elements))
example.processed_word_columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.word_column_mask.append(-100000000.0)
example.processed_word_column_mask.append(-100000000.0)
example.word_column_exact_match.append(0.0)
example.word_column_names.append([utility.dummy_token] *
utility.FLAGS.max_entry_length)
seen_tables[example.table_key] = 1
#convert column and word column names to integers
example.column_ids = convert_to_int_2d_and_pad(example.column_names,
utility)
example.word_column_ids = convert_to_int_2d_and_pad(
example.word_column_names, utility)
for i_em in range(len(example.number_exact_match)):
example.number_exact_match[i_em] = example.number_exact_match[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_exact_match[i_em]))
example.number_group_by_max[i_em] = example.number_group_by_max[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_group_by_max[i_em]))
for i_em in range(len(example.word_exact_match)):
example.word_exact_match[i_em] = example.word_exact_match[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_exact_match[i_em]))
example.word_group_by_max[i_em] = example.word_group_by_max[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_group_by_max[i_em]))
example.exact_match = example.number_exact_match + example.word_exact_match
example.group_by_max = example.number_group_by_max + example.word_group_by_max
example.exact_column_match = example.number_column_exact_match + example.word_column_exact_match
#answer and related mask, padding
if (example.is_lookup):
example.answer = example.calc_answer
example.number_print_answer = example.number_lookup_matrix.tolist()
example.word_print_answer = example.word_lookup_matrix.tolist()
for i_answer in range(len(example.number_print_answer)):
example.number_print_answer[i_answer] = example.number_print_answer[
i_answer] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_print_answer[i_answer]))
for i_answer in range(len(example.word_print_answer)):
example.word_print_answer[i_answer] = example.word_print_answer[
i_answer] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_print_answer[i_answer]))
example.number_lookup_matrix = convert_to_bool_and_pad(
example.number_lookup_matrix, utility)
example.word_lookup_matrix = convert_to_bool_and_pad(
example.word_lookup_matrix, utility)
for remaining in range(num_cols, utility.FLAGS.max_number_cols):
example.number_lookup_matrix.append([False] *
utility.FLAGS.max_elements)
example.number_print_answer.append([0.0] * utility.FLAGS.max_elements)
for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
example.word_lookup_matrix.append([False] *
utility.FLAGS.max_elements)
example.word_print_answer.append([0.0] * utility.FLAGS.max_elements)
example.print_answer = example.number_print_answer + example.word_print_answer
else:
example.answer = example.calc_answer
example.print_answer = [[0.0] * (utility.FLAGS.max_elements)] * (
utility.FLAGS.max_number_cols + utility.FLAGS.max_word_cols)
#question_number masks
if (example.question_number == -1):
example.question_number_mask = np.zeros([utility.FLAGS.max_elements])
else:
example.question_number_mask = np.ones([utility.FLAGS.max_elements])
if (example.question_number_1 == -1):
example.question_number_one_mask = -10000.0
else:
example.question_number_one_mask = np.float64(0.0)
if (example.len_col > utility.FLAGS.max_elements):
continue
processed_data.append(example)
return processed_data
def add_special_words(utility):
utility.words.append(utility.entry_match_token)
utility.word_ids[utility.entry_match_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.entry_match_token]] = utility.entry_match_token
utility.entry_match_token_id = utility.word_ids[utility.entry_match_token]
print "entry match token: ", utility.word_ids[
utility.entry_match_token], utility.entry_match_token_id
utility.words.append(utility.column_match_token)
utility.word_ids[utility.column_match_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.column_match_token]] = utility.column_match_token
utility.column_match_token_id = utility.word_ids[utility.column_match_token]
print "entry match token: ", utility.word_ids[
utility.column_match_token], utility.column_match_token_id
utility.words.append(utility.dummy_token)
utility.word_ids[utility.dummy_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.dummy_token]] = utility.dummy_token
utility.dummy_token_id = utility.word_ids[utility.dummy_token]
utility.words.append(utility.unk_token)
utility.word_ids[utility.unk_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.unk_token]] = utility.unk_token
def perform_word_cutoff(utility):
if (utility.FLAGS.word_cutoff > 0):
for word in utility.word_ids.keys():
if (utility.word_count.has_key(word) and utility.word_count[word] <
utility.FLAGS.word_cutoff and word != utility.unk_token and
word != utility.dummy_token and word != utility.entry_match_token and
word != utility.column_match_token):
utility.word_ids.pop(word)
utility.words.remove(word)
def word_dropout(question, utility):
if (utility.FLAGS.word_dropout_prob > 0.0):
new_question = []
for i in range(len(question)):
if (question[i] != utility.dummy_token_id and
utility.random.random() > utility.FLAGS.word_dropout_prob):
new_question.append(utility.word_ids[utility.unk_token])
else:
new_question.append(question[i])
return new_question
else:
return question
def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=None):
#prepare feed dict dictionary
feed_dict = {}
feed_examples = []
for j in range(batch_size):
feed_examples.append(data[curr + j])
if (train):
feed_dict[gr.batch_question] = [
word_dropout(feed_examples[j].question, utility)
for j in range(batch_size)
]
else:
feed_dict[gr.batch_question] = [
feed_examples[j].question for j in range(batch_size)
]
feed_dict[gr.batch_question_attention_mask] = [
feed_examples[j].question_attention_mask for j in range(batch_size)
]
feed_dict[
gr.batch_answer] = [feed_examples[j].answer for j in range(batch_size)]
feed_dict[gr.batch_number_column] = [
feed_examples[j].columns for j in range(batch_size)
]
feed_dict[gr.batch_processed_number_column] = [
feed_examples[j].processed_number_columns for j in range(batch_size)
]
feed_dict[gr.batch_processed_sorted_index_number_column] = [
feed_examples[j].sorted_number_index for j in range(batch_size)
]
feed_dict[gr.batch_processed_sorted_index_word_column] = [
feed_examples[j].sorted_word_index for j in range(batch_size)
]
feed_dict[gr.batch_question_number] = np.array(
[feed_examples[j].question_number for j in range(batch_size)]).reshape(
(batch_size, 1))
feed_dict[gr.batch_question_number_one] = np.array(
[feed_examples[j].question_number_1 for j in range(batch_size)]).reshape(
(batch_size, 1))
feed_dict[gr.batch_question_number_mask] = [
feed_examples[j].question_number_mask for j in range(batch_size)
]
feed_dict[gr.batch_question_number_one_mask] = np.array(
[feed_examples[j].question_number_one_mask for j in range(batch_size)
]).reshape((batch_size, 1))
feed_dict[gr.batch_print_answer] = [
feed_examples[j].print_answer for j in range(batch_size)
]
feed_dict[gr.batch_exact_match] = [
feed_examples[j].exact_match for j in range(batch_size)
]
feed_dict[gr.batch_group_by_max] = [
feed_examples[j].group_by_max for j in range(batch_size)
]
feed_dict[gr.batch_column_exact_match] = [
feed_examples[j].exact_column_match for j in range(batch_size)
]
feed_dict[gr.batch_ordinal_question] = [
feed_examples[j].ordinal_question for j in range(batch_size)
]
feed_dict[gr.batch_ordinal_question_one] = [
feed_examples[j].ordinal_question_one for j in range(batch_size)
]
feed_dict[gr.batch_number_column_mask] = [
feed_examples[j].column_mask for j in range(batch_size)
]
feed_dict[gr.batch_number_column_names] = [
feed_examples[j].column_ids for j in range(batch_size)
]
feed_dict[gr.batch_processed_word_column] = [
feed_examples[j].processed_word_columns for j in range(batch_size)
]
feed_dict[gr.batch_word_column_mask] = [
feed_examples[j].word_column_mask for j in range(batch_size)
]
feed_dict[gr.batch_word_column_names] = [
feed_examples[j].word_column_ids for j in range(batch_size)
]
feed_dict[gr.batch_word_column_entry_mask] = [
feed_examples[j].word_column_entry_mask for j in range(batch_size)
]
return feed_dict
|
|
# test_hscimgloader.py
# ALS 2017/05/02
"""
to be used with pytest
test sets for hscimgloader
"""
import numpy as np
import astropy.table as at
import astropy.units as u
import shutil
import os
import pytest
from astropy.io import fits
import filecmp
import glob
from ..hscimgloader import hscimgLoader
ra = 140.099341430207
dec = 0.580162492432517
dir_parent = './testing/'
dir_obj = './testing/SDSSJ0920+0034/'
img_width = 128*u.pix
img_height = 128*u.pix
@pytest.fixture(scope="module", autouse=True)
def setUp_tearDown():
""" rm ./testing/ and ./test2/ before and after test"""
# setup
if os.path.isdir(dir_parent):
shutil.rmtree(dir_parent)
yield
# tear down
if os.path.isdir(dir_parent):
shutil.rmtree(dir_parent)
@pytest.fixture
def L_radec():
""" returns a imgLoader object initiated with the ra dec above"""
return hscimgLoader(ra=ra , dec=dec, dir_parent=dir_parent, img_width=img_width, img_height=img_height)
def test_download_psf(L_radec):
""" test it can _download_psf"""
L = L_radec
L._download_psf(band = 'r')
assert os.path.isfile(L.dir_obj+'psf-r.fits')
def test_make_psf(L_radec):
""" test it can make_psf"""
L = L_radec
L.make_psf(band = 'r', overwrite=True)
assert os.path.isfile(L.dir_obj+'psf-r.fits')
def test_make_psf_no_img_in_hsc():
""" try loading a img thats not in hsc """
ra = 150.0547735
dec = 12.7073027
dir_obj = './testing/SDSSJ1000+1242/'
L = hscimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=img_width, img_height=img_height)
assert L.make_psf(band = 'r', overwrite=True) is False
def test_make_psfs(L_radec):
""" test it can make_psf"""
L = L_radec
L.make_psfs(overwrite=True)
for b in ['g', 'r', 'i', 'z', 'y']:
assert os.path.isfile(L.dir_obj+'psf-{0}.fits'.format(b))
def test_make_psfs_check_distinct(L_radec):
""" test it can make_psf"""
L = L_radec
L.make_psfs(overwrite=True)
for b in ['g', 'r', 'i', 'z', 'y']:
assert os.path.isfile(L.dir_obj+'psf-{0}.fits'.format(b))
for b0 in ['g', 'r', 'i', 'z', 'y']:
for b1 in ['g', 'r', 'i', 'z', 'y']:
if b0 != b1:
psf0 = fits.getdata(L.get_fp_psf(b0))
psf1 = fits.getdata(L.get_fp_psf(b1))
assert not np.all(psf0 == psf1)
def test_make_psfs_no_img_in_hsc():
""" try loading a img thats not in hsc """
ra = 150.0547735
dec = 12.7073027
dir_obj = './testing/SDSSJ1000+1242/'
L = hscimgLoader(ra=ra , dec=dec, dir_obj=dir_obj, img_width=img_width, img_height=img_height)
assert L.make_psfs(overwrite=True) is False
def test_make_psfs_overwriteTrue(L_radec):
"""
test that when overwrite=True make_psfs() always calls _download_psf(), whether or not the file exists
"""
L = L_radec
band = 'r'
overwrite = True
file = dir_obj+'psf-{0}.fits'.format(band)
if os.path.isfile(file):
os.remove(file)
# when file does not exist it creates psf
assert not os.path.isfile(file)
L.make_psfs(overwrite=overwrite)
assert os.path.isfile(file)
# when file does exist it overwrites psf
if os.path.isfile(file):
os.remove(file)
open(file, 'w').close()
L.make_psfs(overwrite=overwrite)
assert os.path.isfile(file)
assert os.stat(file).st_size > 0
def test_make_a_zero_size_file():
file = 'testfile.txt'
open(file, 'w').close()
assert os.stat(file).st_size == 0
os.remove(file)
def test_make_psfs_overwriteFalse(L_radec):
"""
test that when overwrite=False make_psfs() does not update existing files
"""
L = L_radec
overwrite = False
for band in L.bands:
file = dir_obj+'psf-{0}.fits'.format(band)
if not os.path.isdir(dir_obj):
os.makedirs(dir_obj)
if os.path.isfile(file):
os.remove(file)
open(file, 'w').close()
assert os.stat(file).st_size == 0
# when file exists it should not update file
L.make_psfs(overwrite=overwrite)
for band in L.bands:
file = dir_obj+'psf-{0}.fits'.format(band)
assert os.stat(file).st_size == 0
# def test_make_psf_correctimgsize():
# for pixnum in [64, 128, 256]:
# L = hscimgLoader(ra=ra , dec=dec, dir_parent=dir_parent, img_width=pixnum, img_height=pixnum)
# L.make_psf(band='r', overwrite=True)
# data = fits.getdata(L.dir_obj+'psf-r.fits')
# assert data.shape == (pixnum, pixnum)
# def test_make_psf_correctcontent():
# L = hscimgLoader(ra=ra , dec=dec, dir_parent=dir_parent, img_width=128, img_height=128)
# L.make_psfs(overwrite=True)
# for band in L.bands:
# f = 'psf-{0}.fits'.format(band)
# file_totest = dir_obj+f
# file_verification = './test_verification_data_128pix/SDSSJ0920+0034/'+f
# assert filecmp.cmp(file_totest, file_verification)
# f = 'hsc_xid.csv'
# tab = at.Table.read(f, format='ascii.csv')
# for col in ['ra', 'dec', 'patch_id', 'tract', 'patch', 'patch_s', 'parent_id', ]:
# assert col in tab.colnames
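# Illustrative invocation (a sketch; it assumes pytest is installed and the
# HSC archive is reachable, since the fixtures download real cutouts):
#   pytest -q test_hscimgloader.py
# The commented-out checks above can be re-enabled once verification data is
# available under ./test_verification_data_128pix/.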
|
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.mesos.mesos_disk import LocalDisk
import requests
from requests.auth import HTTPBasicAuth
import six.moves.urllib.parse
FLAGS = flags.FLAGS
MARATHON_API_PREFIX = '/v2/apps/'
USERNAME = 'root'
class MesosDockerSpec(virtual_machine.BaseVmSpec):
"""Object containing the information needed to create a MesosDockerInstance.
Attributes:
docker_cpus: None or float. Number of CPUs for Docker instances.
docker_memory_mb: None or int. Memory limit (in MB) for Docker instances.
mesos_privileged_docker: None or boolean. Indicates if the Docker container
should be run in privileged mode.
"""
CLOUD = providers.MESOS
@classmethod
def _GetOptionDecoderConstructions(cls):
result = super(MesosDockerSpec, cls)._GetOptionDecoderConstructions()
result.update({
'docker_cpus': (option_decoders.FloatDecoder, {'default': 1}),
'docker_memory_mb': (option_decoders.IntDecoder, {'default': 2048}),
'mesos_privileged_docker': (option_decoders.BooleanDecoder,
{'default': False})})
return result
def _ApplyFlags(self, config_values, flag_values):
super(MesosDockerSpec, self)._ApplyFlags(config_values, flag_values)
if flag_values['docker_cpus'].present:
config_values['docker_cpus'] = flag_values.docker_cpus
if flag_values['docker_memory_mb'].present:
config_values['docker_memory_mb'] = flag_values.docker_memory_mb
if flag_values['mesos_privileged_docker'].present:
config_values['mesos_privileged_docker'] =\
flag_values.mesos_privileged_docker
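# Illustrative flag usage (a sketch; the flag names below are taken from the
# lookups in _ApplyFlags and _CheckPrerequisites, while the pkb.py entry point
# and the --benchmarks/--cloud flags are assumed from the wider project):
#   ./pkb.py --benchmarks=iperf --cloud=Mesos \
#       --marathon_address=http://10.20.30.40:8080 \
#       --docker_cpus=2 --docker_memory_mb=4096 \
#       --mesos_privileged_docker=True --scratch_disk_type=local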
class MesosDockerInstance(virtual_machine.BaseVirtualMachine):
"""
Represents a Docker instance spawned by the Marathon framework on a Mesos cluster.
"""
CLOUD = providers.MESOS
def __init__(self, vm_spec):
super(MesosDockerInstance, self).__init__(vm_spec)
self.user_name = USERNAME
self.cpus = vm_spec.docker_cpus
self.memory_mb = vm_spec.docker_memory_mb
self.privileged = vm_spec.mesos_privileged_docker
self.api_url = six.moves.urllib.parse.urljoin(FLAGS.marathon_address,
MARATHON_API_PREFIX)
self.app_url = six.moves.urllib.parse.urljoin(self.api_url, self.name)
auth = FLAGS.marathon_auth.split(":")
if len(auth) == 2:
self.auth = HTTPBasicAuth(auth[0], auth[1])
else:
self.auth = None
def _CreateDependencies(self):
self._CheckPrerequisites()
self._CreateVolumes()
def _Create(self):
self._CreateApp()
self._WaitForBootCompletion()
def _PostCreate(self):
self._SetupSSH()
self._ConfigureProxy()
def _Delete(self):
self._DeleteApp()
def _CheckPrerequisites(self):
"""
    Raises an exception if any of the prerequisites is not met.
"""
if self.disk_specs and self.disk_specs[0].disk_type == disk.STANDARD:
raise Exception('Currently only local disks are supported. Please '
're-run the benchmark with "--scratch_disk_type=local".')
if not FLAGS.marathon_address:
raise Exception('Please provide the address and port of Marathon '
                      'framework. Example: 10.20.30.40:8080')
def _CreateVolumes(self):
"""
Creates volumes for scratch disks.
"""
for disk_num, disk_spec in enumerate(self.disk_specs):
if disk_spec.disk_type == disk.LOCAL:
scratch_disk = LocalDisk(disk_num, disk_spec, self.name)
else:
raise Exception('Currently only local disks are supported. Please '
're-run the benchmark with "--scratch_disk_type=local"')
scratch_disk._Create()
self.scratch_disks.append(scratch_disk)
def _CreateApp(self):
"""
Creates Marathon's App (Docker instance).
"""
logging.info("Attempting to create App: %s" % self.name)
body = self._BuildAppBody()
headers = {'content-type': 'application/json'}
output = requests.post(self.api_url, data=body, headers=headers,
auth=self.auth)
if output.status_code != requests.codes.CREATED:
raise Exception("Unable to create App: %s" % output.text)
logging.info("App %s created successfully." % self.name)
@vm_util.Retry(poll_interval=10, max_retries=600, log_errors=False)
def _WaitForBootCompletion(self):
"""
Periodically asks Marathon if the instance is already running.
"""
logging.info("Waiting for App %s to get up and running. It may take a while"
" if a Docker image is being downloaded for the first time."
% self.name)
output = requests.get(self.app_url, auth=self.auth)
output = json.loads(output.text)
tasks_running = output['app']['tasksRunning']
if not tasks_running:
raise Exception("Container is not booted yet. Retrying.")
@vm_util.Retry(poll_interval=10, max_retries=100, log_errors=True)
def _SetupSSH(self):
"""
Setup SSH connection details for each instance:
    - the IP address of the instance is the address of the host which the
      instance is running on,
    - the SSH port is drawn by Marathon and is unique for each instance.
"""
output = requests.get(self.app_url, auth=self.auth)
output = json.loads(output.text)
tasks = output['app']['tasks']
if not tasks or not tasks[0]['ports']:
raise Exception("Unable to figure out where the container is running."
"Retrying to retrieve host and port.")
self.ip_address = tasks[0]['host']
self.ssh_port = tasks[0]['ports'][0]
internal_ip, _ = self.RemoteCommand("ifconfig eth0 | grep 'inet addr' | awk"
" -F: '{print $2}' | awk '{print $1}'")
self.internal_ip = internal_ip.rstrip()
@vm_util.Retry(poll_interval=10, max_retries=100, log_errors=True)
def _ConfigureProxy(self):
"""
    In Docker containers, environment variables from /etc/environment
    are not sourced - this results in connection problems when running
    behind a proxy. Prepending proxy environment variables to bashrc
    solves the problem. Note: APPENDING to bashrc will not work because
    the script exits when it is NOT executed in an interactive shell.
"""
if FLAGS.http_proxy:
http_proxy = "sed -i '1i export http_proxy=%s' /etc/bash.bashrc"
self.RemoteCommand(http_proxy % FLAGS.http_proxy)
if FLAGS.https_proxy:
https_proxy = "sed -i '1i export https_proxy=%s' /etc/bash.bashrc"
      self.RemoteCommand(https_proxy % FLAGS.https_proxy)
if FLAGS.ftp_proxy:
ftp_proxy = "sed -i '1i export ftp_proxy=%s' /etc/bash.bashrc"
self.RemoteCommand(ftp_proxy % FLAGS.ftp_proxy)
@vm_util.Retry(poll_interval=10, max_retries=100, log_errors=True)
def _DeleteApp(self):
"""
Deletes an App.
"""
logging.info('Attempting to delete App: %s' % self.name)
output = requests.delete(self.app_url, auth=self.auth)
if output.status_code == requests.codes.NOT_FOUND:
logging.info('App %s has been already deleted.' % self.name)
return
if output.status_code != requests.codes.OK:
raise Exception("Deleting App: %s failed. Reattempting." % self.name)
def _BuildAppBody(self):
"""
    Builds the JSON which will be passed as the body of the POST request to
    the Marathon API in order to create the App.
"""
cat_cmd = ['cat', vm_util.GetPublicKeyPath()]
key_file, _ = vm_util.IssueRetryableCommand(cat_cmd)
cmd = "/bin/mkdir /root/.ssh; echo '%s' >> /root/.ssh/authorized_keys; " \
"/usr/sbin/sshd -D" % key_file
body = {
'id': self.name,
'mem': self.memory_mb,
'cpus': self.cpus,
'cmd': cmd,
'container': {
'type': 'DOCKER',
'docker': {
'image': self.image,
'network': 'BRIDGE',
'portMappings': [
{
'containerPort': 22,
'hostPort': 0,
'protocol': 'tcp'
}
],
'privileged': self.privileged,
'parameters': [{'key': 'hostname', 'value': self.name}]
}
}
}
for scratch_disk in self.scratch_disks:
scratch_disk.AttachVolumeInfo(body['container'])
return json.dumps(body)
def SetupLocalDisks(self):
# Do not call parent's method
return
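# Illustrative sketch only: the Marathon request body assembled by _BuildAppBody
# above has roughly the following shape. The concrete values here are
# hypothetical examples; the real body also carries the authorized_keys/sshd
# bootstrap command and any volume info attached by the scratch disks.
if __name__ == '__main__':
  _example_app_body = {
      'id': 'pkb-mesos-vm-0',
      'mem': 2048,
      'cpus': 1.0,
      'cmd': '/usr/sbin/sshd -D',
      'container': {
          'type': 'DOCKER',
          'docker': {
              'image': 'ubuntu:16.04',
              'network': 'BRIDGE',
              'portMappings': [
                  {'containerPort': 22, 'hostPort': 0, 'protocol': 'tcp'}],
              'privileged': False,
              'parameters': [{'key': 'hostname', 'value': 'pkb-mesos-vm-0'}],
          },
      },
  }
  print(json.dumps(_example_app_body, indent=2))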
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import sid
class prefix_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines segment routing extensions for prefixes.
"""
__slots__ = ("_path_helper", "_extmethods", "__sid")
_yang_name = "prefix-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
]
def _get_sid(self):
"""
Getter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid (list)
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
return self.__sid
def _set_sid(self, v, load=False):
"""
Setter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid() directly.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,sid.sid, yang_name="sid", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="sid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__sid = t
if hasattr(self, "_set"):
self._set()
def _unset_sid(self):
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
sid = __builtin__.property(_get_sid)
_pyangbind_elements = OrderedDict([("sid", sid)])
from . import sid
class prefix_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines segment routing extensions for prefixes.
"""
__slots__ = ("_path_helper", "_extmethods", "__sid")
_yang_name = "prefix-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
]
def _get_sid(self):
"""
Getter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid (list)
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
return self.__sid
def _set_sid(self, v, load=False):
"""
Setter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid() directly.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,sid.sid, yang_name="sid", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="sid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__sid = t
if hasattr(self, "_set"):
self._set()
def _unset_sid(self):
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
sid = __builtin__.property(_get_sid)
_pyangbind_elements = OrderedDict([("sid", sid)])
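# Illustrative sketch (hypothetical helper, not generated by pyangbind): as the
# docstrings above note, `sid` is read-only (config: false) through the public
# property, so a backend that needs to populate it would go through the private
# setter rather than assigning to the attribute:
def _example_populate_sid(sid_value):
    psid = prefix_sid()
    psid._set_sid(sid_value)  # the public `psid.sid` attribute only exposes the getter
    return psid._get_sid()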
|
|
import unittest
import subprocess
import os
import shutil
import time
from datetime import date
import sys
import socket
from kafka import KafkaProducer
import json
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
import zlib
import xml.etree.ElementTree as ET
this_dir = os.path.dirname(__file__)
fakes3_data_path = os.path.join(this_dir, 'data')
fixture_path = os.path.join(this_dir, 'fixture')
# as configured in system-test-s3-sink.properties. This might not be super
# portable, but it is the simplest option for now without needing to dynamically
# write that config for each run...
connect_data_path = '/tmp/connect-system-test'
g_fakes3_proc = None
g_s3connect_proc = None
def modulo_partitioner(key, all_partitions, available_partitions):
if key is None:
key = 0
idx = int(key) % len(all_partitions)
return all_partitions[idx]
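# Illustrative note: the partitioner above is what lets the test predict which
# partition a keyed record lands in. For example, with three partitions
# [0, 1, 2], key 7 maps to partition 1 and a missing key falls back to
# partition 0 (pure sanity illustration, not part of the test flow):
assert modulo_partitioner(7, [0, 1, 2], [0, 1, 2]) == 1
assert modulo_partitioner(None, [0, 1, 2], [0, 1, 2]) == 0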
g_producer = KafkaProducer(bootstrap_servers="localhost:9092",
partitioner=modulo_partitioner,
metadata_max_age_ms=1000);
g_s3_conn = S3Connection('foo', 'bar', is_secure=False, port=9090, host='localhost',
calling_format=OrdinaryCallingFormat())
# requires proc to be Popened with stdout=subprocess.PIPE,stderr=subprocess.STDOUT
def dumpServerStdIO(proc, msg, until=None, until_fail=None, timeout=None, trim_indented=False, post_fail_lines=20):
sys.stdout.write(msg + os.linesep)
if not proc:
return (False, None)
start = time.time()
# After we see fail, add another few lines to see the full error
post_fail_lines_remaining = post_fail_lines
fail_line = None
while True:
try:
line = proc.stdout.readline()
if not line:
break
if fail_line is not None:
if post_fail_lines_remaining <= 0:
return (False, fail_line)
else:
sys.stderr.write(" STDIO> " + line)
post_fail_lines_remaining -= 1
continue
if not trim_indented or not line.startswith((' ', '\t')):
sys.stderr.write(" STDIO> " + line)
if until_fail and line.find(until_fail) >= 0:
fail_line = line
continue
if until and line.find(until) >= 0:
return (True, line)
if timeout is not None and (time.time() - start) > timeout:
return (False, "Timedout after {} second".format(time.time() - start))
except (KeyboardInterrupt, SystemExit):
tearDownModule()
sys.exit(1)
return (True, None)
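# Illustrative usage sketch (hypothetical helper, not called anywhere below):
# block until a child process started with stdout=subprocess.PIPE and
# stderr=subprocess.STDOUT prints a line containing `marker`, bailing out on
# "ERROR" or after 30 seconds.
def _example_wait_for_marker(proc, marker):
    return dumpServerStdIO(proc, "Waiting for '{}'...".format(marker),
                           until=marker, until_fail="ERROR",
                           timeout=30, trim_indented=True)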
def setUpModule():
global g_fakes3_proc, g_s3_conn
# Clean up data from previous runs
if os.path.isdir(fakes3_data_path):
shutil.rmtree(fakes3_data_path)
if os.path.isdir('/tmp/connect-system-test'):
shutil.rmtree('/tmp/connect-system-test')
# Recreate the dirs!
os.mkdir(fakes3_data_path)
os.mkdir(connect_data_path)
# Clear our topic from Kafka
try:
subprocess.check_output([os.path.join(this_dir, 'standalone-kafka/kafka/bin/kafka-topics.sh'),
'--zookeeper', 'localhost:2181', '--delete', '--topic', 'system-test']);
except subprocess.CalledProcessError as e:
# if the complaint is that the topic doesn't exist then ignore it, otherwise fail loudly
if e.output.find("Topic system-test does not exist on ZK path") < 0:
raise e
# Recreate fresh
output = subprocess.check_output([os.path.join(this_dir, 'standalone-kafka/kafka/bin/kafka-topics.sh'),
'--zookeeper', 'localhost:2181', '--create', '--topic', 'system-test',
'--partitions', '1', '--replication-factor', '1']);
if output != "Created topic \"system-test\".\n":
raise RuntimeError("Failed to create test topic:\n{}".format(output))
# Run fakeS3
print "Starting FakeS3..."
g_fakes3_proc = subprocess.Popen(['fakes3', '-p', '9090', '-r', fakes3_data_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT);
try:
print "While we wait, let's do very basic check that kafka is up"
sock = socket.create_connection(('localhost', 9092), 1)
# Connected without throwing timeout exception so just close again
sock.close();
print "Great, Kafka seems to be there."
dumpServerStdIO(g_fakes3_proc, "Just waiting for FakeS3 to be ready...", "WEBrick::HTTPServer#start");
# ensure bucket is created
g_s3_conn.create_bucket('connect-system-test')
print "SETUP DONE"
except:
tearDownModule()
raise
def tearDownModule():
global g_fakes3_proc
if g_s3connect_proc is not None:
print "Terminating Kafka Connect"
g_s3connect_proc.kill()
g_s3connect_proc.wait()
if g_fakes3_proc is not None:
print "Terminating FakeS3"
g_fakes3_proc.kill()
g_fakes3_proc.wait()
print "TEARDOWN DONE"
def runS3ConnectStandalone():
global g_s3connect_proc
# quick hack to get version from pom.
tree = ET.parse(os.path.join(this_dir, '..', 'pom.xml'))
root = tree.getroot()
version = root.find('{http://maven.apache.org/POM/4.0.0}version').text
env = {
'CLASSPATH': os.path.join(this_dir, '../target/kafka-connect-s3-{}.jar'.format(version))
}
cmd = [os.path.join(this_dir, 'standalone-kafka/kafka/bin/connect-standalone.sh'),
os.path.join(this_dir, 'system-test-worker.properties'),
os.path.join(this_dir, 'system-test-s3-sink.properties')]
g_s3connect_proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
dumpServerStdIO(g_s3connect_proc,
"Wait for S3 connect initialisation...",
"finished initialization and start",
trim_indented=True)
return g_s3connect_proc
class TestConnectS3(unittest.TestCase):
'''
    These tests are highly non-deterministic, but they pass almost always on my local setup.
    Controlling things like timing and exactly when Connect flushes is not really possible.
    Making the validation here smart enough to correctly identify every valid output of the
    system without false positives would be a strictly harder programming problem than the
    system under test. So this serves as a manually-run set of smoke tests that sanity-check
    the integration logic of the implementation and automate what would otherwise be a
    many-step, ad-hoc testing process.
'''
def test_basic_consuming(self):
global g_producer
topic = "system-test"
s3connect = runS3ConnectStandalone()
# messages produced asynchronously - synchronous producing makes it likely
# they will be split into different flushes in connect
expected_data = ''
for i in range(0, 100):
record = b'{{"foo": "bar", "counter":{}}}'.format(i)
g_producer.send(topic, record)
expected_data += record + '\n'
ok, line = dumpServerStdIO(s3connect, "Wait for connect to process and commit",
until="Successfully uploaded chunk for system-test-0",
until_fail="ERROR",
timeout=5, trim_indented=True)
self.assertTrue(ok, msg="Didn't get success message but did get: {}".format(line))
today = date.today()
pfx = 'systest/{}/'.format(today.isoformat())
# Fetch the files written and assert they are as expected
self.assert_s3_file_contents('systest/last_chunk_index.system-test-00000.txt',
pfx+'system-test-00000-000000000000.index.json')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000000.index.json',
'{"chunks":[{"byte_length_uncompressed":2890,"num_records":100,"byte_length":275,"byte_offset":0,"first_record_offset":0}]}')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000000.gz', expected_data, gzipped=True)
# Now stop the connect process and restart it and ensure it correctly resumes from where we left
print "Restarting Kafka Connect"
s3connect.kill()
s3connect.wait()
# produce 100 more entries
expected_data = ''
for i in range(100, 200):
record = b'{{"foo": "bar", "counter":{}}}'.format(i)
g_producer.send(topic, record)
expected_data += record + '\n'
# restart connect
s3connect = runS3ConnectStandalone()
ok, line = dumpServerStdIO(s3connect, "Wait for connect to process and commit",
until="Successfully uploaded chunk for system-test-0",
until_fail="ERROR",
timeout=5, trim_indented=True)
self.assertTrue(ok, msg="Didn't get success message but did get: {}".format(line))
today = date.today()
pfx = 'systest/{}/'.format(today.isoformat())
# Fetch the files written and assert they are as expected
self.assert_s3_file_contents('systest/last_chunk_index.system-test-00000.txt',
pfx+'system-test-00000-000000000100.index.json')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000100.index.json',
'{"chunks":[{"byte_length_uncompressed":3000,"num_records":100,"byte_length":272,"byte_offset":0,"first_record_offset":100}]}')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000100.gz', expected_data, gzipped=True)
# now we test reconfiguring the topic to have more partitions...
print "Reconfiguring topic..."
output = subprocess.check_output([os.path.join(this_dir, 'standalone-kafka/kafka/bin/kafka-topics.sh'),
'--zookeeper', 'localhost:2181', '--alter', '--topic', 'system-test',
'--partitions', '3']);
if not output.endswith("Adding partitions succeeded!\n"):
raise RuntimeError("Failed to reconfigure test topic:\n{}".format(output))
        # wait for our producer to catch up with the reconfiguration, otherwise we'll keep
        # producing only to the single original partition
while len(g_producer.partitions_for('system-test')) < 3:
print "Waiting for new partitions to show up in producer"
time.sleep(0.5)
# produce some more, this time with keys so we know where they will end up
expected_partitions = ['','','']
for i in range(200, 300):
record = b'{{"foo": "bar", "counter":{}}}'.format(i)
g_producer.send(topic, key=bytes(i), value=record)
expected_partitions[i % 3] += record + '\n'
        # wait for all three partitions to commit (note we don't match the partition number,
        # as we can't assume what order they will appear in)
ok, line = dumpServerStdIO(s3connect, "Wait for connect to process and commit 1/3",
until="Successfully uploaded chunk for system-test-",
until_fail="ERROR",
timeout=5, trim_indented=True)
self.assertTrue(ok, msg="Didn't get success message but did get: {}".format(line))
ok, line = dumpServerStdIO(s3connect, "Wait for connect to process and commit 2/3",
until="Successfully uploaded chunk for system-test-",
until_fail="ERROR",
timeout=5, trim_indented=True)
self.assertTrue(ok, msg="Didn't get success message but did get: {}".format(line))
ok, line = dumpServerStdIO(s3connect, "Wait for connect to process and commit 3/3",
until="Successfully uploaded chunk for system-test-",
until_fail="ERROR",
timeout=5, trim_indented=True)
self.assertTrue(ok, msg="Didn't get success message but did get: {}".format(line))
# partition 0
self.assert_s3_file_contents('systest/last_chunk_index.system-test-00000.txt',
pfx+'system-test-00000-000000000200.index.json')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000200.index.json',
'{"chunks":[{"byte_length_uncompressed":990,"num_records":33,"byte_length":137,"byte_offset":0,"first_record_offset":200}]}')
self.assert_s3_file_contents(pfx+'system-test-00000-000000000200.gz', expected_partitions[0], gzipped=True)
# partition 1 (new partition will start from offset 0)
self.assert_s3_file_contents('systest/last_chunk_index.system-test-00001.txt',
pfx+'system-test-00001-000000000000.index.json')
self.assert_s3_file_contents(pfx+'system-test-00001-000000000000.index.json',
'{"chunks":[{"byte_length_uncompressed":990,"num_records":33,"byte_length":137,"byte_offset":0,"first_record_offset":0}]}')
self.assert_s3_file_contents(pfx+'system-test-00001-000000000000.gz', expected_partitions[1], gzipped=True)
# partition 2 (new partition will start from offset 0)
self.assert_s3_file_contents('systest/last_chunk_index.system-test-00002.txt',
pfx+'system-test-00002-000000000000.index.json')
self.assert_s3_file_contents(pfx+'system-test-00002-000000000000.index.json',
'{"chunks":[{"byte_length_uncompressed":1020,"num_records":34,"byte_length":139,"byte_offset":0,"first_record_offset":0}]}')
self.assert_s3_file_contents(pfx+'system-test-00002-000000000000.gz', expected_partitions[2], gzipped=True)
def assert_s3_file_contents(self, key, content, gzipped=False, encoding="utf-8"):
global g_s3_conn
bucket = g_s3_conn.get_bucket('connect-system-test')
file = bucket.get_key(key)
actual_contents = file.get_contents_as_string()
if gzipped:
# Hacks, http://stackoverflow.com/questions/2695152/in-python-how-do-i-decode-gzip-encoding
actual_contents = zlib.decompress(actual_contents, 16+zlib.MAX_WBITS)
self.assertEqual(content, actual_contents.decode(encoding))
if __name__ == '__main__':
unittest.main()
|
|
from django.contrib.auth import (authenticate,
logout as auth_logout,
login as auth_login)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, render_to_response
from django.contrib.auth.forms import PasswordResetForm
from registration.forms import RegistrationFormUniqueEmail
from registration.backends.default.views import RegistrationView
from rest_framework import decorators, status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from apps.user.models import ItsiUser
from apps.user.itsi import ItsiService
@decorators.api_view(['POST', 'GET'])
@decorators.permission_classes((AllowAny, ))
def login(request):
response_data = {}
status_code = status.HTTP_200_OK
if request.method == 'POST':
user = authenticate(username=request.REQUEST.get('username'),
password=request.REQUEST.get('password'))
if user is not None:
if user.is_active:
auth_login(request, user)
response_data['result'] = 'success'
response_data['username'] = user.username
response_data['guest'] = False
response_data['id'] = user.id
else:
response_data['errors'] = ['Please activate your account']
response_data['guest'] = True
response_data['id'] = 0
status_code = status.HTTP_400_BAD_REQUEST
else:
response_data['errors'] = ['Invalid username or password']
response_data['guest'] = True
response_data['id'] = 0
status_code = status.HTTP_400_BAD_REQUEST
elif request.method == 'GET':
user = request.user
if user.is_authenticated() and user.is_active:
response_data['username'] = user.username
response_data['guest'] = False
response_data['id'] = user.id
else:
response_data['guest'] = True
response_data['id'] = 0
response_data['result'] = 'success'
status_code = status.HTTP_200_OK
return Response(data=response_data, status=status_code)
@decorators.api_view(['GET'])
@decorators.permission_classes((AllowAny, ))
def logout(request):
auth_logout(request)
if request.is_ajax():
response_data = {
'guest': True,
'result': 'success',
'id': 0
}
return Response(data=response_data)
else:
return render_to_response('user/logout.html')
itsi = ItsiService()
def itsi_login(request):
redirect_uri = request.build_absolute_uri(reverse('itsi_auth'))
params = {'redirect_uri': redirect_uri}
auth_url = itsi.get_authorize_url(**params)
return redirect(auth_url)
def itsi_auth(request):
code = request.GET.get('code', None)
# Basic validation
if code is None:
return redirect('/error/itsi')
try:
session = itsi.get_session_from_code(code)
itsi_user = session.get_user()
    except Exception:
# In case we are unable to reach ITSI and get an unexpected response
return redirect('/error/itsi')
user = authenticate(itsi_id=itsi_user['id'])
if user is not None and user.is_active:
auth_login(request, user)
return redirect('/')
else:
# User did not authenticate. Save their ITSI ID and send to /register
request.session['itsi_id'] = itsi_user['id']
return redirect(
'/sign-up/itsi/{username}/{first_name}/{last_name}'.format(
**itsi_user['extra']
)
)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def itsi_sign_up(request):
# Validate request
errors = []
if 'itsi_id' not in request.session:
errors.append("There was an error in authenticating you with ITSI")
if 'username' not in request.POST or not request.POST.get('username'):
errors.append("Username must be specified")
elif User.objects.filter(username=request.POST.get('username')).exists():
errors.append("Username already exists")
if 'first_name' not in request.POST or not request.POST.get('first_name'):
errors.append("First name must be specified")
if 'last_name' not in request.POST or not request.POST.get('last_name'):
errors.append("Last name must be specified")
if 'agreed' not in request.POST or not request.POST.get('agreed'):
errors.append("You must agree to the terms")
if len(errors) > 0:
response_data = {"errors": errors}
return Response(data=response_data,
status=status.HTTP_400_BAD_REQUEST)
itsi_id = request.session['itsi_id']
# Create new user with given details and no email address or password
# since they will be authenticated using ITSI credentials
user = User.objects.create_user(
request.POST.get('username'),
email=None,
password=None,
first_name=request.POST.get('first_name'),
last_name=request.POST.get('last_name'),
)
user.save()
# Create corresponding itsi_user object that links to ITSI account
itsi_user = ItsiUser.objects.create_itsi_user(user, itsi_id)
itsi_user.save()
# Authenticate and log new user in
user = authenticate(itsi_id=itsi_id)
auth_login(request, user)
response_data = {'result': 'success',
'username': user.username,
'guest': False}
return Response(data=response_data,
status=status.HTTP_200_OK)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def sign_up(request):
view = RegistrationView()
form = RegistrationFormUniqueEmail(request.POST)
if form.is_valid():
user = view.register(request, **form.cleaned_data)
response_data = {'result': 'success',
'username': user.username,
'guest': False}
return Response(data=response_data,
status=status.HTTP_200_OK)
else:
errors = []
if 'username' not in form.cleaned_data:
errors.append("Username is invalid or already in use")
if 'password1' not in form.cleaned_data:
errors.append("Password must be specified")
if 'password2' not in form.cleaned_data or \
form.cleaned_data['password1'] != form.cleaned_data['password2']:
errors.append("Passwords do not match")
if 'email' not in form.cleaned_data:
errors.append("Email is invalid or already in use")
if len(errors) == 0:
errors.append("Invalid data submitted")
response_data = {"errors": errors}
return Response(data=response_data,
status=status.HTTP_400_BAD_REQUEST)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def forgot(request):
form = PasswordResetForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
try:
# If there are active user(s) that match email
next(form.get_users(email))
form.save(request=request)
response_data = {'result': 'success',
'guest': True}
status_code = status.HTTP_200_OK
except StopIteration:
response_data = {'errors': ["Email cannot be found"]}
status_code = status.HTTP_400_BAD_REQUEST
else:
response_data = {'errors': ["Email is invalid"]}
status_code = status.HTTP_400_BAD_REQUEST
return Response(data=response_data, status=status_code)
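# Illustrative sketch only (hypothetical helper, not wired into any test suite;
# assumes a test database and session support are available, and the path string
# is arbitrary because the view is called directly rather than resolved through
# the URLconf): the login view above can be exercised with DRF's APIRequestFactory.
def _example_login_post(username, password):
    from rest_framework.test import APIRequestFactory
    factory = APIRequestFactory()
    request = factory.post('/user/login', {'username': username,
                                           'password': password})
    return login(request)  # DRF Response carrying 'result' or 'errors'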
|