| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
---|---|---|---|---|---|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The volume manager manages creating, attaching, and detaching persistent
storage volumes. Persistent storage volumes keep their state independent of
instances. You can attach a volume to an instance, terminate the instance,
spawn a new instance (even one from a different image), and re-attach the
volume with the same data intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo.config import cfg
from oslo import messaging
from cinder import compute
from cinder import context
from cinder import exception
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
from cinder import quota
from cinder import utils
from cinder.volume.configuration import Configuration
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager.fc_zone_manager import ZoneManager
from eventlet.greenpool import GreenPool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMISCSIDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
default='none',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
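# Illustrative only: with the options registered above, a deployment could
# tune this manager through cinder.conf. The snippet below is a hypothetical
# example, not defaults taken from a real deployment (the driver path and
# timeout happen to match the option defaults declared above):
#
#   [DEFAULT]
#   volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
#   migration_create_volume_timeout_secs = 300
#   volume_service_inithost_offload = True
#   zoning_mode = fabric
#   extra_capabilities = {"capability_key": "capability_value"}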
MAPPING = {
'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver':
'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver',
'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver':
'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver',
'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver':
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver',
'cinder.volume.drivers.gpfs.GPFSDriver':
'cinder.volume.drivers.ibm.gpfs.GPFSDriver', }
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
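# A minimal sketch of the intended usage (the method name here is
# hypothetical; delete_volume further down is a real decorated example):
#
#   @locked_volume_operation
#   def some_volume_operation(self, context, volume_id, **kwargs):
#       ...
#
# The wrapped call serializes on an external lock named
# "<volume_id>-some_volume_operation", so other operations that take the same
# lock name cannot interleave on the same volume.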
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot_id, **kwargs):
@utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot_id, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.16'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = Configuration(volume_manager_opts,
config_group=service_name)
self._tp = GreenPool()
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if it's not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warn(_("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host)
self.zonemanager = None
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
ctxt = context.get_admin_context()
if self.configuration.safe_get('zoning_mode') == 'fabric':
self.zonemanager = ZoneManager(configuration=self.configuration)
LOG.info(_("Starting FC Zone Manager %(zm_version)s,"
" Driver %(drv_name)s %(drv_version)s") %
{'zm_version': self.zonemanager.get_version(),
'drv_name': self.zonemanager.driver.__class__.__name__,
'drv_version': self.zonemanager.driver.get_version()})
LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_("Error encountered during "
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
try:
sum = 0
self.stats.update({'allocated_capacity_gb': sum})
for volume in volumes:
if volume['status'] in ['in-use']:
# calculate allocated capacity for driver
sum += volume['size']
self.stats['allocated_capacity_gb'] = sum
try:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_("Failed to re-export volume %s: "
"setting to error state"), volume['id'])
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] == 'downloading':
LOG.info(_("volume %s stuck in a downloading state"),
volume['id'])
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
LOG.info(_("volume %s: skipping export"), volume['id'])
except Exception as ex:
LOG.error(_("Error encountered during "
"re-exporting phase of driver initialization: "
" %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
return
# at this point the driver is considered initialized.
self.driver.set_initialized()
LOG.debug(_('Resuming any in progress delete operations'))
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume(ctxt,
volume['id']))
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None):
"""Creates the volume."""
context_saved = context.deepcopy()
context = context.elevated()
if filter_properties is None:
filter_properties = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid,
allow_reschedule=allow_reschedule,
reschedule_context=context_saved,
request_spec=request_spec,
filter_properties=filter_properties)
except Exception:
LOG.exception(_("Failed to create manager volume flow"))
raise exception.CinderException(
_("Failed to create manager volume flow"))
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
else:
locked_action = None
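# e.g. for a volume created from a (hypothetical) snapshot id 'abc',
# locked_action would be 'abc-delete_snapshot', matching the lock name taken
# by the @locked_snapshot_operation decorator on delete_snapshot below.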
def _run_flow():
# This code executes the create volume flow. If something goes wrong,
# the flow reverts all jobs that were done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
# Fetch created volume from storage
volume_ref = flow_engine.storage.fetch('volume')
# Update volume stats
self.stats['allocated_capacity_gb'] += volume_ref['size']
return volume_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume_ref['host'] != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("volume %s: removing export"), volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['id'])
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
self.db.volume_update(context, volume_ref['id'],
{'status': 'available'})
return True
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_ref['id'],
{'status': 'error_deleting'})
# If deleting the source volume in a migration, we want to skip quotas
# and other database updates.
if volume_ref['migration_status']:
return True
# Get reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting volume"))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
self.publish_service_capabilities(context)
return True
def create_snapshot(self, context, volume_id, snapshot_id):
"""Creates and exports the snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %(snap_id)s: creating"),
{'snap_id': snapshot_ref['id']})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot_ref['context'] = caller_context
model_update = self.driver.create_snapshot(snapshot_ref)
if model_update:
self.db.snapshot_update(context, snapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error'})
self.db.snapshot_update(context,
snapshot_ref['id'], {'status': 'available',
'progress': '100%'})
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_ref['id'], volume_id)
except exception.CinderException as ex:
LOG.exception(_("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
raise exception.MetadataCopyFailure(reason=ex)
LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot_id):
"""Deletes and unexports snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
project_id = snapshot_ref['project_id']
LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot_ref['context'] = caller_context
self.driver.delete_snapshot(snapshot_ref)
except exception.SnapshotIsBusy:
LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
snapshot_ref['id'])
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'available'})
return True
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error_deleting'})
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot_ref['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
self.db.snapshot_destroy(context, snapshot_id)
LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume['instance_uuid'] and volume['instance_uuid'] !=
instance_uuid):
msg = _("being attached by another instance")
raise exception.InvalidVolume(reason=msg)
if (volume['attached_host'] and volume['attached_host'] !=
host_name):
msg = _("being attached by another host")
raise exception.InvalidVolume(reason=msg)
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
elif volume['status'] != "available":
msg = _("status must be available or attaching")
raise exception.InvalidVolume(reason=msg)
# TODO(jdg): attach_time column is currently varchar
# we should update this to a date-time object
# also consider adding detach_time?
self._notify_about_volume_usage(context, volume,
"attach.start")
self.db.volume_update(context, volume_id,
{"instance_uuid": instance_uuid,
"attached_host": host_name,
"status": "attaching",
"attach_time": timeutils.strtime()})
self.db.volume_admin_metadata_update(context.elevated(),
volume_id,
{"attached_mode": mode},
False)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
volume_id,
instance_uuid,
host_name_sanitized,
mountpoint)
self._notify_about_volume_usage(context, volume, "attach.end")
return do_attach()
@locked_volume_operation
def detach_volume(self, context, volume_id):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_id,
{'status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id)
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_("Error detaching volume %(volume)s, "
"due to uninitialized driver."),
{"volume": volume_id})
self._notify_about_volume_usage(context, volume, "detach.end")
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug(_("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully"),
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
with excutils.save_and_reraise_exception():
payload['message'] = unicode(error)
finally:
if (volume['instance_uuid'] is None and
volume['attached_host'] is None):
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
The driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except Exception as err:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
LOG.debug(_("Volume %s: creating export"), volume_id)
model_update = self.driver.create_export(context.elevated(),
volume)
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
if model_update:
LOG.exception(_("Failed updating model of volume %(volume_id)s"
" with driver provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise exception.ExportFailure(reason=ex)
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that are designated to be consumed by the
# front-end, or by both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# NOTE(skolathur): If volume_type is fibre_channel, invoke
# FCZoneManager to add access control via FC zoning.
vol_type = conn_info.get('driver_volume_type', None)
mode = self.configuration.zoning_mode
LOG.debug(_("Zoning Mode: %s"), mode)
if vol_type == 'fibre_channel' and self.zonemanager:
self._add_or_delete_fc_connection(conn_info, 1)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
conn_info = self.driver.terminate_connection(volume_ref,
connector,
force=force)
# NOTE(skolathur): If volume_type is fibre_channel, invoke
# FCZoneManager to remove access control via FC zoning.
if conn_info:
vol_type = conn_info.get('driver_volume_type', None)
mode = self.configuration.zoning_mode
LOG.debug(_("Zoning Mode: %s"), mode)
if vol_type == 'fibre_channel' and self.zonemanager:
self._add_or_delete_fc_connection(conn_info, 0)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
% {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
LOG.debug(_("volume %s: removing export"), volume_id)
self.driver.remove_export(context.elevated(), volume_ref)
except Exception as ex:
LOG.exception(_("Error detaching volume %(volume)s, "
"due to remove export failure."),
{"volume": volume_id})
raise exception.RemoveExportException(volume=volume_id, reason=ex)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
self.driver.accept_transfer(context, volume_ref, new_user, new_project)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
new_vol_values = {}
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'creating'
new_vol_values['migration_status'] = 'target:%s' % volume['id']
new_vol_values['attach_status'] = 'detached'
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'available':
tries = tries + 1
now = time.time()
if new_volume['status'] == 'error':
msg = _("failed to create new_volume on destination host")
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
new_volume = self.db.volume_get(ctxt, new_volume['id'])
# Copy the source volume to the destination volume
try:
if (volume['instance_uuid'] is None and
volume['attached_host'] is None):
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'], error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
nova_api.update_server_volume(ctxt, volume['instance_uuid'],
volume['id'], new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
volume = self.db.volume_get(ctxt, volume['id'])
# If we're in the completing phase don't delete the target
# because we may have already deleted the source!
if volume['migration_status'] == 'migrating':
rpcapi.delete_volume(ctxt, new_volume)
new_volume['migration_status'] = None
def _get_original_status(self, volume):
if (volume['instance_uuid'] is None and
volume['attached_host'] is None):
return 'available'
else:
return 'in-use'
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
status_update = None
if volume['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume)}
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
new_volume['migration_status'] = None
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
self.db.volume_update(ctxt, volume_id, updates)
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
LOG.error(msg % {'vol': volume_id, 'err': ex})
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
self.db.volume_update(ctxt, volume_id, updates)
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
status_update = None
if volume_ref['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume_ref)}
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug(_("volume %s: calling driver migrate_volume"),
volume_ref['id'])
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
host)
if moved:
updates = {'host': host['host'],
'migration_status': None}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
volume_ref['id'],
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
model_update = self.driver.create_export(ctxt, volume_ref)
if model_update:
updates.update(model_update)
self.db.volume_update(ctxt, volume_ref['id'], updates)
if not moved:
try:
self._migrate_volume_generic(ctxt, volume_ref, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
model_update = self.driver.create_export(ctxt, volume_ref)
if model_update:
updates.update(model_update)
self.db.volume_update(ctxt, volume_ref['id'], updates)
@periodic_task.periodic_task
def _report_driver_status(self, context):
LOG.info(_("Updating volume status"))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_('Unable to update stats, %(driver_name)s '
'-%(driver_version)s '
'%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
volume_stats.update(self.stats)
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def notification(self, context, event):
LOG.info(_("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
size_increase = (int(new_size)) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
LOG.info(_("volume %s: extending"), volume['id'])
self.driver.extend_volume(volume, new_size)
LOG.info(_("volume %s: extended successfully"), volume['id'])
except Exception:
LOG.exception(_("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume_id)
finally:
QUOTAS.rollback(context, reservations)
return
QUOTAS.commit(context, reservations)
self.db.volume_update(context, volume['id'], {'size': int(new_size),
'status': 'available'})
self.stats['allocated_capacity_gb'] += size_increase
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to its original status
# for now.
self.db.volume_update(context, volume_id, status_update)
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
old_reservations = None
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_("Failed to update usages while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume_ref.get('volume_type_id'), new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
if not retyped:
try:
new_type = volume_types.get_volume_type(context, new_type_id)
retyped = self.driver.retype(context, volume_ref, new_type,
diff, host)
if retyped:
LOG.info(_("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
LOG.error(_("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = self.db.snapshot_get_all_for_volume(context,
volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_ref['id'],
{'migration_status': 'starting'})
try:
self.migrate_volume(context, volume_id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
self.db.volume_update(context, volume_id,
{'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']})
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
def manage_existing(self, ctxt, volume_id, ref=None):
LOG.debug('manage_existing: managing %s' % ref)
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
LOG.exception(_("Failed to create manage_existing flow."))
raise exception.CinderException(
_("Failed to create manage existing flow."))
flow_engine.run()
# Fetch created volume from storage
volume_ref = flow_engine.storage.fetch('volume')
# Update volume stats
self.stats['allocated_capacity_gb'] += volume_ref['size']
return volume_ref['id']
def _add_or_delete_fc_connection(self, conn_info, zone_op):
"""Add or delete connection control to fibre channel network.
In case of fibre channel, when zoning mode is set as fabric
ZoneManager is invoked to apply FC zoning configuration to the network
using initiator and target WWNs used for attach/detach.
:param conn_info: connector passed by the volume driver after
initialize_connection or terminate_connection.
:param zone_op: indicates whether it is a zone add or delete operation;
zone_op=0 for delete connection and 1 for add connection.
"""
_initiator_target_map = None
if 'initiator_target_map' in conn_info['data']:
_initiator_target_map = conn_info['data']['initiator_target_map']
LOG.debug(_("Initiator Target map:%s"), _initiator_target_map)
# NOTE(skolathur): Invoke Zonemanager to handle automated FC zone
# management when vol_type is fibre_channel and zoning_mode is fabric
# Initiator_target map associating each initiator WWN to one or more
# target WWN is passed to ZoneManager to add or update zone config.
LOG.debug(_("Zoning op: %s"), zone_op)
if _initiator_target_map is not None:
try:
if zone_op == 1:
self.zonemanager.add_connection(_initiator_target_map)
elif zone_op == 0:
self.zonemanager.delete_connection(_initiator_target_map)
except exception.ZoneManagerException as e:
with excutils.save_and_reraise_exception():
LOG.error(e)
| Thingee/cinder | cinder/volume/manager.py | Python | apache-2.0 | 59,306 |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from c7n_azure import constants
from c7n_azure.actions.logic_app import LogicAppAction
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n_azure.actions.notify import Notify
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import sources, MaxResourceLimit
from c7n.utils import local_session
log = logging.getLogger('custodian.azure.query')
class ResourceQuery(object):
def __init__(self, session_factory):
self.session_factory = session_factory
def filter(self, resource_manager, **params):
m = resource_manager.resource_type
enum_op, list_op, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
params.update(m.extra_args(resource_manager))
try:
op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
except Exception as e:
log.error("Failed to query resource.\n"
"Type: azure.{0}.\n"
"Error: {1}".format(resource_manager.type, e))
raise
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m
@sources.register('describe-azure')
class DescribeSource(object):
resource_query_factory = ResourceQuery
def __init__(self, manager):
self.manager = manager
self.query = self.resource_query_factory(self.manager.session_factory)
def validate(self):
pass
def get_resources(self, query):
return self.query.filter(self.manager)
def get_permissions(self):
return ()
def augment(self, resources):
return resources
@sources.register('resource-graph')
class ResourceGraphSource(object):
def __init__(self, manager):
self.manager = manager
def validate(self):
if not hasattr(self.manager.resource_type, 'resource_type'):
raise PolicyValidationError(
"%s is not supported with the Azure Resource Graph source."
% self.manager.data['resource'])
def get_resources(self, _):
log.warning('The Azure Resource Graph source '
'should not be used in production scenarios at this time.')
session = self.manager.get_session()
client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
# an empty scope will return all resources
query_scope = ""
if self.manager.resource_type.resource_type != 'armresource':
query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
query = QueryRequest(
query=query_scope,
subscriptions=[session.get_subscription_id()]
)
res = client.resources(query)
cols = [c['name'] for c in res.data['columns']]
data = [dict(zip(cols, r)) for r in res.data['rows']]
return data
def get_permissions(self):
return ()
def augment(self, resources):
return resources
class ChildResourceQuery(ResourceQuery):
"""A resource query for resources that must be queried with parent information.
Several resource types can only be queried in the context of their
parents' identifiers, e.g. SQL and Cosmos databases.
"""
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type) # type: ChildTypeInfo
parents = resource_manager.get_parent_manager()
# Have to query separately for each parent's children.
results = []
for parent in parents.resources():
try:
subset = resource_manager.enumerate_resources(parent, m, **params)
if subset:
# If required, append parent resource ID to all child resources
if m.annotate_parent:
for r in subset:
r[m.parent_key] = parent[parents.resource_type.id]
results.extend(subset)
except Exception as e:
log.warning('Child enumeration failed for {0}. {1}'
.format(parent[parents.resource_type.id], e))
if m.raise_on_exception:
raise e
return results
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
resource_query_factory = ChildResourceQuery
class TypeMeta(type):
def __repr__(cls):
return "<Type info service:%s client: %s>" % (
cls.service,
cls.client)
@six.add_metaclass(TypeMeta)
class TypeInfo(object):
doc_groups = None
"""api client construction information"""
service = ''
client = ''
# Default id field, resources should override if different (used for meta filters, report etc)
id = 'id'
resource = constants.RESOURCE_ACTIVE_DIRECTORY
@classmethod
def extra_args(cls, resource_manager):
return {}
@six.add_metaclass(TypeMeta)
class ChildTypeInfo(TypeInfo):
"""api client construction information for child resources"""
parent_manager_name = ''
annotate_parent = True
raise_on_exception = True
parent_key = 'c7n:parent-id'
@classmethod
def extra_args(cls, parent_resource):
return {}
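# A minimal, hypothetical resource_type declaration to illustrate how the
# fields above are consumed by ResourceQuery.filter and get_client; the
# service/client/enum_spec values below are made up, not a real registration:
#
#   class resource_type(TypeInfo):
#       service = 'azure.mgmt.compute'
#       client = 'ComputeManagementClient'
#       enum_spec = ('virtual_machines', 'list_all', None)
#       resource_type = 'microsoft.compute/virtualmachines'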
class QueryMeta(type):
"""metaclass to have consistent action/filter registry for new resources."""
def __new__(cls, name, parents, attrs):
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
@six.add_metaclass(QueryMeta)
class QueryResourceManager(ResourceManager):
class resource_type(TypeInfo):
pass
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None
def augment(self, resources):
return resources
def get_permissions(self):
return ()
def get_source(self, source_type):
return sources.get(source_type)(self)
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session
def get_client(self, service=None):
if not service:
return self.get_session().client(
"%s.%s" % (self.resource_type.service, self.resource_type.client))
return self.get_session().client(service)
def get_cache_key(self, query):
return {'source_type': self.source_type, 'query': query}
@classmethod
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type)
@property
def source_type(self):
return self.data.get('source', 'describe-azure')
def resources(self, query=None):
cache_key = self.get_cache_key(query)
resources = None
if self._cache.load():
resources = self._cache.get(cache_key)
if resources is not None:
self.log.debug("Using cached %s: %d" % (
"%s.%s" % (self.__class__.__module__,
self.__class__.__name__),
len(resources)))
if resources is None:
resources = self.augment(self.source.get_resources(query))
self._cache.save(cache_key, resources)
resource_count = len(resources)
resources = self.filter_resources(resources)
# Check if we're out of a policies execution limits.
if self.data == self.ctx.policy.data:
self.check_resource_limit(len(resources), resource_count)
return resources
def check_resource_limit(self, selection_count, population_count):
"""Check if policy's execution affects more resources then its limit.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits()
def get_resources(self, resource_ids, **params):
resource_client = self.get_client()
m = self.resource_type
get_client, get_op, extra_args = m.get_spec
if extra_args:
params.update(extra_args)
op = getattr(getattr(resource_client, get_client), get_op)
data = [
op(rid, **params)
for rid in resource_ids
]
return [r.serialize(True) for r in data]
@staticmethod
def register_actions_and_filters(registry, resource_class):
resource_class.action_registry.register('notify', Notify)
if 'logic-app' not in resource_class.action_registry:
resource_class.action_registry.register('logic-app', LogicAppAction)
def validate(self):
self.source.validate()
@six.add_metaclass(QueryMeta)
class ChildResourceManager(QueryResourceManager):
child_source = 'describe-child-azure'
parent_manager = None
@property
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source
def get_parent_manager(self):
if not self.parent_manager:
self.parent_manager = self.get_resource_manager(self.resource_type.parent_manager_name)
return self.parent_manager
def get_session(self):
if self._session is None:
session = super(ChildResourceManager, self).get_session()
if self.resource_type.resource != constants.RESOURCE_ACTIVE_DIRECTORY:
session = session.get_session_for_resource(self.resource_type.resource)
self._session = session
return self._session
def enumerate_resources(self, parent_resource, type_info, **params):
client = self.get_client()
enum_op, list_op, extra_args = self.resource_type.enum_spec
# There are 2 types of extra_args:
# - static values stored in 'extra_args' dict (e.g. some type)
# - dynamic values are retrieved via 'extra_args' method (e.g. parent name)
if extra_args:
params.update({key: extra_args[key](parent_resource) for key in extra_args.keys()})
params.update(type_info.extra_args(parent_resource))
# Some resources might not have an enum_op piece (non-ARM resources)
if enum_op:
op = getattr(getattr(client, enum_op), list_op)
else:
op = getattr(client, list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def register_child_specific(registry, resource_class):
if not issubclass(resource_class, ChildResourceManager):
return
# If Child Resource doesn't annotate parent, there is no way to filter based on
# parent properties.
if resource_class.resource_type.annotate_parent:
resource_class.filter_registry.register('parent', ParentFilter)
resources.subscribe(QueryResourceManager.register_actions_and_filters)
resources.subscribe(ChildResourceManager.register_child_specific)
| kapilt/cloud-custodian | tools/c7n_azure/c7n_azure/query.py | Python | apache-2.0 | 13,063 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TopShops'
db.create_table(u'catalog_topshops', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Shop'])),
('score', self.gf('django.db.models.fields.IntegerField')()),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'catalog', ['TopShops'])
def backwards(self, orm):
# Deleting model 'TopShops'
db.delete_table(u'catalog_topshops')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
},
u'catalog.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': u"orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
u'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"})
},
u'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog'] | Makeystreet/makeystreet | woot/apps/catalog/migrations/0025_auto__add_topshops.py | Python | apache-2.0 | 24,810 |
from operator import and_
from functools import reduce
from django import forms
from django.db.models import Q
from django.utils.six import PY3
from django.utils.translation import ugettext_lazy as _
from api.dc.domain.views import dc_domain
from api.dns.domain.views import dns_domain
from api.dns.record.views import dns_record_list, dns_record
from api.vm.utils import get_owners
from gui.forms import SerializerForm
from gui.fields import ArrayField
from gui.widgets import NumberInput
from pdns.models import Domain, Record
TEXT_INPUT_ATTRS = {'class': 'input-transparent narrow', 'required': 'required'}
SELECT_ATTRS = {'class': 'narrow input-select2'}
if PY3:
t_long = int
else:
t_long = long # noqa: F821
class DcDomainForm(SerializerForm):
"""
Create or remove DC<->DNS Domain link by calling dc_domain.
"""
_api_call = dc_domain
name = forms.ChoiceField(label=_('Domain'), required=True,
widget=forms.Select(attrs={'class': 'input-select2 narrow disable_created2'}))
def __init__(self, request, domains, *args, **kwargs):
super(DcDomainForm, self).__init__(request, None, *args, **kwargs)
self.fields['name'].choices = domains.values_list('name', 'name')
def _final_data(self, data=None):
return {}
class AdminDomainForm(SerializerForm):
"""
Create DNS domain by calling dns_domain.
"""
_api_call = dns_domain
dc_bound = forms.BooleanField(label=_('DC-bound?'), required=False,
widget=forms.CheckboxInput(attrs={'class': 'normal-check'}))
name = forms.CharField(label=_('Name'), max_length=255, required=True,
widget=forms.TextInput(attrs={'class': 'input-transparent narrow disable_created',
'required': 'required', 'pattern': '[A-Za-z0-9._-]+'}))
owner = forms.ChoiceField(label=_('Owner'), required=False,
widget=forms.Select(attrs=SELECT_ATTRS))
access = forms.TypedChoiceField(label=_('Access'), required=False, coerce=int, choices=Domain.ACCESS,
widget=forms.Select(attrs=SELECT_ATTRS))
type = forms.ChoiceField(label=_('Type'), required=False, choices=Domain.TYPE_MASTER,
widget=forms.Select(attrs=SELECT_ATTRS),
help_text=_('PowerDNS domain type. '
'MASTER - use DNS protocol messages to communicate changes '
'with slaves. NATIVE - use database replication '
'between master DNS server and slave DNS servers.'))
desc = forms.CharField(label=_('Description'), max_length=128, required=False,
widget=forms.TextInput(attrs={'class': 'input-transparent wide', 'required': ''}))
tsig_keys = forms.CharField(label=_('TSIG Key(s)'), max_length=1000, required=False,
widget=forms.TextInput(attrs={'class': 'input-transparent', 'required': ''}),
help_text=_('TSIG DNS keys for external zone transfers. Zone transfers to '
'external DNS slaves will only be allowed using this key. '
'For more info on how to generate the key see Danube Cloud docs.'
))
def __init__(self, request, domain, *args, **kwargs):
super(AdminDomainForm, self).__init__(request, domain, *args, **kwargs)
self.fields['owner'].choices = get_owners(request).values_list('username', 'username')
if not request.user.is_staff:
self.fields['dc_bound'].widget.attrs['disabled'] = 'disabled'
def _initial_data(self, request, obj):
return obj.web_data
def _final_data(self, data=None):
data = super(AdminDomainForm, self)._final_data(data=data)
if self.action == 'create': # Add dc parameter when doing POST (required by api.db.utils.get_virt_object)
data['dc'] = self._request.dc.name
return data
class DnsRecordFilterForm(forms.Form):
"""
Filter DNS records for a domain.
"""
all = forms.BooleanField(widget=forms.HiddenInput(attrs={'class': 'always-include-navigation'}), required=False)
domain = forms.ChoiceField(label=_('Domain'), required=False,
widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent '
'always-include-navigation'}))
type = forms.ChoiceField(label=_('Type'), required=False, choices=(('', _('Type (all)')),) + Record.TYPE_USED,
widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent'}))
name = forms.CharField(label=_('Name'), required=False,
widget=forms.TextInput(attrs={'class': 'fill-up input-navigation input-transparent',
'placeholder': _('Search by name')}))
content = forms.CharField(label=_('Content'), required=False,
widget=forms.TextInput(attrs={'class': 'fill-up input-navigation input-transparent',
'placeholder': _('Search by content')}))
changed_since = forms.DateField(label=_('Changed since'), required=False, input_formats=('%Y-%m-%d',),
widget=forms.DateInput(format='%Y-%m-%d',
attrs={'placeholder': _('Changed since'),
'class': 'fill-up input-navigation input-transparent '
'input-date'}))
def __init__(self, request, data, _all=False, **kwargs):
super(DnsRecordFilterForm, self).__init__(data, **kwargs)
domains = Domain.objects.order_by('name')
user, dc = request.user, request.dc
if request.GET.get('deleted', False):
domains = domains.exclude(access=Domain.INTERNAL)
else:
domains = domains.exclude(access__in=Domain.INVISIBLE)
if user.is_staff and _all:
domain_choices = [(d.name, d.name) for d in domains]
else:
dc_domain_ids = list(dc.domaindc_set.values_list('domain_id', flat=True))
domains = domains.filter(Q(id__in=dc_domain_ids) | Q(user=user.id))
domain_choices = [(d.name, d.name) for d in domains
if (user.is_staff or d.user == user.id or d.dc_bound == dc.id)]
self.fields['domain'].choices = domain_choices
def get_filters(self):
data = self.cleaned_data
query = []
_type = data.get('type')
if _type:
query.append(Q(type=_type))
name = data.get('name')
if name:
query.append(Q(name__icontains=name))
content = data.get('content')
if content:
query.append(Q(content__icontains=content))
changed_since = data.get('changed_since')
if changed_since:
query.append(Q(change_date__gte=changed_since.strftime('%s')))
if query:
return reduce(and_, query)
else:
return None
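    # Illustrative note (not part of the original module): the Q object built by
    # get_filters() is typically ANDed onto a queryset of the selected domain's
    # records; the exact lookup below is hypothetical:
    #
    #   qs = Record.objects.filter(domain_id=domain.id)
    #   filters = filter_form.get_filters()
    #   if filters is not None:
    #       qs = qs.filter(filters)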
class DnsRecordForm(SerializerForm):
"""
Create, update or delete network DNS record.
"""
_ip = None
_api_call = dns_record
template = 'gui/dc/domain_record_form.html'
id = forms.IntegerField(label=_('ID'), required=True, widget=forms.HiddenInput())
name = forms.CharField(label=_('Name'), required=True,
help_text=_('The full URI the DNS server should pick up on.'),
widget=forms.TextInput(attrs=TEXT_INPUT_ATTRS))
content = forms.CharField(label=_('Content'), required=False,
# help_text=_('The answer of the DNS query.'),
widget=forms.TextInput(attrs={'class': 'input-transparent narrow'}))
type = forms.ChoiceField(label=_('Type'), required=True, choices=Record.TYPE_USED,
widget=forms.Select(attrs=SELECT_ATTRS))
ttl = forms.IntegerField(label=_('TTL'), required=False,
help_text=_('How long the DNS client is allowed to remember this record.'),
widget=NumberInput(attrs={'class': 'input-transparent narrow'}))
prio = forms.IntegerField(label=_('Priority'), required=False,
# help_text=_('Priority used by some record types.'),
widget=NumberInput(attrs={'class': 'input-transparent narrow'}))
disabled = forms.BooleanField(label=_('Disabled?'), required=False,
help_text=_('If set to true, this record is hidden from DNS clients.'),
widget=forms.CheckboxInput(attrs={'class': 'normal-check'}))
def __init__(self, request, domain, record, *args, **kwargs):
self.domain = domain
super(DnsRecordForm, self).__init__(request, record, *args, **kwargs)
def _initial_data(self, request, obj):
return obj.web_data
def api_call_args(self, domain_name):
if self.action == 'create':
return domain_name,
else:
return domain_name, self.cleaned_data['id']
class MultiDnsRecordForm(SerializerForm):
"""
Delete multiple DNS records at once.
"""
_api_call = dns_record_list
template = 'gui/dc/domain_records_form.html'
records = ArrayField(required=True, widget=forms.HiddenInput())
def __init__(self, request, domain, record, *args, **kwargs):
self.domain = domain
super(MultiDnsRecordForm, self).__init__(request, record, *args, **kwargs)
@staticmethod
def api_call_args(domain_name):
return domain_name,
| erigones/esdc-ce | gui/dc/dns/forms.py | Python | apache-2.0 | 10,134 |
# coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import mailmojo_sdk
from mailmojo_sdk.api.page_api import PageApi # noqa: E501
from mailmojo_sdk.rest import ApiException
class TestPageApi(unittest.TestCase):
"""PageApi unit test stubs"""
def setUp(self):
self.api = mailmojo_sdk.api.page_api.PageApi() # noqa: E501
def tearDown(self):
pass
def test_get_page_by_id(self):
"""Test case for get_page_by_id
Retrieve a landing page. # noqa: E501
"""
pass
def test_get_pages(self):
"""Test case for get_pages
Retrieve all landing pages. # noqa: E501
"""
pass
def test_update_page(self):
"""Test case for update_page
Update a landing page partially. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| eliksir/mailmojo-python-sdk | test/test_page_api.py | Python | apache-2.0 | 1,093 |
from logging import CRITICAL, getLevelName
from functools import wraps
from django.utils.decorators import available_attrs
from celery import states
from celery.utils.log import get_task_logger
from api.decorators import catch_exception
from api.task.utils import task_log
logger = get_task_logger(__name__)
class DetailLog(list):
"""
List-like object for collecting log lines for task log detail.
"""
def __init__(self, task_id, msg, obj=None):
super(DetailLog, self).__init__()
self.task_id = task_id
self.msg = msg
self.obj = obj
self.dc_id = None # Do not change this, unless you know what you are doing (the "vm_zoneid_changed" case)
def add(self, level, message):
return self.append((level, message))
def get_detail(self):
return '\n'.join('%s: %s' % (getLevelName(level), message) for level, message in self)
@catch_exception
def save(self, status):
"""Save task log entry if result is not None"""
if hasattr(status, '__iter__'):
status = [i for i in status if i is not None] # remove None from result
if status:
success = all(status)
else:
success = None
else:
success = status
if success is None:
return
if success:
task_status = states.SUCCESS
else:
task_status = states.FAILURE
task_log(self.task_id, self.msg, obj=self.obj, task_status=task_status, task_result=True,
detail=self.get_detail(), dc_id=self.dc_id, update_user_tasks=False)
def save_task_log(msg):
"""
Decorator used by monitoring tasks. It creates a unique list-like object for collecting monitoring logs and is
responsible for creating a task log entry after the monitoring task is finished.
"""
def wrap(fun):
@wraps(fun, assigned=available_attrs(fun))
def inner(task_id, sender, **kwargs):
logger.info('Primary task %s issued a secondary mgmt monitoring task %s', sender, task_id)
status = None
# Every monitoring task should collect logs
# NOTE: However, the monitoring task is responsible for setting up the object related to the log entry
kwargs['log'] = log = DetailLog(sender, msg)
try:
status = fun(task_id, sender, **kwargs)
except Exception as exc:
status = False
log.add(CRITICAL, exc)
raise exc
finally:
log.save(status)
return status
return inner
return wrap
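# --- Usage sketch (illustrative only; task names below are hypothetical) ---
# A monitoring task wrapped with save_task_log receives a DetailLog instance via
# the "log" keyword argument, collects per-step messages, and returns a truthy or
# falsy status that decides whether the task log entry is saved as SUCCESS or
# FAILURE:
#
#   @save_task_log('Synchronize monitoring objects')
#   def mon_sync(task_id, sender, log=None, **kwargs):
#       log.obj = ...                      # object the task log entry relates to
#       log.add(logging.INFO, 'step finished')
#       return True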
| erigones/esdc-ce | api/mon/log.py | Python | apache-2.0 | 2,678 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import proboscis
from trove.tests.api import backups
from trove.tests.api import configurations
from trove.tests.api import databases
from trove.tests.api import datastores
from trove.tests.api import flavors
from trove.tests.api import instances
from trove.tests.api import instances_actions
from trove.tests.api.mgmt import accounts
from trove.tests.api.mgmt import admin_required
from trove.tests.api.mgmt import hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import replication
from trove.tests.api import root
from trove.tests.api import user_access
from trove.tests.api import users
from trove.tests.api import versions
GROUP_SERVICES_INITIALIZE = "services.initialize"
black_box_groups = [
flavors.GROUP,
users.GROUP,
user_access.GROUP,
databases.GROUP,
root.GROUP,
GROUP_SERVICES_INITIALIZE,
instances.GROUP_START,
instances.GROUP_QUOTAS,
instances.GROUP_SECURITY_GROUPS,
backups.GROUP,
replication.GROUP,
configurations.GROUP,
datastores.GROUP,
instances_actions.GROUP_RESIZE,
# TODO(SlickNik): The restart tests fail intermittently so pulling
# them out of the blackbox group temporarily. Refer to Trove bug:
# https://bugs.launchpad.net/trove/+bug/1204233
# instances_actions.GROUP_RESTART,
instances_actions.GROUP_STOP_MYSQL,
instances.GROUP_STOP,
versions.GROUP,
instances.GROUP_GUEST,
]
proboscis.register(groups=["blackbox", "mysql"],
depends_on_groups=black_box_groups)
simple_black_box_groups = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
admin_required.GROUP,
]
proboscis.register(groups=["simple_blackbox"],
depends_on_groups=simple_black_box_groups)
black_box_mgmt_groups = [
accounts.GROUP,
hosts.GROUP,
storage.GROUP,
instances_actions.GROUP_REBOOT,
admin_required.GROUP,
mgmt_instances.GROUP,
]
proboscis.register(groups=["blackbox_mgmt"],
depends_on_groups=black_box_mgmt_groups)
# Datastores groups for int-tests
datastore_group = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
]
proboscis.register(groups=["cassandra", "couchbase", "mongodb", "postgresql"],
depends_on_groups=datastore_group)
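# Illustrative note (not part of the original module): the group names registered
# above are what a proboscis-based int-test runner selects on, e.g. a
# hypothetical invocation such as:
#   python run_tests.py --group=simple_blackbox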
| changsimon/trove | trove/tests/int_tests.py | Python | apache-2.0 | 3,057 |
# Copyright (c) 2016 Dustin Doloff
# Licensed under Apache License v2.0
import jinja2
import os
MESSAGE_FILL = '`'
AUTO_GEN_MESSAGE = """
``````````````````````````````````````````````````````
``````````````````````````````````````````````````````
````````______________________________________ ``````
```````/ /\ `````
``````/ /..\ ````
`````/ AUTO-GENERATED FILE. DO NOT EDIT /....\ ```
````/ /______\ ``
```/_____________________________________/````````````
``````````````````````````````````````````````````````
``````````````````````````````````````````````````````
"""
def reverse(v):
"""
    Reverses a sequence that supports slicing (e.g. str, list, tuple)
"""
return v[::-1]
def auto_gen_message(open, fill, close):
"""
    Produces the auto-generated warning header with language-specific syntax
open - str - The language-specific opening of the comment
fill - str - The values to fill the background with
close - str - The language-specific closing of the comment
"""
assert open or fill or close
message = AUTO_GEN_MESSAGE.strip()
if open:
message = message.replace(MESSAGE_FILL * len(open), open, 1)
if close:
message = reverse(reverse(message).replace(MESSAGE_FILL * len(close), close[::-1], 1))
if fill:
message = message.replace(MESSAGE_FILL * len(fill), fill)
return message
def generate(template, config, out_file, pretty=False):
path, ext = os.path.splitext(out_file.name)
ext = ext[1:]
if pretty:
if ext == 'py':
out_file.write(auto_gen_message('#', '#', ''))
elif ext == 'html':
out_file.write(auto_gen_message('<!--', '-', '-->'))
template_path, template_filename = os.path.split(template)
env = jinja2.Environment(loader = jinja2.FileSystemLoader([template_path]))
template = env.get_template(template_filename)
template.stream(config).dump(out_file)
# There needs to be an extra line at the end to make it a valid text file. Jinja strips trailing
# whitespace
if pretty:
out_file.write(os.linesep)
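# --- Usage sketch (illustrative only; the template path and config are hypothetical) ---
# The banner is wrapped in language-specific comment syntax before the rendered
# template is written:
#
#   auto_gen_message('#', '#', '')         # Python-style banner
#   auto_gen_message('<!--', '-', '-->')   # HTML-style banner
#
#   with open('page.html', 'w') as out:
#       generate('page.html.jinja2', {'title': 'Home'}, out, pretty=True)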
| quittle/bazel_toolbox | actions/scripts/jinja_helper.py | Python | apache-2.0 | 2,197 |
"""Additional form validators
"""
# future imports
from __future__ import absolute_import
# stdlib import
import re
from StringIO import StringIO
# third-party imports
from PIL import Image
from wtforms import ValidationError
from wtforms import validators
# Pulled from http://www.regular-expressions.info/email.html
email_re = re.compile(
r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?",
re.IGNORECASE
)
def validate_email_address(form, field):
"""Validate a string email address against the email regex
"""
if (not isinstance(field.data, basestring) or
not email_re.search(field.data)):
raise ValidationError('Not a valid email address.')
def validate_image_format(form, field):
"""Use PIL to inspect an image, to see its format type.
"""
valid_formats = ['JPG', 'JPEG', 'PNG']
if len(field.raw_data):
if hasattr(field.raw_data[0], 'filename'):
try:
i = Image.open(StringIO(field.raw_data[0].value))
if i.format not in valid_formats:
raise ValidationError('Invalid image provided.')
except IOError:
raise ValidationError('Invalid image format found.')
def validate_image_size(width=None, height=None):
def _validate_image_size(form, field):
if len(field.raw_data):
if hasattr(field.raw_data[0], 'filename'):
try:
i = Image.open(StringIO(field.raw_data[0].value))
if (width and height) and ((width, height) != i.size):
raise ValidationError(
'Image must be {}x{}, found {}x{}.'.format(
width,
height,
i.size[0],
i.size[1]
)
)
elif width and width != i.size[0]:
raise ValidationError(
'Image must be {}px in width, found {}px.'.format(
width,
i.size[0]
)
)
elif height and height != i.size[1]:
raise ValidationError(
'Image must be {}px in height, found {}px.'.format(
height,
i.size[1]
)
)
except IOError:
raise ValidationError('Invalid image format found.')
return _validate_image_size
class RequiredIf(validators.Required):
"""A validator which makes a field required if
another field is set and has a truthy value.
"""
other_field_name = None
    extra_validators = []
def __init__(self, other_field_name, *args, **kwargs):
self.other_field_name = other_field_name
        self.extra_validators = args
super(RequiredIf, self).__init__(*args, **kwargs)
def __call__(self, form, field):
other_field = form._fields.get(self.other_field_name)
if other_field is None:
raise Exception(
'no field named "%s" in form' % self.other_field_name)
if bool(other_field.data):
super(RequiredIf, self).__call__(form, field)
            for val in self.extra_validators:
val.__call__(form, field)
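# --- Usage sketch (illustrative only; the form and field names are hypothetical) ---
# These validators plug into WTForms field definitions, e.g.:
#
#   class ProfileForm(Form):
#       email = StringField(validators=[validate_email_address])
#       avatar = FileField(validators=[validate_image_format,
#                                      validate_image_size(width=200, height=200)])
#       company = StringField()
#       vat_id = StringField(validators=[RequiredIf('company')])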
| mjmcconnell/sra | src-server/app/forms/utils/validators.py | Python | apache-2.0 | 3,603 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.contrib import admin
# Register your models here.
| isb-cgc/ISB-CGC-Webapp | seqpeek/admin.py | Python | apache-2.0 | 666 |
# coding: pyxl
import unittest2
from pyxl import html
from pyxl.base import PyxlException, x_base
from pyxl.element import x_element
class PyxlTests(unittest2.TestCase):
def test_basics(self):
self.assertEqual(<div />.to_string(), '<div></div>')
self.assertEqual(<img src="blah" />.to_string(), '<img src="blah" />')
self.assertEqual(<div class="c"></div>.to_string(), '<div class="c"></div>')
self.assertEqual(<div><span></span></div>.to_string(), '<div><span></span></div>')
self.assertEqual(<frag><span /><span /></frag>.to_string(), '<span></span><span></span>')
def test_escaping(self):
self.assertEqual(<div class="&">&{'&'}</div>.to_string(), '<div class="&">&&</div>')
self.assertEqual(<div>{html.rawhtml('&')}</div>.to_string(), '<div>&</div>')
def test_comments(self):
pyxl = (
<div
class="blah" # attr comment
> # comment1
<!-- comment2 -->
text# comment3
# comment4
</div>)
self.assertEqual(pyxl.to_string(), '<div class="blah">text</div>')
def test_cond_comment(self):
s = 'blahblah'
self.assertEqual(
<cond_comment cond="lt IE 8"><div class=">">{s}</div></cond_comment>.to_string(),
'<!--[if lt IE 8]><div class=">">blahblah</div><![endif]-->')
self.assertEqual(
<cond_comment cond="(lt IE 8) & (gt IE 5)"><div>{s}</div></cond_comment>.to_string(),
'<!--[if (lt IE 8) & (gt IE 5)]><div>blahblah</div><![endif]-->')
def test_decl(self):
self.assertEqual(
<script><![CDATA[<div><div>]]></script>.to_string(),
'<script><![CDATA[<div><div>]]></script>')
def test_form_error(self):
self.assertEqual(
<form_error name="foo" />.to_string(),
'<form:error name="foo" />')
def test_enum_attrs(self):
class x_foo(x_base):
__attrs__ = {
'value': ['a', 'b'],
}
def _to_list(self, l):
pass
self.assertEqual(<foo />.attr('value'), 'a')
self.assertEqual(<foo />.value, 'a')
self.assertEqual(<foo value="b" />.attr('value'), 'b')
self.assertEqual(<foo value="b" />.value, 'b')
with self.assertRaises(PyxlException):
<foo value="c" />
class x_bar(x_base):
__attrs__ = {
'value': ['a', None, 'b'],
}
def _to_list(self, l):
pass
with self.assertRaises(PyxlException):
<bar />.attr('value')
with self.assertRaises(PyxlException):
<bar />.value
class x_baz(x_base):
__attrs__ = {
'value': [None, 'a', 'b'],
}
def _to_list(self, l):
pass
self.assertEqual(<baz />.value, None)
def test_render_args_are_added_to_pyxl_attributes(self):
class x_foo(x_element):
def render(self, value: int):
return <span>{value}</span>
self.assertEqual(<foo />.value, None)
self.assertEqual(<foo value="10" />.value, 10)
with self.assertRaises(PyxlException):
<foo value="boo" />.value
self.assertEqual(<foo value="11" />.to_string(), '<span>11</span>')
def test_render_arg_supports_enum(self):
class x_foo(x_element):
def render(self, value: ['a', 'b']):
return <span>{value}</span>
self.assertEqual(<foo />.value, 'a')
self.assertEqual(<foo value="b" />.value, 'b')
with self.assertRaises(PyxlException):
<foo value="c" />.value
def test_render_arg_without_annotation(self):
class x_foo(x_element):
def render(self, value):
return <span>{value}</span>
self.assertEqual(<foo />.to_string(), '<span></span>')
self.assertEqual(<foo value="123" />.to_string(), '<span>123</span>')
self.assertEqual(<foo value="boo" />.to_string(), '<span>boo</span>')
def test_underscore_in_render_arg(self):
class x_foo(x_element):
def render(self, a_b_c: int):
return <span>{a_b_c}</span>
self.assertEqual(<foo a_b_c="1" />.to_string(), '<span>1</span>')
def test_attr_collision(self):
with self.assertRaises(PyxlException):
class x_foo(x_element):
__attrs__ = {
'laughing': object
}
def render(self, laughing):
pass
def test_validate_attrs(self):
class x_foo(x_element):
__validate_attrs__ = False
def render(self):
return <span>{self.anything}</span>
self.assertEqual(<foo anything="yep" />.to_string(), '<span>yep</span>')
class x_bar(x_element):
__validate_attrs__ = True
def render(self):
return <span>{self.anything}</span>
with self.assertRaises(PyxlException):
<bar anything="nope" />
class x_baz(x_element):
def render(self):
return <span>{self.anything}</span>
with self.assertRaises(PyxlException):
<baz anything="nope" />
if __name__ == '__main__':
unittest2.main()
| lez/pyxl3 | tests/test_basic.py | Python | apache-2.0 | 5,433 |
"""A simple example of Google Analytics batched user permissions."""
import json
from apiclient.errors import HttpError
from apiclient.http import BatchHttpRequest
def call_back(request_id, response, exception):
"""Handle batched request responses."""
print request_id
if exception is not None:
if isinstance(exception, HttpError):
message = json.loads(exception.content)['error']['message']
print ('Request %s returned API error : %s : %s ' %
(request_id, exception.resp.status, message))
else:
print response
def add_users(users, permissions):
"""Adds users to every view (profile) with the given permissions.
Args:
users: A list of user email addresses.
permissions: A list of user permissions.
Note: this code assumes you have MANAGE_USERS level permissions
to each profile and an authorized Google Analytics service object.
"""
  # Get a full set of account summaries.
account_summaries = analytics.management().accountSummaries().list().execute()
# Loop through each account.
for account in account_summaries.get('items', []):
account_id = account.get('id')
# Loop through each user.
for user in users:
# Create the BatchHttpRequest object.
batch = BatchHttpRequest(callback=call_back)
# Loop through each property.
for property_summary in account.get('webProperties', []):
property_id = property_summary.get('id')
# Loop through each view (profile).
for view in property_summary.get('profiles', []):
view_id = view.get('id')
# Construct the Profile User Link.
link = analytics.management().profileUserLinks().insert(
accountId=account_id,
webPropertyId=property_id,
profileId=view_id,
body={
'permissions': {
'local': permissions
},
'userRef': {
'email': user
}
}
)
batch.add(link)
# Execute the batch request for each user.
batch.execute()
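# Illustrative note (not part of the original sample): the `analytics` service
# object assumed above is typically built with the Google API Python client,
# e.g.:
#   from apiclient.discovery import build
#   analytics = build('analytics', 'v3', http=authorized_http)
# where `authorized_http` is an httplib2.Http object wrapped with OAuth 2.0
# credentials.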
if __name__ == '__main__':
# Construct a list of users.
emails = ['[email protected]', '[email protected]', '[email protected]', '[email protected]']
# call the add_users function with the list of desired permissions.
add_users(emails, ['READ_AND_ANALYZE'])
| mcohoon/api-samples | batching/permissions.py | Python | apache-2.0 | 2,389 |
from sys import maxsize
class Contact:
def __init__(self, Firstname=None, Middlename=None, Lastname=None, Nickname=None, Title=None, Company=None, Address=None, Home=None, Mobile=None, Work=None,
Fax=None, Email=None, Email2=None, Email3=None, Homepage=None, Bday=None, Bmonth=None, Byear=None, Aday=None, Amonth=None, Ayear=None, Address2=None, Phone2=None,
Notes=None, id=None, all_phones_from_home_page=None, all_address_from_home_page=None, all_emails=None):
self.Firstname = Firstname
self.Middlename = Middlename
self.Lastname = Lastname
self.Nickname = Nickname
self.Title = Title
self.Company = Company
self.Address = Address
self.Home = Home
self.Mobile = Mobile
self.Work = Work
self.Fax = Fax
self.Email = Email
self.Email2 = Email2
self.Email3 = Email3
self.Homepage = Homepage
self.Bday = Bday
self.Bmonth = Bmonth
self.Byear = Byear
self.Aday = Aday
self.Amonth = Amonth
self.Ayear = Ayear
self.Address2 = Address2
self.Phone2 = Phone2
self.Notes = Notes
self.id = id
self.all_phones_from_home_page = all_phones_from_home_page
self.all_address_from_home_page = all_address_from_home_page
        self.all_emails = all_emails
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.Firstname == other.Firstname and self.Lastname == other.Lastname
def __repr__(self):
return "%s:%s;%s" % (self.Firstname, self.Lastname, self.Middlename)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
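    # Illustrative example (not part of the original model): id_or_max is handy
    # as a sort key when comparing contact lists where new contacts have no id
    # yet:
    #   contact_list.sort(key=Contact.id_or_max)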
| IrinaZI/Python_training | model/contact.py | Python | apache-2.0 | 1,808 |
from setuptools import setup
setup(name='mock_labels', version='0.0.1', packages=['mock_labels'])
| cloudify-cosmo/cloudify-manager | tests/integration_tests_plugins/mock_labels/setup.py | Python | apache-2.0 | 99 |
import json
from unit.http import TestHTTP
from unit.option import option
http = TestHTTP()
def check_chroot():
available = option.available
resp = http.put(
url='/config',
sock_type='unix',
addr=option.temp_dir + '/control.unit.sock',
body=json.dumps(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [
{
"action": {
"share": option.temp_dir,
"chroot": option.temp_dir,
}
}
],
}
),
)
if 'success' in resp['body']:
available['features']['chroot'] = True
| nginx/unit | test/unit/check/chroot.py | Python | apache-2.0 | 748 |
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from hybrid_a_star_python_interface import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import animation
import numpy as np
import time
import math
def HybridAStarPlan(visualize_flag):
    # initialize the planner object
HybridAStar = HybridAStarPlanner()
    # parameters (except max, min and car size, which are defined in proto)
num_output_buffer = 100000
sx = -8
sy = 4
sphi = 0.0
scenario = "backward"
# scenario = "parallel"
if scenario == "backward":
# for parking space 11543 in sunnyvale_with_two_offices
left_boundary_x = (
c_double * 3)(*[-13.6407054776, 0.0, 0.0515703622475])
left_boundary_y = (
c_double * 3)(*[0.0140634663703, 0.0, -5.15258191624])
down_boundary_x = (c_double * 2)(*[0.0515703622475, 2.8237895441])
down_boundary_y = (c_double * 2)(*[-5.15258191624, -5.15306980547])
right_boundary_x = (
c_double * 3)(*[2.8237895441, 2.7184833539, 16.3592013995])
right_boundary_y = (
c_double * 3)(*[-5.15306980547, -0.0398078878812, -0.011889513383])
up_boundary_x = (c_double * 2)(*[16.3591910364, -13.6406951857])
up_boundary_y = (c_double * 2)(*[5.60414234644, 5.61797800844])
# obstacles(x, y, size)
HybridAStar.AddVirtualObstacle(left_boundary_x, left_boundary_y, 3)
HybridAStar.AddVirtualObstacle(
down_boundary_x, down_boundary_y, 2)
HybridAStar.AddVirtualObstacle(
right_boundary_x, right_boundary_y, 3)
HybridAStar.AddVirtualObstacle(
up_boundary_x, up_boundary_y, 2)
ex = 1.359
ey = -3.86443643718
ephi = 1.581
XYbounds = [-13.6406951857, 16.3591910364, -
5.15258191624, 5.61797800844]
x = (c_double * num_output_buffer)()
y = (c_double * num_output_buffer)()
phi = (c_double * num_output_buffer)()
v = (c_double * num_output_buffer)()
a = (c_double * num_output_buffer)()
steer = (c_double * num_output_buffer)()
size = (c_ushort * 1)()
XYbounds_ctype = (c_double * 4)(*XYbounds)
start = time.time()
print("planning start")
success = True
if not HybridAStar.Plan(sx, sy, sphi, ex, ey, ephi, XYbounds_ctype):
print("planning fail")
success = False
end = time.time()
planning_time = end - start
print("planning time is " + str(planning_time))
# load result
x_out = []
y_out = []
phi_out = []
v_out = []
a_out = []
steer_out = []
if visualize_flag and success:
HybridAStar.GetResult(x, y, phi, v, a, steer, size)
for i in range(0, size[0]):
x_out.append(float(x[i]))
y_out.append(float(y[i]))
phi_out.append(float(phi[i]))
v_out.append(float(v[i]))
a_out.append(float(a[i]))
steer_out.append(float(steer[i]))
# plot
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
for i in range(0, size[0]):
downx = 1.055 * math.cos(phi_out[i] - math.pi / 2)
downy = 1.055 * math.sin(phi_out[i] - math.pi / 2)
leftx = 1.043 * math.cos(phi_out[i] - math.pi)
lefty = 1.043 * math.sin(phi_out[i] - math.pi)
x_shift_leftbottom = x_out[i] + downx + leftx
y_shift_leftbottom = y_out[i] + downy + lefty
car = patches.Rectangle((x_shift_leftbottom, y_shift_leftbottom), 3.89 + 1.043, 1.055*2,
angle=phi_out[i] * 180 / math.pi, linewidth=1, edgecolor='r', facecolor='none')
arrow = patches.Arrow(
x_out[i], y_out[i], 0.25*math.cos(phi_out[i]), 0.25*math.sin(phi_out[i]), 0.2)
ax.add_patch(car)
ax.add_patch(arrow)
ax.plot(sx, sy, "s")
ax.plot(ex, ey, "s")
if scenario == "backward":
left_boundary_x = [-13.6407054776, 0.0, 0.0515703622475]
left_boundary_y = [0.0140634663703, 0.0, -5.15258191624]
down_boundary_x = [0.0515703622475, 2.8237895441]
down_boundary_y = [-5.15258191624, -5.15306980547]
right_boundary_x = [2.8237895441, 2.7184833539, 16.3592013995]
right_boundary_y = [-5.15306980547, -0.0398078878812, -0.011889513383]
up_boundary_x = [16.3591910364, -13.6406951857]
up_boundary_y = [5.60414234644, 5.61797800844]
ax.plot(left_boundary_x, left_boundary_y, "k")
ax.plot(down_boundary_x, down_boundary_y, "k")
ax.plot(right_boundary_x, right_boundary_y, "k")
ax.plot(up_boundary_x, up_boundary_y, "k")
plt.axis('equal')
fig2 = plt.figure(2)
v_graph = fig2.add_subplot(311)
v_graph.title.set_text('v')
v_graph.plot(np.linspace(0, size[0], size[0]), v_out)
a_graph = fig2.add_subplot(312)
a_graph.title.set_text('a')
a_graph.plot(np.linspace(0, size[0], size[0]), a_out)
steer_graph = fig2.add_subplot(313)
steer_graph.title.set_text('steering')
steer_graph.plot(np.linspace(0, size[0], size[0]), steer_out)
plt.show()
    if not visualize_flag:
        if success:
HybridAStar.GetResult(x, y, phi, v, a, steer, size)
for i in range(0, size[0]):
x_out.append(float(x[i]))
y_out.append(float(y[i]))
phi_out.append(float(phi[i]))
v_out.append(float(v[i]))
a_out.append(float(a[i]))
steer_out.append(float(steer[i]))
return success, x_out, y_out, phi_out, v_out, a_out, steer_out, planning_time
if __name__ == '__main__':
visualize_flag = True
HybridAStarPlan(visualize_flag)
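# Illustrative note (not part of the original tool): the planner can also be run
# head-less and the returned trajectory inspected programmatically, e.g.:
#   ok, xs, ys, phis, vs, accs, steers, t = HybridAStarPlan(visualize_flag=False)
#   if ok:
#       print("points: %d, planning time: %.3fs" % (len(xs), t))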
| msbeta/apollo | modules/tools/open_space_visualization/hybrid_a_star_visualizer.py | Python | apache-2.0 | 6,581 |
import os
import subprocess
SSH_OPTIONS = ['-o', 'StrictHostKeyChecking=no', '-o', 'PreferredAuthentications=publickey', '-o', 'PubkeyAuthentication=yes']
def rsync_get_file(uri_from, uri_to, user, host, port, key):
cmd = [
'rsync',
'-e',
'ssh -i {} -p {} {}'.format(key, port, ' '.join(SSH_OPTIONS)),
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd)
def rsync_post_file(uri_from, uri_to, user, host, port, key):
_ensure_dir(uri_to, key, port, user, host)
cmd = [
'rsync',
'-e',
'ssh -i {} -p {} {}'.format(key, port, ' '.join(SSH_OPTIONS)),
uri_from,
'{}@{}:{}'.format(user, host, uri_to),
]
_call(cmd)
def scp_get_file(uri_from, uri_to, user, host, port, key):
cmd = [
'scp',
'-P', str(port),
'-i', key
] + SSH_OPTIONS + [
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd)
def scp_post_file(uri_from, uri_to, user, host, port, key):
_ensure_dir(uri_to, key, port, user, host)
cmd = [
'scp',
'-P', str(port),
'-i', key,
] + SSH_OPTIONS + [
uri_from,
'{}@{}:{}'.format(user, host, uri_to),
]
_call(cmd)
def _ensure_dir(uri_to, key, port, user, host):
directory = os.path.dirname(uri_to)
cmd = [
'ssh',
'-i', key,
'-p', str(port),
] + SSH_OPTIONS + [
'{}@{}'.format(user, host),
'mkdir', '-p', directory,
]
_call(cmd)
def _call(cmd):
exit_code = subprocess.check_call(cmd)
if exit_code != 0:
raise Exception("{} exited with code {}".format(cmd[0], exit_code))
__all__ = [
'rsync_post_file',
'rsync_get_file',
'scp_post_file',
'scp_get_file'
]
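# --- Usage sketch (illustrative only; host, key and paths are hypothetical) ---
#   scp_post_file('/tmp/job1/input.dat', '/staging/job1/input.dat',
#                 user='pulsar', host='worker.example.org', port=22,
#                 key='/home/pulsar/.ssh/id_rsa')
#   rsync_get_file('/staging/job1/output.dat', '/tmp/job1/output.dat',
#                  user='pulsar', host='worker.example.org', port=22,
#                  key='/home/pulsar/.ssh/id_rsa')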
| galaxyproject/pulsar | pulsar/client/transport/ssh.py | Python | apache-2.0 | 1,799 |
# Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .operations import upload_blueprint # NOQA
from .operations import delete # NOQA
from .operations import create # NOQA
from .operations import execute_start # NOQA
from .operations import refresh # NOQA
| cloudify-cosmo/cloudify-manager | cloudify_types/cloudify_types/component/__init__.py | Python | apache-2.0 | 832 |
#!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Quiz."""
# Python imports
import base64
import logging
import md5
import operator
import os
import random
import re
import time
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import memcache
class QuizBaseModel(db.Model):
"""Base class for quiz models."""
class QuizTrunkModel(QuizBaseModel):
"""Maintains trunk for quiz model.
Attributes:
    head: Maintains the head of a quiz.
"""
head = db.StringProperty()
class QuizRevisionModel(QuizBaseModel):
"""Maintains list of revisions for a quiz.
Quiz trunk associated with the revision is made parent of the model.
Attributes:
quiz_id: Id (key) for particular version of the quiz.
time_stamp: Time_stamp for a new revision.
commit_message: Commit message associated with new version.
"""
quiz_id = db.StringProperty()
time_stamp = db.DateTimeProperty(auto_now=True)
commit_message = db.StringProperty(default='Commiting a new version')
class QuizPropertyModel(QuizBaseModel):
"""Defines various properties for a quiz.
Attributes:
shuffle_questions: If set questions are presented in random order.
min_options: minimum number of options to be presented.
max_options: maximum number of options to be presented.
min_questions: minimum number of questions required to complete the quiz.
Used to track the progress.
repeat_questions: If set questions are repeated.
repeat_wrongly_answered_questions: If set wrongly answered questions are
repeated.
"""
shuffle_questions = db.BooleanProperty(default=True)
min_options = db.IntegerProperty(default=2)
max_options = db.IntegerProperty(default=10) # 0 implies all
min_questions = db.IntegerProperty(default=0) # 0 implies all
repeat_questions = db.BooleanProperty(default=False)
repeat_wrongly_answered_questions = db.BooleanProperty(default=False)
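# Illustrative sketch (not part of the application code): given the fields
# above, a quiz with custom behaviour would typically be stored by creating
# a property entity first and referencing it from the quiz, e.g.
#   prop = QuizPropertyModel(shuffle_questions=True, min_options=3)
#   prop.put()
#   QuizModel(title='Sample quiz', quiz_property=prop).put()
# The call sites are assumptions for illustration; only the model fields are
# defined in this module.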
class QuizModel(QuizBaseModel):
"""Represents a quiz.
Attributes:
difficulty_level: Difficulty level for the quiz (range 0-10).
    quiz_property: Reference to the property associated with the quiz.
    title: Title of the quiz.
    tags: Tags associated with the quiz.
    trunk: Reference to the trunk associated with the quiz.
    introduction: Introduction text shown on the start page of the quiz.
"""
# implicit id
difficulty_level = db.RatingProperty(default=5)
quiz_property = db.ReferenceProperty(QuizPropertyModel)
title = db.StringProperty()
tags = db.ListProperty(db.Category)
trunk = db.ReferenceProperty(QuizTrunkModel)
introduction = db.StringProperty()
class ChoiceModel(QuizBaseModel):
"""Represents a choice/option provided to user for a question model.
Attributes:
body: Body of the choice.
    message: Message to be displayed when the choice is selected.
      May act like a hint.
    is_correct: Whether the selected choice is correct.
"""
# implicit id
body = db.TextProperty()
message = db.StringProperty()
is_correct = db.BooleanProperty(default=False)
def dump_to_dict(self):
"""Dumps choice to a dictionary for passing around as JSON object."""
data_dict = {'body': self.body,
'id': str(self.key())}
return data_dict
class QuestionModel(QuizBaseModel):
"""Represents a question.
Attributes:
    body: Text associated with the question.
    choices: List of possible choices.
    shuffle_choices: If set, choices are randomly shuffled.
    hints: Ordered list of progressive hints.
"""
# implicit id
body = db.TextProperty()
choices = db.ListProperty(db.Key)
shuffle_choices = db.BooleanProperty(default=True)
hints = db.StringListProperty()
def dump_to_dict(self):
"""Dumps the question model to a dictionary for passing
around as JSON object."""
data_dict = {'id': str(self.key()),
'body': self.body,
'hints': self.hints,
'choices': [db.get(el).dump_to_dict() for el in self.choices]
}
    if self.shuffle_choices and data_dict['choices']:
      # random.shuffle shuffles the list in place and returns None, so its
      # result must not be assigned back to data_dict['choices'].
      random.shuffle(data_dict['choices'])
return data_dict
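# For illustration, dump_to_dict() above produces a structure of roughly this
# shape (the values shown are hypothetical):
#   {'id': '<question key>',
#    'body': 'What is 2 + 2?',
#    'hints': ['Think in base 10'],
#    'choices': [{'body': '4', 'id': '<choice key>'}, ...]}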
class QuizQuestionListModel(QuizBaseModel):
"""Maintains a list of question with its quiz id.
This is necessary because questions may be shared between different quizes.
Attributes:
quiz: Reference to quiz object.
question: Reference to question object asscociated with quiz.
time_stamp: Time stamp.
"""
quiz = db.ReferenceProperty(QuizModel)
question = db.ReferenceProperty(QuestionModel)
time_stamp = db.DateTimeProperty(auto_now_add=True)
class ResponseModel(QuizBaseModel):
"""Stores response data required for producing next question.
Attributes:
session_id: Session Identifier.
    answered_correctly: Set if the response resulted in a correct answer.
    question: Reference to question being answered.
    quiz: Reference to associated quiz.
    quiz_trunk: Reference to associated quiz trunk.
    time_stamp: Time stamp of the response.
attempts: Number of attempts so far, useful for scoring.
"""
session_id = db.StringProperty(required=True)
  answered_correctly = db.BooleanProperty()
question = db.ReferenceProperty(QuestionModel)
quiz = db.ReferenceProperty(QuizModel)
quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
time_stamp = db.DateTimeProperty(auto_now=True)
attempts = db.IntegerProperty(default=0)
class QuizScoreModel(QuizBaseModel):
"""Stores progress status associated with a quiz and session.
Both score and progress are out of 100.
Attributes:
session_id: Session Identifier.
quiz: Reference to associated quiz.
quiz_trunk: Reference to associated quiz trunk.
score: Current score.
    progress: Current progress status.
questions_attempted: Number of questions attempted so far.
"""
quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
session_id = db.StringProperty(required=True)
quiz = db.ReferenceProperty(QuizModel)
score = db.FloatProperty(default=0.0)
progress = db.FloatProperty(default=0.0)
questions_attempted = db.IntegerProperty(default=0)
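# Illustrative sketch (assumed call site): the score record for a session and
# quiz trunk would typically be looked up with a datastore query such as
#   score = (QuizScoreModel.all()
#            .filter('session_id =', session_id)
#            .filter('quiz_trunk =', trunk)
#            .get())
# The query itself is an assumption; only the model fields are defined here.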
| arjunsatyapal/lantern | demo1/quiz/models.py | Python | apache-2.0 | 6,723 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class BatchNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm(self):
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 2, 4, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float32')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_batchnorm_float16(self):
_run_batchnorm_correctness_test(
normalization.BatchNormalization, dtype='float16')
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision(self):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(4, 4, 3),
momentum=0.8,
dtype=policy.Policy('mixed_float16'))
x = np.random.normal(size=(10, 4, 4, 3))
y = norm(x)
self.assertEqual(y.dtype, 'float16')
self.assertEqual(norm.beta.dtype.base_dtype, 'float32')
self.assertEqual(norm.gamma.dtype.base_dtype, 'float32')
@combinations.generate(combinations.combine(mode=['graph', 'eager'],
fused=[True, False]))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision_does_not_overflow(self, fused):
norm = keras.layers.BatchNormalization(
axis=-1,
input_shape=(1, 1, 1),
fused=fused,
dtype=policy.Policy('mixed_float16'))
x = np.array([-1000., 1000.]).reshape((2, 1, 1, 1))
y = norm(x, training=True)
expected_y = np.array([-1.0, 1.0]).reshape((2, 1, 1, 1))
self.assertAllClose(keras.backend.eval(y), expected_y)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
# We use the same data shape for all the data we use in this test.
# This will prevent any used tf.functions from retracing.
# This helps us verify that changing trainable and recompiling really
# does update the training loop, rather than a different data shape
# triggering a retrace.
data_shape = (100, 3)
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.random.random(data_shape), np.random.random(data_shape))
test_data = np.random.random(data_shape)
test_targets = np.random.random(data_shape)
test_loss = model.evaluate(test_data, test_targets)
bn.trainable = False
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.bn = keras.layers.BatchNormalization()
@def_function.function()
def call(self, x, training):
return self.bn(x, training=training)
model = MyModel()
for _ in range(10):
x = constant_op.constant(0.5, shape=[1, 1])
model(x, training=True)
# Make sure the moving mean and variance have been updated
self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
class BatchNormalizationV1Test(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v1_fused_attribute(self):
norm = normalization.BatchNormalization()
inp = keras.layers.Input((4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(2, 2, 2))
norm(inp)
self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm_v2(self):
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': True},
input_shape=(3, 3, 3, 3))
testing_utils.layer_test(
normalization_v2.BatchNormalization,
kwargs={'fused': None},
input_shape=(3, 3, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v2_fused_attribute(self):
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
inp = keras.layers.Input(shape=(4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = normalization_v2.BatchNormalization(fused=True, axis=[3])
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
with self.assertRaisesRegex(ValueError, 'fused.*renorm'):
normalization_v2.BatchNormalization(fused=True, renorm=True)
with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=2)
with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
normalization_v2.BatchNormalization(fused=True, axis=[1, 3])
with self.assertRaisesRegex(ValueError, 'fused.*virtual_batch_size'):
normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)
with self.assertRaisesRegex(ValueError, 'fused.*adjustment'):
normalization_v2.BatchNormalization(fused=True,
adjustment=lambda _: (1, 0))
norm = normalization_v2.BatchNormalization(fused=True)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4))
with self.assertRaisesRegex(ValueError, '4D or 5D input tensors'):
norm(inp)
def test_updates_in_wrap_function(self):
def my_func():
layer = normalization.BatchNormalization()
x = array_ops.ones((10, 1))
y = layer(x, training=True)
# Updates should be tracked in a `wrap_function`.
self.assertLen(layer.updates, 2)
return y
wrapped_fn = wrap_function.wrap_function(my_func, [])
wrapped_fn()
@keras_parameterized.run_all_keras_modes
def test_basic_batchnorm_v2_none_shape_and_virtual_batch_size(self):
    # Test case for GitHub issue 32380.
norm = normalization_v2.BatchNormalization(virtual_batch_size=8)
inp = keras.layers.Input(shape=(None, None, 3))
_ = norm(inp)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
model = keras.models.Sequential()
model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
norm = layer(momentum=0.8, fused=fused)
model.add(norm)
if dtype == 'float16':
# Keras models require float32 losses.
model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
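  # Subtracting beta and dividing by gamma undoes the layer's learned affine
  # transform, so `out` should be approximately the normalized activations
  # (mean ~0, std ~1) checked below.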
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
@parameterized.parameters(
[normalization.BatchNormalization, normalization_v2.BatchNormalization])
class NormalizationLayersGraphModeOnlyTest(
test.TestCase, parameterized.TestCase):
def test_shared_batchnorm(self, layer):
"""Test that a BN layer can be shared across different data streams."""
with self.cached_session():
# Test single layer reuse
bn = layer()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
model.train_on_batch(x, x)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self, layer):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = layer(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self, layer):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
    Computes the mean and std of the current inputs and then
    applies batch normalization using them.
Args:
layer: Either V1 or V2 of BatchNormalization layer.
"""
# TODO(fchollet): enable in all execution modes when issue with
# learning phase setting is resolved.
with ops.Graph().as_default(), self.cached_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = layer()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
with keras.backend.learning_phase_scope(1):
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
def _run_layernorm_correctness_test(layer, dtype='float32'):
model = keras.models.Sequential()
model.add(keras.layers.Lambda(lambda x: math_ops.cast(x, dtype='float16')))
norm = layer(input_shape=(2, 2, 2), dtype=dtype)
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
.astype(dtype))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class LayerNormalizationTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_basic_layernorm(self):
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -2, -1)},
input_shape=(2, 8, 8, 3))
@keras_parameterized.run_all_keras_modes
def test_non_fused_layernorm(self):
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': -2},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -2)},
input_shape=(2, 8, 8, 3))
testing_utils.layer_test(
keras.layers.LayerNormalization,
kwargs={'axis': (-3, -1)},
input_shape=(2, 8, 8, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_weights(self):
layer = keras.layers.LayerNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 0)
layer = keras.layers.LayerNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 2)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_regularization(self):
layer = keras.layers.LayerNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.LayerNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@keras_parameterized.run_all_keras_modes
def test_layernorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
model.add(norm)
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@keras_parameterized.run_all_keras_modes
def test_layernorm_correctness(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float32')
@keras_parameterized.run_all_keras_modes
def test_layernorm_mixed_precision(self):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float16')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testIncorrectAxisType(self):
with self.assertRaisesRegex(TypeError,
r'Expected an int or a list/tuple of ints'):
_ = normalization.LayerNormalization(axis={'axis': -1})
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidAxis(self):
with self.assertRaisesRegex(ValueError, r'Invalid axis: 3'):
layer_norm = normalization.LayerNormalization(axis=3)
layer_norm.build(input_shape=(2, 2, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDuplicateAxis(self):
with self.assertRaisesRegex(ValueError, r'Duplicate axis:'):
layer_norm = normalization.LayerNormalization(axis=[-1, -1])
layer_norm.build(input_shape=(2, 2, 2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testFusedAttr(self):
layer_norm = normalization.LayerNormalization(axis=[-2, -1])
layer_norm.build(input_shape=(2, 2, 2))
self.assertEqual(layer_norm._fused, True)
class LayerNormalizationNumericsTest(keras_parameterized.TestCase):
"""Tests LayerNormalization has correct and numerically stable outputs."""
def _expected_layer_norm(self, x, beta, gamma, batch_input_shape, axis,
epsilon):
"""Returns the layer norm, which is computed using NumPy."""
broadcast_shape = [batch_input_shape[i] if i in axis else 1
for i in range(len(batch_input_shape))]
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
expected = (x - mean) / np.sqrt(var + epsilon)
expected *= np.reshape(gamma, broadcast_shape)
expected += np.reshape(beta, broadcast_shape)
return expected
def _test_forward_pass(self, batch_input_shape, axis, fp64_tol=1e-14,
fp32_tol=1e-6, fp16_tol=1e-2):
"""Tests the forward pass of layer normalization.
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
      axis: A list of axes to normalize. Will be passed to the `axis` argument
of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
expected = self._expected_layer_norm(x, beta, gamma, batch_input_shape,
axis, epsilon)
for dtype in 'float64', 'float32', 'float16':
norm = normalization.LayerNormalization(
axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma))
y = norm(keras.backend.cast(x, dtype))
actual = keras.backend.eval(y)
if dtype == 'float64':
tol = fp64_tol
elif dtype == 'float32':
tol = fp32_tol
else:
assert dtype == 'float16'
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances, because
# some of the values are very close to zero.
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_forward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least 4
# elements.
self._test_forward_pass((4, 3), (0,))
self._test_forward_pass((3, 4), (1,))
self._test_forward_pass((4, 3, 2), (0,))
self._test_forward_pass((2, 4, 2), (1,))
self._test_forward_pass((2, 3, 4), (2,), fp16_tol=5e-2)
self._test_forward_pass((2, 3, 2), (0, 2))
self._test_forward_pass((2, 2, 2, 2), (1, 3))
self._test_forward_pass((2, 2, 2, 2), (2, 3))
self._test_forward_pass((2, 3, 4, 5), (3,))
def _test_backward_pass(self, batch_input_shape, axis, fp64_tol=1e-5,
fp32_tol=1e-5, fp16_tol=2e-2):
"""Tests the backwards pass of layer normalization.
Args:
batch_input_shape: The input shape that will be used to test, including
the batch dimension.
      axis: A list of axes to normalize. Will be passed to the `axis` argument
of LayerNormalization.
fp64_tol: The relative and absolute tolerance for float64.
fp32_tol: The relative and absolute tolerance for float32.
fp16_tol: The relative and absolute tolerance for float16.
"""
param_shape = [batch_input_shape[i] for i in axis]
param_elems = 1
for dim in param_shape:
param_elems *= dim
beta = np.arange(param_elems, dtype='float64').reshape(param_shape)
gamma = np.arange(1, param_elems + 1, dtype='float64').reshape(param_shape)
x = np.random.normal(size=batch_input_shape)
for epsilon in 1e-12, 1e-3:
# Float64 must come first in this list, as we use the float64 numerical
# gradients to compare to the float32 and float16 symbolic gradients as
# well. Computing float32/float16 numerical gradients is too numerically
# unstable.
for dtype in 'float64', 'float32', 'float16':
norm = normalization.LayerNormalization(
axis=axis, dtype=dtype, batch_input_shape=batch_input_shape,
epsilon=epsilon, beta_initializer=keras.initializers.constant(beta),
gamma_initializer=keras.initializers.constant(gamma))
norm.build(x.shape)
# pylint: disable=cell-var-from-loop
def forward_fn(x, beta, gamma):
# We must monkey-patch the attributes of `norm` with the function
# arguments, so that the gradient checker will properly compute their
# gradients. The gradient checker computes gradients with respect to
# the input arguments of `f`.
with test.mock.patch.object(norm, 'beta', beta):
with test.mock.patch.object(norm, 'gamma', gamma):
return norm(x)
# pylint: enable=cell-var-from-loop
results = gradient_checker_v2.compute_gradient(
forward_fn, [keras.backend.cast(x, dtype), norm.beta, norm.gamma])
([x_grad_t, beta_grad_t, gamma_grad_t],
[x_grad_n, beta_grad_n, gamma_grad_n]) = results
if dtype == 'float64':
# We use the float64 numeric gradients as the reference, to compare
# against the symbolic gradients for all dtypes.
x_grad_ref = x_grad_n
beta_grad_ref = beta_grad_n
gamma_grad_ref = gamma_grad_n
tol = fp64_tol
elif dtype == 'float32':
tol = fp32_tol
else:
assert dtype == 'float16'
tol = fp16_tol
# We use absolute tolerances in addition to relative tolerances, because
# some of the values are very close to zero.
self.assertAllClose(x_grad_t, x_grad_ref, rtol=tol, atol=tol)
self.assertAllClose(beta_grad_t, beta_grad_ref, rtol=tol, atol=tol)
self.assertAllClose(gamma_grad_t, gamma_grad_ref, rtol=tol, atol=tol)
# The gradient_checker_v2 does not work properly with LayerNorm in graph mode.
@testing_utils.run_v2_only
def test_backward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least 4
# elements.
self._test_backward_pass((4, 3), (0,))
self._test_backward_pass((2, 4, 2), (1,))
self._test_backward_pass((2, 3, 4), (2,))
self._test_backward_pass((2, 3, 2), (0, 2), fp64_tol=5e-4, fp32_tol=5e-4)
self._test_backward_pass((2, 2, 2, 2), (1, 3))
self._test_backward_pass((2, 2, 2, 2), (2, 3))
if __name__ == '__main__':
test.main()
| karllessard/tensorflow | tensorflow/python/keras/layers/normalization_test.py | Python | apache-2.0 | 30,233 |
import numpy as np
from openfermioncirq.experiments.hfvqe.gradient_hf import (rhf_func_generator,
rhf_minimization)
from openfermioncirq.experiments.hfvqe.molecular_example import make_h6_1_3
def test_rhf_func_gen():
rhf_objective, molecule, parameters, _, _ = make_h6_1_3()
ansatz, energy, _ = rhf_func_generator(rhf_objective)
assert np.isclose(molecule.hf_energy, energy(parameters))
ansatz, energy, _, opdm_func = rhf_func_generator(
rhf_objective, initial_occ_vec=[1] * 3 + [0] * 3, get_opdm_func=True)
assert np.isclose(molecule.hf_energy, energy(parameters))
test_opdm = opdm_func(parameters)
u = ansatz(parameters)
initial_opdm = np.diag([1] * 3 + [0] * 3)
    final_opdm = u @ initial_opdm @ u.T
    assert np.allclose(test_opdm, final_opdm)
result = rhf_minimization(rhf_objective, initial_guess=parameters)
assert np.allclose(result.x, parameters)
| quantumlib/OpenFermion-Cirq | openfermioncirq/experiments/hfvqe/gradient_hf_test.py | Python | apache-2.0 | 942 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import contextlib
import copy
import uuid
import mock
import mox
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo.config import cfg
import six
from nova.compute import flavors
from nova import context
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.pci import pci_manager
from nova.pci import pci_whitelist
from nova import policy
from nova import test
from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
# NOTE: Neutron client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
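# MyComparator below is a mox argument matcher that compares dicts, lists and
# tuples recursively, with list/tuple comparison done by membership rather
# than strict ordering, so expected neutron request bodies (e.g. port_req_body)
# can be matched structurally.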
class MyComparator(mox.Comparator):
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in lhs.iteritems():
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
class TestNeutronClient(test.TestCase):
def test_withtoken(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=CONF.neutron.auth_strategy,
endpoint_url=CONF.neutron.url,
token=my_context.auth_token,
timeout=CONF.neutron.url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
neutronv2.get_client(my_context)
def test_withouttoken(self):
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
def test_withtoken_context_is_admin(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token',
is_admin=True)
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=CONF.neutron.auth_strategy,
endpoint_url=CONF.neutron.url,
token=my_context.auth_token,
timeout=CONF.neutron.url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
# Note that although we have admin set in the context we
# are not asking for an admin client, and so we auth with
# our own token
neutronv2.get_client(my_context)
def test_withouttoken_keystone_connection_error(self):
self.flags(auth_strategy='keystone', group='neutron')
self.flags(url='http://anyhost/', group='neutron')
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
neutronv2.get_client,
my_context)
def test_reuse_admin_token(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
token_store = neutronv2.AdminTokenStore.get()
token_store.admin_auth_token = 'new_token'
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
with contextlib.nested(
mock.patch.object(client.Client, "list_networks",
side_effect=mock.Mock),
mock.patch.object(client.Client, 'get_auth_info',
return_value={'auth_token': 'new_token1'}),
):
client1 = neutronv2.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token1', token_store.admin_auth_token)
client1 = neutronv2.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token1', token_store.admin_auth_token)
def test_admin_token_updated(self):
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
token_store = neutronv2.AdminTokenStore.get()
token_store.admin_auth_token = 'new_token'
tokens = [{'auth_token': 'new_token1'}, {'auth_token': 'new_token'}]
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
with contextlib.nested(
mock.patch.object(client.Client, "list_networks",
side_effect=mock.Mock),
mock.patch.object(client.Client, 'get_auth_info',
side_effect=tokens.pop),
):
client1 = neutronv2.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token', token_store.admin_auth_token)
client1 = neutronv2.get_client(my_context, True)
client1.list_networks(retrieve_all=False)
self.assertEqual('new_token1', token_store.admin_auth_token)
class TestNeutronv2Base(test.TestCase):
def setUp(self):
super(TestNeutronv2Base, self).setUp()
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
'availability_zone': 'nova',
'host': 'some_host',
'security_groups': []}
self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance2',
'availability_zone': 'nova',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'my_tenantid'}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': 'my_netid2',
'name': 'my_netname2',
'subnets': ['mysubnid2'],
'tenant_id': 'my_tenantid'})
self.nets3 = self.nets2 + [{'id': 'my_netid3',
'name': 'my_netname3',
'tenant_id': 'my_tenantid'}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
# A network request with external networks
self.nets5 = self.nets1 + [{'id': 'the-external-one',
'name': 'out-of-this-world',
'router:external': True,
'tenant_id': 'should-be-an-admin'}]
# A network request with a duplicate
self.nets6 = []
self.nets6.append(self.nets1[0])
self.nets6.append(self.nets1[0])
# A network request with a combo
self.nets7 = []
self.nets7.append(self.nets2[1])
self.nets7.append(self.nets1[0])
self.nets7.append(self.nets2[1])
self.nets7.append(self.nets1[0])
# A network request with only external network
self.nets8 = [self.nets5[1]]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
self.nets5, self.nets6, self.nets7, self.nets8]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
'device_id': self.instance2['uuid'],
'device_owner': 'compute:nova',
'id': 'my_portid1',
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.float_data1 = [{'port_id': 'my_portid1',
'fixed_ip_address': self.port_address,
'floating_ip_address': '172.0.1.2'}]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}],
'status': 'ACTIVE',
'admin_state_up': True}]
self.port_address2 = '10.0.2.2'
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': 'my_netid2',
'device_id': self.instance['uuid'],
'admin_state_up': True,
'status': 'ACTIVE',
'device_owner': 'compute:nova',
'id': 'my_portid2',
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'fixed_ips':
[{'ip_address': self.port_address2,
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.float_data2 = []
self.float_data2.append(self.float_data1[0])
self.float_data2.append({'port_id': 'my_portid2',
'fixed_ip_address': '10.0.2.2',
'floating_ip_address': '172.0.2.2'})
self.port_data3 = [{'network_id': 'my_netid1',
'device_id': 'device_id3',
'status': 'DOWN',
'admin_state_up': True,
'device_owner': 'compute:nova',
'id': 'my_portid3',
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data_n = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
{'id': 'my_subid2',
'cidr': '20.0.1.0/24',
'network_id': 'my_netid2',
'gateway_ip': '20.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
'name': 'ext_net',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
'name': 'nova',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_unassociated = {'tenant_id': 'my_tenantid',
'id': 'fip_id1',
'floating_ip_address': '172.24.4.227',
'floating_network_id': self.fip_pool['id'],
'port_id': None,
'fixed_ip_address': None,
'router_id': None}
fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
self.fip_associated = {'tenant_id': 'my_tenantid',
'id': 'fip_id2',
'floating_ip_address': '172.24.4.228',
'floating_network_id': self.fip_pool['id'],
'port_id': self.port_data2[1]['id'],
'fixed_ip_address': fixed_ip_address,
'router_id': 'router_id1'}
self._returned_nw_info = []
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
self.addCleanup(CONF.reset)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.stubs.UnsetAll)
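  # _stub_allocate_for_instance() below records, on the mocked neutron client,
  # the show_port/list_networks/create_port/update_port calls that
  # allocate_for_instance() is expected to make for the requested networks,
  # then puts mox into replay mode and returns the API object under test.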
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
# TODO(mriedem): Remove this conversion when all neutronv2 APIs are
# converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
self.instance2 = fake_instance.fake_instance_obj(self.context,
**self.instance2)
api = neutronapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
has_portbinding = False
has_extra_dhcp_opts = False
dhcp_options = kwargs.get('dhcp_options')
if dhcp_options is not None:
has_extra_dhcp_opts = True
if kwargs.get('portbinding'):
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
neutronv2.get_client(
mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn(
self.moxed_client)
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
else:
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
macs = kwargs.get('macs')
if macs:
macs = set(macs)
req_net_ids = []
ordered_networks = []
port = {}
if 'requested_networks' in kwargs:
for request in kwargs['requested_networks']:
if request.port_id:
if request.port_id == 'my_portid3':
self.moxed_client.show_port(request.port_id
).AndReturn(
{'port': {'id': 'my_portid3',
'network_id': 'my_netid1',
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2.uuid or
''}})
ports['my_netid1'] = [self.port_data1[0],
self.port_data3[0]]
ports[request.port_id] = self.port_data3[0]
request.network_id = 'my_netid1'
if macs is not None:
macs.discard('my_mac1')
else:
self.moxed_client.show_port(request.port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1',
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2.uuid or
''}})
ports[request.port_id] = self.port_data1[0]
request.network_id = 'my_netid1'
if macs is not None:
macs.discard('my_mac1')
else:
fixed_ips[request.network_id] = request.address
req_net_ids.append(request.network_id)
ordered_networks.append(request)
else:
for n in nets:
ordered_networks.append(
objects.NetworkRequest(network_id=n['id']))
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
# search all req_net_ids as in api.py
search_ids = req_net_ids
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance.project_id,
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
if (('requested_networks' not in kwargs or
kwargs['requested_networks'].as_tuples() == [(None, None, None)])
and len(nets) > 1):
self.mox.ReplayAll()
return api
ports_in_requested_net_order = []
nets_in_requested_net_order = []
for request in ordered_networks:
port_req_body = {
'port': {
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
# Network lookup for available network_id
network = None
for net in nets:
if net['id'] == request.network_id:
network = net
break
            # If the net_id did not pass validate_networks() and is not
            # available here, skip it safely rather than continuing with a
            # None network.
else:
continue
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
if not has_portbinding:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
else:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
if request.port_id:
port = ports[request.port_id]
self.moxed_client.update_port(request.port_id,
MyComparator(port_req_body)
).AndReturn(
{'port': port})
ports_in_requested_net_order.append(request.port_id)
else:
request.address = fixed_ips.get(request.network_id)
if request.address:
port_req_body['port']['fixed_ips'] = [
{'ip_address': str(request.address)}]
port_req_body['port']['network_id'] = request.network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance.project_id
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
res_port = {'port': {'id': 'fake'}}
if has_extra_dhcp_opts:
port_req_body['port']['extra_dhcp_opts'] = dhcp_options
if kwargs.get('_break') == 'mac' + request.network_id:
self.mox.ReplayAll()
return api
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
ports_in_requested_net_order.append(res_port['port']['id'])
nets_in_requested_net_order.append(network)
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets_in_requested_net_order,
port_ids=ports_in_requested_net_order
).AndReturn(self._returned_nw_info)
self.mox.ReplayAll()
return api
def _verify_nw_info(self, nw_inf, index=0):
id_suffix = index + 1
self.assertEqual('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEqual('172.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
self.assertEqual('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEqual('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
version=4, type='dns')
self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
nets = number == 1 and self.nets1 or self.nets2
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
        # This intentionally does not wrap net_info_cache in jsonutils.dumps(),
        # to exercise the other code path where the cache is not unicode.
instance['info_cache'] = {'network_info': net_info_cache}
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
net_ids = [port['network_id'] for port in port_data]
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
for i in xrange(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
if x['fixed_ip_address'] == ip['ip_address']]
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[i - 1]['id']).AndReturn(
{'floatingips': float_data})
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context, instance)
for i in xrange(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = self._stub_allocate_for_instance(net_idx, **kwargs)
return api.allocate_for_instance(self.context, self.instance, **kwargs)
class TestNeutronv2(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets_add_interface(self):
        # This tests that adding an interface to an instance does not
        # remove the first interface from the instance's network info.
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': self.port_data2[0]['id'],
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
self.nets2,
[self.port_data2[1]['id']])
def test_get_instance_nw_info_remove_ports_from_neutron(self):
        # This tests that when a port is removed in neutron it
        # is also removed from the nova network cache.
network_model = model.Network(id=self.port_data2[0]['network_id'],
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
None,
None)
    def test_get_instance_nw_info_ignores_neutron_ports(self):
# Tests that only ports in the network_cache are updated
# and ports returned from neutron that match the same
# instance_id/device_id are ignored.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
"""Helper function to test get_instance_nw_info.
:param network_cache - data already in the nova network cache.
:param current_neutron_ports - updated list of ports from neutron.
:param networks - networks of ports being added to instance.
:param port_ids - new ports being added to instance.
"""
# keep a copy of the original ports/networks to pass to
# get_instance_nw_info() as the code below changes them.
original_port_ids = copy.copy(port_ids)
original_networks = copy.copy(networks)
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': current_neutron_ports})
ifaces = network_cache['info_cache']['network_info']
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
nets = [{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
name=iface['network']['label'],
tenant_id=iface['network']['meta']['tenant_id'])
for iface in ifaces]
port_ids = [iface['id'] for iface in ifaces] + port_ids
index = 0
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
for ip in current_neutron_port['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=current_neutron_port['id']).AndReturn(
{'floatingips': [self.float_data2[index]]})
self.moxed_client.list_subnets(
id=mox.SameElementsAs([ip['subnet_id']])
).AndReturn(
{'subnets': [self.subnet_data_n[index]]})
self.moxed_client.list_ports(
network_id=current_neutron_port['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
index += 1
self.mox.ReplayAll()
self.instance['info_cache'] = network_cache
instance = copy.copy(self.instance)
instance['info_cache'] = network_cache['info_cache']
nw_infs = api.get_instance_nw_info(self.context,
instance,
networks=original_networks,
port_ids=original_port_ids)
self.assertEqual(index, len(nw_infs))
# ensure that nic ordering is preserved
for iface_index in range(index):
self.assertEqual(nw_infs[iface_index]['id'],
port_ids[iface_index])
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
id=[self.port_data1[0]['network_id']]).AndReturn(
{'networks': self.nets1})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
instance)
id_suffix = 3
self.assertEqual(0, len(nw_inf.fixed_ips()))
self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
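        # After the refresh the cache should be keyed by extension name.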
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
self.assertEqual(
{constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.QOS_QUEUE}]})
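        # With the QoS queue extension present, the flavor's rxtx_factor
        # should be copied into the port request body.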
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 1
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, flavor))
instance = {'system_metadata': sys_meta}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
None, port_req_body)
self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
api = self._stub_allocate_for_instance(net_idx=2)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted as a set; the
        # _allocate_for_instance helper checks that the mac is used to
        # create a port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
result = self._allocate_for_instance(
requested_networks=objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')]))
self.assertEqual(self.port_data1, result)
def test_allocate_for_instance_not_enough_macs_via_ports(self):
        # Using a hypervisor MAC via a pre-created port prevents it from
        # being used to dynamically create a port on a network. The network
        # is listed first in requested_networks so that, if the code did not
        # pre-check requested ports, it would incorrectly assign the MAC and
        # not fail.
requested_networks = objects.NetworkRequestList(
objects = [
objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac1']))
def test_allocate_for_instance_not_enough_macs(self):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
with mock.patch.object(api, '_delete_ports'):
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance,
requested_networks=requested_networks,
macs=set(['my_mac2']))
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
objects.NetworkRequest(network_id=self.nets2[0]['id'])])
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_mac_conflicting_requested_port(self):
        # Request a pre-created port whose MAC is not in the allowed macs set.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
def test_allocate_for_instance_without_requested_networks(self):
api = self._stub_allocate_for_instance(net_idx=3)
self.assertRaises(exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_with_requested_non_available_network(self):
"""verify that a non available network is ignored.
self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
Do not create a port on a non available network self.nets3[2].
"""
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
self._allocate_for_instance(net_idx=2,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks(self):
        # specify the three networks in a non-default order
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
        # specify the first network together with a fixed IP address
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
address='10.0.1.0')])
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': model.NetworkInfo([])})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance)
self.assertEqual(len(nwinfo), 0)
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets2[0], self.nets2[1])])
self.moxed_client.list_networks(
id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'tenant_id': self.instance.project_id,
},
}
port_req_body['port'].update(binding_port_req_body['port'])
port = {'id': 'portid_' + network['id']}
api._populate_neutron_extension_values(self.context,
self.instance, None, binding_port_req_body).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
NeutronOverQuota = exceptions.OverQuotaClient()
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
index += 1
self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
self.mox.ReplayAll()
self.assertRaises(exception.PortLimitExceeded,
api.allocate_for_instance,
self.context, self.instance,
requested_networks=requested_networks)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets2[0], self.nets2[1])])
self.moxed_client.list_networks(
id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
binding_port_req_body = {
'port': {
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance.uuid,
'tenant_id': self.instance.project_id,
},
}
api._populate_neutron_extension_values(self.context,
self.instance, None, binding_port_req_body).AndReturn(None)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance,
requested_networks=requested_networks)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
api._get_available_networks(self.context, self.instance.project_id,
[]).AndRaise(BailOutEarly)
self.mox.ReplayAll()
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest()])
self.assertRaises(BailOutEarly,
api.allocate_for_instance,
self.context, self.instance,
requested_networks=requested_networks)
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': 'fake'}
self._returned_nw_info = self.port_data1 + [new_port]
nw_info = self._allocate_for_instance()
self.assertEqual(nw_info, [new_port])
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='my_portid1')])
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
def test_allocate_for_instance_with_externalnet_forbidden(self):
"""Only one network is available, it's external, and the client
is unauthorized to use it.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
# no networks in the tenant
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
# external network is shared
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': self.nets8})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.ExternalNetworkAttachForbidden,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_with_externalnet_multiple(self):
"""Multiple networks are available, one the client is authorized
to use, and an external one the client is unauthorized to use.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
# network found in the tenant
self.moxed_client.list_networks(
tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
# external network is shared
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': self.nets8})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(
exception.NetworkAmbiguous,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_with_externalnet_admin_ctx(self):
"""Only one network is available, it's external, and the client
is authorized.
"""
admin_ctx = context.RequestContext('userid', 'my_tenantid',
is_admin=True)
api = self._stub_allocate_for_instance(net_idx=8)
api.allocate_for_instance(admin_ctx, self.instance)
def _deallocate_for_instance(self, number, requested_networks=None):
# TODO(mriedem): Remove this conversion when all neutronv2 APIs are
# converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
api = neutronapi.API()
        port_data = self.port_data1 if number == 1 else self.port_data2
ret_data = copy.deepcopy(port_data)
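        # Requested (pre-existing) ports are appended to the list_ports
        # result; deallocate_for_instance is expected to unbind them via
        # update_port while the remaining ports are deleted.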
if requested_networks:
if isinstance(requested_networks, objects.NetworkRequestList):
# NOTE(danms): Temporary and transitional
with mock.patch('nova.utils.is_neutron', return_value=True):
requested_networks = requested_networks.as_tuples()
for net, fip, port, request_id in requested_networks:
ret_data.append({'network_id': net,
'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
'id': port,
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [],
'mac_address': 'fake_mac', })
self.moxed_client.list_ports(
device_id=self.instance.uuid).AndReturn(
{'ports': ret_data})
if requested_networks:
for net, fip, port, request_id in requested_networks:
self.moxed_client.update_port(port)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id'])
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(self.context,
self.instance.uuid,
{'network_info': '[]'})
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance,
requested_networks=requested_networks)
def test_deallocate_for_instance_1_with_requested(self):
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id='fake-port')])
# Test to deallocate in one port env.
self._deallocate_for_instance(1, requested_networks=requested)
def test_deallocate_for_instance_2_with_requested(self):
requested = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='fake-net',
address='1.2.3.4',
port_id='fake-port')])
        # Test to deallocate in two ports env.
self._deallocate_for_instance(2, requested_networks=requested)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_deallocate_for_instance_port_not_found(self):
# TODO(mriedem): Remove this conversion when all neutronv2 APIs are
# converted to handling instance objects.
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
port_data = self.port_data1
self.moxed_client.list_ports(
device_id=self.instance.uuid).AndReturn(
{'ports': port_data})
NeutronNotFound = exceptions.NeutronClientException(status_code=404)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id']).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def _test_deallocate_port_for_instance(self, number):
        port_data = self.port_data1 if number == 1 else self.port_data2
        nets = self.nets1 if number == 1 else self.nets2
self.moxed_client.delete_port(port_data[0]['id'])
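        # The first port is deleted; the remaining ports drive the rebuilt
        # network info returned by deallocate_port_for_instance.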
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data[1:]})
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
net_ids = [port['network_id'] for port in port_data]
self.moxed_client.list_networks(id=net_ids).AndReturn(
{'networks': nets})
        float_data = self.float_data1 if number == 1 else self.float_data2
for data in port_data[1:]:
for ip in data['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=data['id']).AndReturn(
{'floatingips': float_data[1:]})
for port in port_data[1:]:
self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
self.mox.ReplayAll()
nwinfo = api.deallocate_port_for_instance(self.context, instance,
port_data[0]['id'])
self.assertEqual(len(nwinfo), len(port_data[1:]))
if len(port_data) > 1:
self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
self.moxed_client.show_port('foo')
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
def test_validate_networks(self):
requested_networks = [('my_netid1', None, None, None),
('my_netid2', None, None, None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_without_port_quota_on_network_side(self):
requested_networks = [('my_netid1', None, None, None),
('my_netid2', None, None, None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', None, None, None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1'])).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2", six.text_type(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', None, None, None),
('my_netid2', None, None, None),
('my_netid3', None, None, None)]
ids = ['my_netid1', 'my_netid2', 'my_netid3']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2, my_netid3", six.text_type(ex))
def test_validate_networks_duplicate_disable(self):
"""Verify that the correct exception is thrown when duplicate
network ids are passed to validate_networks, when nova config flag
allow_duplicate_networks is set to its default value: False
"""
requested_networks = [('my_netid1', None, None, None),
('my_netid1', None, None, None)]
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_duplicate_enable(self):
"""Verify that no duplicateNetworks exception is thrown when duplicate
network ids are passed to validate_networks, when nova config flag
allow_duplicate_networks is set to its non default value: True
"""
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid1'),
objects.NetworkRequest(network_id='my_netid1')])
ids = ['my_netid1', 'my_netid1']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_allocate_for_instance_with_requested_networks_duplicates(self):
# specify a duplicate network to allocate to instance
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=net['id'])
for net in (self.nets6[0], self.nets6[1])])
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_port(self):
# specify first port and last port that are in same network
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port['id'])
for port in (self.port_data1[0], self.port_data3[0])])
self._allocate_for_instance(net_idx=6,
requested_networks=requested_networks)
def test_allocate_for_instance_requested_networks_duplicates_combo(self):
# specify a combo net_idx=7 : net2, port in net1, net2, port in net1
self.flags(allow_duplicate_networks=True, group='neutron')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid2'),
objects.NetworkRequest(port_id=self.port_data1[0]['id']),
objects.NetworkRequest(network_id='my_netid2'),
objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
self._allocate_for_instance(net_idx=7,
requested_networks=requested_networks)
def test_validate_networks_not_specified(self):
requested_networks = objects.NetworkRequestList(objects=[])
self.moxed_client.list_networks(
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkAmbiguous,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_not_found(self):
        # Verify that the correct exception is thrown when a non-existent
        # port is passed to validate_networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id='my_netid1',
port_id='3123-ad34-bc43-32332ca33e')])
NeutronNotFound = exceptions.NeutronClientException(status_code=404)
self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.PortNotFound,
api.validate_networks,
self.context, requested_networks, 1)
    def test_validate_networks_port_show_raises_non404(self):
        # Verify that a non-404 error raised by show_port is re-raised by
        # validate_networks.
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id='my_netid1',
port_id='3123-ad34-bc43-32332ca33e')])
        NeutronError = exceptions.NeutronClientException(status_code=0)
        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
            NeutronError)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exceptions.NeutronClientException,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_in_use(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
self.moxed_client.show_port(self.port_data3[0]['id']).\
AndReturn({'port': self.port_data3[0]})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortInUse,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_no_subnet_id(self):
port_a = self.port_data3[0]
port_a['device_id'] = None
port_a['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id'])])
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortRequiresFixedIP,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_no_subnet_id(self):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='his_netid4')])
ids = ['his_netid4']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets4})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkRequiresSubnet,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_in_same_network_disable(self):
"""Verify that duplicateNetworks exception is thrown when ports on same
duplicate network are passed to validate_networks, when nova config
flag allow_duplicate_networks is set to its default False
"""
self.flags(allow_duplicate_networks=False, group='neutron')
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_a['id']).AndReturn(
{'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn(
{'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_in_same_network_enable(self):
"""Verify that duplicateNetworks exception is not thrown when ports
on same duplicate network are passed to validate_networks, when nova
config flag allow_duplicate_networks is set to its True
"""
self.flags(allow_duplicate_networks=True, group='neutron')
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_a['id']).AndReturn(
{'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn(
{'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_no_quota(self):
# Test validation for a request for one instance needing
# two ports, where the quota is 2 and 2 ports are in use
# => instances which can be created = 0
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid1'),
objects.NetworkRequest(network_id='my_netid2')])
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 2}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 0)
def test_validate_networks_with_ports_and_networks(self):
# Test validation for a request for one instance needing
# one port allocated via nova with another port being passed in.
port_b = self.port_data2[1]
port_b['device_id'] = None
port_b['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid1'),
objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
ids = ['my_netid1']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 5}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 1)
def test_validate_networks_one_port_and_no_networks(self):
        # Test that show_quota is not called when only ports, and no
        # networks, are passed in.
port_b = self.port_data2[1]
port_b['device_id'] = None
port_b['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 1)
def test_validate_networks_some_quota(self):
        # Test validation for a request for two instances needing
        # two ports each, where the quota is 5 and 2 ports are in use
        # => instances which can be created = 1
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid1'),
objects.NetworkRequest(network_id='my_netid2')])
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 5}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 1)
def test_validate_networks_unlimited_quota(self):
        # Test validation for a request for two instances needing
        # two ports each, where the quota is -1 (unlimited)
        # => instances which can be created = 2
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='my_netid1'),
objects.NetworkRequest(network_id='my_netid2')])
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': -1}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 2)
def test_validate_networks_no_quota_but_ports_supplied(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=port_a['id']),
objects.NetworkRequest(port_id=port_b['id'])])
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 1)
def _mock_list_ports(self, port_data=None):
if port_data is None:
port_data = self.port_data2
address = self.port_address
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = neutronapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets,
req_ids=None, context=None):
api = neutronapi.API()
nets = prv_nets + pub_nets
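        # With specific network ids a single list_networks call is expected;
        # otherwise tenant-private and shared networks are listed separately.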
if req_ids:
mox_list_params = {'id': req_ids}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': prv_nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(
context if context else self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_available_networks_with_custom_policy(self):
rules = {'network:attach_external_network':
common_policy.parse_rule('')}
policy.set_rules(rules)
req_ids = [net['id'] for net in self.nets5]
self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip_id': fip_data['port_id'],
'fixed_ip':
{'address': fip_data['fixed_ip_address']},
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
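        # For an associated floating IP the fixed IP entry also carries the
        # instance uuid.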
if expected['instance'] is not None:
expected['fixed_ip']['instance_uuid'] = \
expected['instance']['uuid']
return expected
def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
api = neutronapi.API()
fip_id = fip_data['id']
net_id = fip_data['floating_network_id']
address = fip_data['floating_ip_address']
if by_address:
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
else:
self.moxed_client.show_floatingip(fip_id).\
AndReturn({'floatingip': fip_data})
self.moxed_client.show_network(net_id).\
AndReturn({'network': self.fip_pool})
if fip_data['port_id']:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[idx]})
self.mox.ReplayAll()
expected = self._get_expected_fip_model(fip_data, idx)
if by_address:
fip = api.get_floating_ip_by_address(self.context, address)
else:
fip = api.get_floating_ip(self.context, fip_id)
self.assertEqual(expected, fip)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0,
by_address=True)
def test_get_floating_ip_by_address_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1,
by_address=True)
def test_get_floating_ip_by_address_not_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': []})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ip_by_id_not_found(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(status_code=404)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFound,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_raises_non404(self):
api = neutronapi.API()
        NeutronError = exceptions.NeutronClientException(status_code=0)
        floating_ip_id = self.fip_unassociated['id']
        self.moxed_client.show_floatingip(floating_ip_id).\
            AndRaise(NeutronError)
self.mox.ReplayAll()
self.assertRaises(exceptions.NeutronClientException,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated] * 2})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ips_by_project(self):
api = neutronapi.API()
project_id = self.context.project_id
self.moxed_client.list_floatingips(tenant_id=project_id).\
AndReturn({'floatingips': [self.fip_unassociated,
self.fip_associated]})
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.moxed_client.list_ports(tenant_id=project_id).\
AndReturn({'ports': self.port_data2})
self.mox.ReplayAll()
expected = [self._get_expected_fip_model(self.fip_unassociated),
self._get_expected_fip_model(self.fip_associated, idx=1)]
fips = api.get_floating_ips_by_project(self.context)
self.assertEqual(expected, fips)
def _test_get_instance_id_by_floating_address(self, fip_data,
associated=False):
api = neutronapi.API()
address = fip_data['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
if associated:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.ReplayAll()
if associated:
expected = self.port_data2[1]['device_id']
else:
expected = None
fip = api.get_instance_id_by_floating_address(self.context, address)
self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
self._test_get_instance_id_by_floating_address(self.fip_associated,
associated=True)
def test_allocate_floating_ip(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, 'ext_net')
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_addr_gen_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.IpAddressGenerationFailureClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_exhausted_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.ExternalIpAddressExhaustedClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_with_pool_id(self):
api = neutronapi.API()
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'id': pool_id}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, pool_id)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_default_pool(self):
api = neutronapi.API()
pool_name = self.fip_pool_nova['name']
pool_id = self.fip_pool_nova['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool_nova]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
def test_disassociate_and_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
floating_ip = {'address': address}
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.disassociate_and_release_floating_ip(self.context, None,
floating_ip)
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpAssociated,
api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
nw_info = self.mox.CreateMock(model.NetworkInfo)
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
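        # One network info fetch and one info cache update is expected for
        # each instance whose cache gets refreshed.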
for instance in instances:
nw_info.json()
api._get_instance_nw_info(mox.IgnoreArg(), instance).\
AndReturn(nw_info)
api.db.instance_info_cache_update(mox.IgnoreArg(),
instance['uuid'],
mox.IgnoreArg())
def test_associate_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fixed_address = self.port_address2
fip_id = self.fip_unassociated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[1]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
'fixed_ip_address': fixed_address}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance,
address, fixed_address)
@mock.patch('nova.objects.Instance.get_by_uuid')
def test_reassociate_floating_ip(self, mock_get):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
new_fixed_address = self.port_address
fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance2['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': 'my_portid1',
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
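        # Reassociating the floating IP refreshes the network cache of both
        # the instance losing the address and the one gaining it.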
mock_get.return_value = fake_instance.fake_instance_obj(
self.context, **self.instance)
self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
self.instance2])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance2,
address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.associate_floating_ip, self.context,
self.instance, address, fixed_address)
def test_disassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fip_id = self.fip_associated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': None}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
def test_add_fixed_ip_to_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
network_id = 'my_netid1'
search_opts = {'network_id': network_id}
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
search_opts = {'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'network_id': network_id}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [{'subnet_id': 'my_subid1'},
{'subnet_id': 'my_subid1'}],
},
}
port = self.port_data1[0]
port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
def test_remove_fixed_ip_from_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
address = '10.0.0.3'
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [],
},
}
port = self.port_data1[0]
port['fixed_ips'] = []
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.remove_fixed_ip_from_instance(self.context, self.instance, address)
def test_list_floating_ips_without_l3_support(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(
status_code=404)
self.moxed_client.list_floatingips(
fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
self.mox.ReplayAll()
neutronv2.get_client('fake')
floatingips = api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 1)
self.assertEqual(floatingips, [])
def test_nw_info_get_ips(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'}],
'id': 'port-id',
}
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.ReplayAll()
neutronv2.get_client('fake')
result = api._nw_info_get_ips(self.moxed_client, fake_port)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['address'], '1.1.1.1')
self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
def test_nw_info_get_subnets(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}],
'id': 'port-id',
}
fake_subnet = model.Subnet(cidr='1.0.0.0/8')
fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
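        # Only the fixed IP inside the subnet's CIDR (1.1.1.1) should be
        # attached to the returned subnet; 2.2.2.2 is outside 1.0.0.0/8.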
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(self.context, fake_port).AndReturn(
[fake_subnet])
self.mox.ReplayAll()
neutronv2.get_client('fake')
subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
self.assertEqual(len(subnets), 1)
self.assertEqual(len(subnets[0]['ips']), 1)
self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
def _test_nw_info_build_network(self, vif_type):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(net['subnets'], fake_subnets)
self.assertEqual(net['id'], 'net-id')
self.assertEqual(net['label'], 'foo')
self.assertEqual(net.get_meta('tenant_id'), 'tenant')
self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
return net, iid
def test_nw_info_build_network_ovs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
self.assertNotIn('should_create_bridge', net)
self.assertEqual(iid, 'port-id')
def test_nw_info_build_network_dvs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
self.assertEqual('foo-net-id', net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertNotIn('ovs_interfaceid', net)
self.assertIsNone(iid)
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual(net['bridge'], 'brqnet-id')
self.assertTrue(net['should_create_bridge'])
self.assertIsNone(iid)
def test_nw_info_build_network_other(self):
net, iid = self._test_nw_info_build_network(None)
self.assertIsNone(net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertIsNone(iid)
def test_nw_info_build_no_match(self):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id1',
'tenant_id': 'tenant',
'binding:vif_type': model.VIF_TYPE_OVS,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id1', net['id'])
self.assertEqual('tenant', net['meta']['tenant_id'])
def test_build_network_info_model(self):
api = neutronapi.API()
fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
'info_cache': {'network_info': []}}
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=False and status='DOWN' thus vif.active=True
{'id': 'port2',
'network_id': 'net-id',
'admin_state_up': False,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:02',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=True and status='DOWN' thus vif.active=False
{'id': 'port0',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:03',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port3',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:04',
'binding:vif_type': model.VIF_TYPE_HW_VEB,
'binding:vnic_type': model.VNIC_TYPE_DIRECT,
'binding:profile': {'pci_vendor_info': '1137:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'},
'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port4',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:05',
'binding:vif_type': model.VIF_TYPE_802_QBH,
'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
'binding:profile': {'pci_vendor_info': '1137:0047',
'pci_slot': '0000:0a:00.2',
'physical_network': 'phynet1'},
'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
},
# admin_state_up=True and status='ACTIVE' thus vif.active=True
# This port has no binding:vnic_type to verify default is assumed
{'id': 'port5',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:06',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
# No binding:vnic_type
'binding:vif_details': {},
},
# This does not match the networks we provide below,
# so it should be ignored (and is here to verify that)
{'id': 'port6',
'network_id': 'other-net-id',
'admin_state_up': True,
'status': 'DOWN',
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [
{'id': 'net-id',
'name': 'foo',
'tenant_id': 'fake',
}
]
neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
fake_ports[3], fake_ports[4], fake_ports[5]]
for requested_port in requested_ports:
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
for requested_port in requested_ports:
api._get_subnets_from_port(self.context, requested_port
).AndReturn(fake_subnets)
self.mox.ReplayAll()
neutronv2.get_client('fake')
nw_infos = api._build_network_info_model(self.context, fake_inst,
fake_nets,
[fake_ports[2]['id'],
fake_ports[0]['id'],
fake_ports[1]['id'],
fake_ports[3]['id'],
fake_ports[4]['id'],
fake_ports[5]['id']])
self.assertEqual(len(nw_infos), 6)
index = 0
for nw_info in nw_infos:
self.assertEqual(nw_info['address'],
requested_ports[index]['mac_address'])
self.assertEqual(nw_info['devname'], 'tapport' + str(index))
self.assertIsNone(nw_info['ovs_interfaceid'])
self.assertEqual(nw_info['type'],
requested_ports[index]['binding:vif_type'])
if nw_info['type'] == model.VIF_TYPE_BRIDGE:
self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
self.assertEqual(nw_info['vnic_type'],
requested_ports[index].get('binding:vnic_type',
model.VNIC_TYPE_NORMAL))
self.assertEqual(nw_info.get('details'),
requested_ports[index].get('binding:vif_details'))
self.assertEqual(nw_info.get('profile'),
requested_ports[index].get('binding:profile'))
index += 1
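        # The expected 'active' flags below follow the VIF activity rule implied by the
        # port comments above (an assumption about the implementation under test): a VIF
        # is active when the port status is 'ACTIVE' or when admin_state_up is False.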
self.assertEqual(nw_infos[0]['active'], False)
self.assertEqual(nw_infos[1]['active'], True)
self.assertEqual(nw_infos[2]['active'], True)
self.assertEqual(nw_infos[3]['active'], True)
self.assertEqual(nw_infos[4]['active'], True)
self.assertEqual(nw_infos[5]['active'], True)
self.assertEqual(nw_infos[0]['id'], 'port0')
self.assertEqual(nw_infos[1]['id'], 'port1')
self.assertEqual(nw_infos[2]['id'], 'port2')
self.assertEqual(nw_infos[3]['id'], 'port3')
self.assertEqual(nw_infos[4]['id'], 'port4')
self.assertEqual(nw_infos[5]['id'], 'port5')
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_subnets')
@mock.patch('nova.network.neutronv2.api.API._nw_info_get_ips')
@mock.patch('nova.network.neutronv2.api.API._nw_info_build_network')
@mock.patch('nova.network.neutronv2.api.API._gather_port_ids_and_networks')
def test_build_network_info_model_empty(
self, mock_gather_port_ids_and_networks,
mock_nw_info_build_network,
mock_nw_info_get_ips,
mock_nw_info_get_subnets):
api = neutronapi.API()
fake_inst = objects.Instance()
fake_inst.project_id = 'fake'
fake_inst.uuid = 'uuid'
fake_inst.info_cache = objects.InstanceInfoCache()
fake_inst.info_cache.network_info = model.NetworkInfo()
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
'binding:vnic_type': model.VNIC_TYPE_NORMAL,
'binding:vif_details': {},
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
mock_gather_port_ids_and_networks.return_value = (None, None)
mock_nw_info_build_network.return_value = (None, None)
mock_nw_info_get_ips.return_value = []
mock_nw_info_get_subnets.return_value = fake_subnets
self.mox.ReplayAll()
neutronv2.get_client('fake')
nw_infos = api._build_network_info_model(
self.context, fake_inst)
self.assertEqual(len(nw_infos), 1)
def test_get_subnets_from_port(self):
api = neutronapi.API()
port_data = copy.copy(self.port_data1[0])
subnet_data1 = copy.copy(self.subnet_data1)
subnet_data1[0]['host_routes'] = [
{'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
]
self.moxed_client.list_subnets(
id=[port_data['fixed_ips'][0]['subnet_id']]
).AndReturn({'subnets': subnet_data1})
self.moxed_client.list_ports(
network_id=subnet_data1[0]['network_id'],
device_owner='network:dhcp').AndReturn({'ports': []})
self.mox.ReplayAll()
subnets = api._get_subnets_from_port(self.context, port_data)
self.assertEqual(len(subnets), 1)
self.assertEqual(len(subnets[0]['routes']), 1)
self.assertEqual(subnets[0]['routes'][0]['cidr'],
subnet_data1[0]['host_routes'][0]['destination'])
self.assertEqual(subnets[0]['routes'][0]['gateway']['address'],
subnet_data1[0]['host_routes'][0]['nexthop'])
def test_get_all_empty_list_networks(self):
api = neutronapi.API()
self.moxed_client.list_networks().AndReturn({'networks': []})
self.mox.ReplayAll()
networks = api.get_all(self.context)
self.assertEqual(networks, [])
def test_get_floating_ips_by_fixed_address(self):
# NOTE(lbragstad): We need to reset the mocks in order to assert
# a NotImplementedError is raised when calling the method under test.
self.mox.ResetAll()
fake_fixed = '192.168.1.4'
api = neutronapi.API()
self.assertRaises(NotImplementedError,
api.get_floating_ips_by_fixed_address,
self.context, fake_fixed)
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_get_port_vnic_info_1(self, mock_get_client):
api = neutronapi.API()
self.mox.ResetAll()
test_port = {
'port': {'id': 'my_port_id1',
'network_id': 'net-id',
'binding:vnic_type': model.VNIC_TYPE_DIRECT,
},
}
test_net = {'network': {'provider:physical_network': 'phynet1'}}
mock_client = mock_get_client()
mock_client.show_port.return_value = test_port
mock_client.show_network.return_value = test_net
vnic_type, phynet_name = api._get_port_vnic_info(
self.context, mock_client, test_port['port']['id'])
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'network_id'])
mock_client.show_network.assert_called_once_with(
test_port['port']['network_id'],
fields='provider:physical_network')
self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
self.assertEqual(phynet_name, 'phynet1')
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def _test_get_port_vnic_info(self, mock_get_client,
binding_vnic_type=None):
api = neutronapi.API()
self.mox.ResetAll()
test_port = {
'port': {'id': 'my_port_id2',
'network_id': 'net-id',
},
}
if binding_vnic_type:
test_port['port']['binding:vnic_type'] = binding_vnic_type
mock_client = mock_get_client()
mock_client.show_port.return_value = test_port
vnic_type, phynet_name = api._get_port_vnic_info(
self.context, mock_client, test_port['port']['id'])
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'network_id'])
self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
self.assertFalse(phynet_name)
def test_get_port_vnic_info_2(self):
self._test_get_port_vnic_info(binding_vnic_type=model.VNIC_TYPE_NORMAL)
def test_get_port_vnic_info_3(self):
self._test_get_port_vnic_info()
@mock.patch.object(neutronapi.API, "_get_port_vnic_info")
@mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
mock_get_port_vnic_info):
api = neutronapi.API()
self.mox.ResetAll()
requested_networks = objects.NetworkRequestList(
            objects=[
objects.NetworkRequest(port_id='my_portid1'),
objects.NetworkRequest(network_id='net1'),
objects.NetworkRequest(port_id='my_portid2'),
objects.NetworkRequest(port_id='my_portid3'),
objects.NetworkRequest(port_id='my_portid4')])
pci_requests = objects.InstancePCIRequests(requests=[])
mock_get_port_vnic_info.side_effect = [
(model.VNIC_TYPE_DIRECT, 'phynet1'),
(model.VNIC_TYPE_NORMAL, ''),
(model.VNIC_TYPE_MACVTAP, 'phynet1'),
(model.VNIC_TYPE_MACVTAP, 'phynet2')
]
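        # Per the vnic types mocked above, only the SR-IOV capable ports (direct and
        # macvtap) are expected to produce PCI requests; the normal port and the plain
        # network request should not.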
api.create_pci_requests_for_sriov_ports(
None, pci_requests, requested_networks)
self.assertEqual(3, len(pci_requests.requests))
has_pci_request_id = [net.pci_request_id is not None for net in
requested_networks.objects]
expected_results = [True, False, False, True, True]
self.assertEqual(expected_results, has_pci_request_id)
class TestNeutronv2WithMock(test.TestCase):
"""Used to test Neutron V2 API with mock."""
def setUp(self):
super(TestNeutronv2WithMock, self).setUp()
self.api = neutronapi.API()
self.context = context.RequestContext(
'fake-user', 'fake-project',
auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
@mock.patch('nova.openstack.common.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
instance = objects.Instance(uuid=uuid.uuid4())
api = neutronapi.API()
mock_lock.side_effect = test.TestingException
self.assertRaises(test.TestingException,
api.get_instance_nw_info, 'context', instance)
mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
ids, list_port_values):
def _fake_list_ports(**search_opts):
for args, return_value in list_port_values:
if args == search_opts:
return return_value
self.fail('Unexpected call to list_ports %s' % search_opts)
with contextlib.nested(
mock.patch.object(client.Client, 'list_ports',
side_effect=_fake_list_ports),
mock.patch.object(client.Client, 'list_networks',
return_value={'networks': nets}),
mock.patch.object(client.Client, 'show_quota',
return_value={'quota': {'port': 50}})) as (
list_ports_mock, list_networks_mock, show_quota_mock):
self.api.validate_networks(self.context, requested_networks, 1)
self.assertEqual(len(list_port_values),
len(list_ports_mock.call_args_list))
list_networks_mock.assert_called_once_with(id=ids)
show_quota_mock.assert_called_once_with(tenant_id='fake-project')
def test_validate_networks_fixed_ip_no_dup1(self):
        # Test validation of a request for a network with a fixed IP that is
        # not already in use because no fixed IPs are in use at all
nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'fake-project'}]
requested_networks = [('my_netid1', '10.0.1.2', None, None)]
ids = ['my_netid1']
list_port_values = [({'network_id': 'my_netid1',
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'},
{'ports': []}),
({'tenant_id': 'fake-project'},
{'ports': []})]
self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
ids, list_port_values)
def test_validate_networks_fixed_ip_no_dup2(self):
        # Test validation of a request for a network with a fixed IP that is
        # not already in use because it is not used on this network ID
nets2 = [{'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'fake-project'},
{'id': 'my_netid2',
'name': 'my_netname2',
'subnets': ['mysubnid2'],
'tenant_id': 'fake-project'}]
requested_networks = [('my_netid1', '10.0.1.2', None, None),
('my_netid2', '10.0.1.3', None, None)]
ids = ['my_netid1', 'my_netid2']
list_port_values = [({'network_id': 'my_netid1',
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'},
{'ports': []}),
({'network_id': 'my_netid2',
'fixed_ips': 'ip_address=10.0.1.3',
'fields': 'device_id'},
{'ports': []}),
({'tenant_id': 'fake-project'},
{'ports': []})]
self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
ids, list_port_values)
def test_validate_networks_fixed_ip_dup(self):
        # Test validation of a request for a network with a fixed IP
        # that is already in use
requested_networks = [('my_netid1', '10.0.1.2', None, None)]
list_port_mock_params = {'network_id': 'my_netid1',
'fixed_ips': 'ip_address=10.0.1.2',
'fields': 'device_id'}
list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}
with mock.patch.object(client.Client, 'list_ports',
return_value=list_port_mock_return) as (
list_ports_mock):
self.assertRaises(exception.FixedIpAlreadyInUse,
self.api.validate_networks,
self.context, requested_networks, 1)
list_ports_mock.assert_called_once_with(**list_port_mock_params)
def test_allocate_floating_ip_exceed_limit(self):
        # Verify that the correct exception is raised when the quota is exceeded
pool_name = 'dummy'
api = neutronapi.API()
with contextlib.nested(
mock.patch.object(client.Client, 'create_floatingip'),
mock.patch.object(api,
'_get_floating_ip_pool_id_by_name_or_id')) as (
create_mock, get_mock):
create_mock.side_effect = exceptions.OverQuotaClient()
self.assertRaises(exception.FloatingIpLimitExceeded,
api.allocate_floating_ip,
self.context, pool_name)
def test_create_port_for_instance_no_more_ip(self):
instance = fake_instance.fake_instance_obj(self.context)
net = {'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': instance['project_id']}
with mock.patch.object(client.Client, 'create_port',
side_effect=exceptions.IpAddressGenerationFailureClient()) as (
create_port_mock):
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
self.assertRaises(exception.NoMoreFixedIps,
self.api._create_port,
neutronv2.get_client(self.context),
instance, net['id'], port_req_body)
create_port_mock.assert_called_once_with(port_req_body)
@mock.patch.object(client.Client, 'create_port',
side_effect=exceptions.MacAddressInUseClient())
def test_create_port_for_instance_mac_address_in_use(self,
create_port_mock):
# Create fake data.
instance = fake_instance.fake_instance_obj(self.context)
net = {'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': instance['project_id']}
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone,
'mac_address': 'XX:XX:XX:XX:XX:XX'}}
available_macs = set(['XX:XX:XX:XX:XX:XX'])
# Run the code.
self.assertRaises(exception.PortInUse,
self.api._create_port,
neutronv2.get_client(self.context),
instance, net['id'], port_req_body,
available_macs=available_macs)
# Assert the calls.
create_port_mock.assert_called_once_with(port_req_body)
def test_get_network_detail_not_found(self):
api = neutronapi.API()
expected_exc = exceptions.NetworkNotFoundClient()
network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
with mock.patch.object(client.Client, 'show_network',
side_effect=expected_exc) as (
fake_show_network):
self.assertRaises(exception.NetworkNotFound,
api.get,
self.context,
network_uuid)
fake_show_network.assert_called_once_with(network_uuid)
def test_deallocate_for_instance_uses_delete_helper(self):
# setup fake data
instance = fake_instance.fake_instance_obj(self.context)
port_data = {'ports': [{'id': str(uuid.uuid4())}]}
ports = set([port['id'] for port in port_data.get('ports')])
api = neutronapi.API()
# setup mocks
mock_client = mock.Mock()
mock_client.list_ports.return_value = port_data
with contextlib.nested(
mock.patch.object(neutronv2, 'get_client',
return_value=mock_client),
mock.patch.object(api, '_delete_ports')
) as (
mock_get_client, mock_delete
):
# run the code
api.deallocate_for_instance(self.context, instance)
# assert the calls
mock_client.list_ports.assert_called_once_with(
device_id=instance.uuid)
mock_delete.assert_called_once_with(
mock_client, instance, ports, raise_if_fail=True)
def _test_delete_ports(self, expect_raise):
results = [exceptions.NeutronClientException, None]
mock_client = mock.Mock()
with mock.patch.object(mock_client, 'delete_port',
side_effect=results):
api = neutronapi.API()
api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
raise_if_fail=expect_raise)
def test_delete_ports_raise(self):
self.assertRaises(exceptions.NeutronClientException,
self._test_delete_ports, True)
def test_delete_ports_no_raise(self):
self._test_delete_ports(False)
def test_delete_ports_never_raise_404(self):
mock_client = mock.Mock()
mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
api = neutronapi.API()
api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
raise_if_fail=True)
mock_client.delete_port.assert_called_once_with('port1')
def test_deallocate_port_for_instance_fails(self):
mock_client = mock.Mock()
api = neutronapi.API()
with contextlib.nested(
mock.patch.object(neutronv2, 'get_client',
return_value=mock_client),
mock.patch.object(api, '_delete_ports',
side_effect=exceptions.Unauthorized),
mock.patch.object(api, 'get_instance_nw_info')
) as (
get_client, delete_ports, get_nw_info
):
self.assertRaises(exceptions.Unauthorized,
api.deallocate_port_for_instance,
self.context, instance={'uuid': 'fake'},
port_id='fake')
# make sure that we didn't try to reload nw info
self.assertFalse(get_nw_info.called)
class TestNeutronv2ModuleMethods(test.TestCase):
def test_gather_port_ids_and_networks_wrong_params(self):
api = neutronapi.API()
# Test with networks not None and port_ids is None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
[{'network': {'name': 'foo'}}], None)
# Test with networks is None and port_ids not None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
None, ['list', 'of', 'port_ids'])
def test_ensure_requested_network_ordering_no_preference_ids(self):
l = [1, 2, 3]
neutronapi._ensure_requested_network_ordering(
lambda x: x,
l,
None)
def test_ensure_requested_network_ordering_no_preference_hashes(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
None)
self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
def test_ensure_requested_network_ordering_with_preference(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
[1, 2, 3])
self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True)
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
self.mox.ReplayAll()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
None, port_req_body)
self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
self.assertFalse(port_req_body['port'].get('binding:profile'))
@mock.patch.object(pci_whitelist, 'get_pci_device_devspec')
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
def test_populate_neutron_extension_values_binding_sriov(self,
mock_get_instance_pci_devs,
mock_get_pci_device_devspec):
api = neutronapi.API()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
}
PciDevice = collections.namedtuple('PciDevice',
['vendor_id', 'product_id', 'address'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1',
}
mock_get_instance_pci_devs.return_value = [mydev]
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'phynet1'}
mock_get_pci_device_devspec.return_value = devspec
api._populate_neutron_binding_profile(instance,
pci_req_id, port_req_body)
self.assertEqual(port_req_body['port']['binding:profile'], profile)
def _test_update_port_binding_false(self, func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(False)
self.mox.ReplayAll()
func(*args)
def _test_update_port_binding_true(self, expected_bind_host,
func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
port_req_body = {'port':
{'binding:host_id': expected_bind_host}}
self.moxed_client.update_port('test1',
port_req_body).AndReturn(None)
self.mox.ReplayAll()
func(*args)
def _test_update_port_true_exception(self, expected_bind_host,
func_name, *args):
api = neutronapi.API()
func = getattr(api, func_name)
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
port_req_body = {'port':
{'binding:host_id': expected_bind_host}}
self.moxed_client.update_port('test1',
port_req_body).AndRaise(
Exception("fail to update port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
func,
*args)
def test_migrate_instance_finish_binding_false(self):
self._test_update_port_binding_false('migrate_instance_finish',
self.context, None,
{'dest_compute': 'fake'})
def test_migrate_instance_finish_binding_true(self):
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host'}
self._test_update_port_binding_true('dest_host',
'migrate_instance_finish',
self.context, self.instance,
migration)
def test_migrate_instance_finish_binding_true_exception(self):
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host'}
self._test_update_port_true_exception('dest_host',
'migrate_instance_finish',
self.context,
self.instance,
migration)
def test_setup_instance_network_on_host_false(self):
self._test_update_port_binding_false(
'setup_instance_network_on_host', self.context, None,
'fake_host')
def test_setup_instance_network_on_host_true(self):
self._test_update_port_binding_true('fake_host',
'setup_instance_network_on_host',
self.context,
self.instance,
'fake_host')
def test_setup_instance_network_on_host_exception(self):
self._test_update_port_true_exception(
'fake_host', 'setup_instance_network_on_host',
self.context, self.instance, 'fake_host')
def test_associate_not_implemented(self):
api = neutronapi.API()
self.assertRaises(NotImplementedError,
api.associate,
self.context, 'id')
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2ExtraDhcpOpts, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
self._allocate_for_instance(1, extra_dhcp_opts=False)
def test_allocate_for_instance_extradhcpopts(self):
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
class TestNeutronClientForAdminScenarios(test.TestCase):
def _test_get_client_for_admin(self, use_id=False, admin_context=False):
def client_mock(*args, **kwargs):
client.Client.httpclient = mock.MagicMock()
self.flags(auth_strategy=None, group='neutron')
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
if use_id:
self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
self.flags(admin_user_id='admin_user_id', group='neutron')
if admin_context:
my_context = context.get_admin_context()
else:
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
kwargs = {
'auth_url': CONF.neutron.admin_auth_url,
'password': CONF.neutron.admin_password,
'endpoint_url': CONF.neutron.url,
'auth_strategy': None,
'timeout': CONF.neutron.url_timeout,
'insecure': False,
'ca_cert': None,
'token': None}
if use_id:
kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
kwargs['user_id'] = CONF.neutron.admin_user_id
else:
kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
kwargs['username'] = CONF.neutron.admin_username
client.Client.__init__(**kwargs).WithSideEffects(client_mock)
self.mox.ReplayAll()
# clean global
token_store = neutronv2.AdminTokenStore.get()
token_store.admin_auth_token = None
if admin_context:
# Note that the context does not contain a token but is
# an admin context which will force an elevation to admin
# credentials.
neutronv2.get_client(my_context)
else:
# Note that the context is not elevated, but the True is passed in
# which will force an elevation to admin credentials even though
# the context has an auth_token.
neutronv2.get_client(my_context, True)
def test_get_client_for_admin(self):
self._test_get_client_for_admin()
def test_get_client_for_admin_with_id(self):
self._test_get_client_for_admin(use_id=True)
def test_get_client_for_admin_context(self):
self._test_get_client_for_admin(admin_context=True)
def test_get_client_for_admin_context_with_id(self):
self._test_get_client_for_admin(use_id=True, admin_context=True)
| eayunstack/nova | nova/tests/network/test_neutronv2.py | Python | apache-2.0 | 148,482 |
import pytest
import copy
import json
from awx.main.utils.common import (
model_instance_diff,
model_to_dict,
)
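# These tests exercise what appear to be AWX's audit/diff helpers: model_to_dict()
# is expected to serialize a model while masking sensitive fields (passwords become the
# literal 'hidden', encrypted notification settings become '$encrypted$'), and
# model_instance_diff() to return {field: (old_value, new_value)} pairs with the same
# masking applied, leaving the original instances unmodified.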
@pytest.mark.django_db
def test_model_to_dict_user(alice):
username = copy.copy(alice.username)
password = copy.copy(alice.password)
output_dict = model_to_dict(alice)
assert output_dict['username'] == username
assert output_dict['password'] == 'hidden'
    assert alice.username == username
assert alice.password == password
@pytest.mark.django_db
def test_model_to_dict_credential(credential):
name = copy.copy(credential.name)
inputs = copy.copy(credential.inputs)
output_dict = model_to_dict(credential)
assert output_dict['name'] == name
assert output_dict['inputs'] == 'hidden'
assert credential.name == name
assert credential.inputs == inputs
@pytest.mark.django_db
def test_model_to_dict_notification_template(notification_template_with_encrypt):
old_configuration = copy.deepcopy(notification_template_with_encrypt.notification_configuration)
output_dict = model_to_dict(notification_template_with_encrypt)
new_configuration = json.loads(output_dict['notification_configuration'])
assert notification_template_with_encrypt.notification_configuration == old_configuration
assert new_configuration['token'] == '$encrypted$'
assert new_configuration['channels'] == old_configuration['channels']
@pytest.mark.django_db
def test_model_instance_diff(alice, bob):
alice_name = copy.copy(alice.username)
alice_pass = copy.copy(alice.password)
bob_name = copy.copy(bob.username)
bob_pass = copy.copy(bob.password)
output_dict = model_instance_diff(alice, bob)
assert alice_name == alice.username
assert alice_pass == alice.password
assert bob_name == bob.username
assert bob_pass == bob.password
assert output_dict['username'][0] == alice_name
assert output_dict['username'][1] == bob_name
assert output_dict['password'] == ('hidden', 'hidden')
assert hasattr(alice, 'is_superuser')
assert hasattr(bob, 'is_superuser')
assert 'is_superuser' not in output_dict
| snahelou/awx | awx/main/tests/functional/utils/test_common.py | Python | apache-2.0 | 2,126 |
#
# MLDB-2107-scalar-format.py
# Mathieu Marquis Bolduc, 2017-01-10
# This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class MLDB2107ScalarFormatTest(MldbUnitTest): # noqa
@classmethod
def setUpClass(cls):
ds = mldb.create_dataset({'id' : 'ds', 'type' : 'sparse.mutable'})
ds.record_row('row0', [['x', 'A', 0]])
ds.record_row('row1', [['x', 'B', 0]])
ds.commit()
def test_int(self):
n = mldb.get('/v1/query', q="select x from (select 17 as x)", format='atom').json()
self.assertEqual(17, n)
def test_float(self):
n = mldb.get('/v1/query', q="select x from (select 2.3 as x)", format='atom').json()
self.assertEqual(2.3, n)
def test_string(self):
n = mldb.get('/v1/query', q="select x from (select 'blah' as x)", format='atom').json()
self.assertEqual('blah', n)
def test_bool(self):
n = mldb.get('/v1/query', q="select x from (select false as x)", format='atom').json()
self.assertEqual(False, n)
def test_error_columns(self):
msg = "Query with atom format returned multiple columns"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x,y from (select false as x, 1 as y)", format='atom').json()
def test_error_rows(self):
msg = "Query with atom format returning multiple rows"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x from ds", format='atom').json()
def test_multiple_rows_limit(self):
n = mldb.get('/v1/query', q="select x from ds limit 1", format='atom').json()
self.assertEqual('B', n)
def test_error_no_rows(self):
msg = "Query with atom format returned no rows."
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x from ds where x = 'patate'", format='atom').json()
def test_error_no_column(self):
msg = "Query with atom format returned no column"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select COLUMN EXPR (WHERE columnName() IN ('Z')) from (select 17 as x)", format='atom').json()
if __name__ == '__main__':
mldb.run_tests()
| mldbai/mldb | testing/MLDB-2107-scalar-format.py | Python | apache-2.0 | 2,377 |
import copy
import six
from eclcli.common import command
from eclcli.common import utils
class ListLicense(command.Lister):
def get_parser(self, prog_name):
parser = super(ListLicense, self).get_parser(prog_name)
parser.add_argument(
"--license-type",
help="License type name as string of which you want to list license",
metavar='<license-type>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"license_type":parsed_args.license_type
}
self.log.debug('search options: %s',search_opts)
columns = [
'ID', 'Key', 'Assigned From', 'Expires At', 'License Type',
]
column_headers = columns
data = dh_client.licenses.list(search_opts=search_opts)
return (column_headers,
(utils.get_item_properties(
s, columns
) for s in data))
class ListLicenseType(command.Lister):
def get_parser(self, prog_name):
parser = super(ListLicenseType, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
columns = [
'ID', 'Name', 'Has License Key', 'Unit', 'Description'
]
column_headers = columns
data = dh_client.licenses.list_license_types()
return (column_headers,
(utils.get_item_properties(
s, columns
) for s in data))
class CreateLicense(command.ShowOne):
def get_parser(self, prog_name):
parser = super(CreateLicense, self).get_parser(prog_name)
parser.add_argument(
"license_type",
help="License type name as string of which you want to create license",
metavar='<license-type>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
        self.log.debug('license type: %s', parsed_args.license_type)
rows = [
"ID",
"Key",
"Assigned From",
"Expires At",
"License Type"
]
row_headers = rows
data = dh_client.licenses.create(license_type=parsed_args.license_type)
return (row_headers,
utils.get_item_properties(
data, rows
))
class DeleteLicense(command.Command):
def get_parser(self, prog_name):
parser = super(DeleteLicense, self).get_parser(prog_name)
parser.add_argument(
"license_ids",
nargs="+",
help="IDs of licenses to be deleted",
metavar='<license-ids>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
        self.log.debug('license ids: %s', parsed_args.license_ids)
for license_id in parsed_args.license_ids:
dh_client.licenses.delete(license_id)
| anythingrandom/eclcli | eclcli/dh/v2/license.py | Python | apache-2.0 | 3,173 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Manage a remote server with the paramiko module,
logging in with an SSH private key.
'''
import paramiko
private_key_path = r'D:\workspace\Python-oldboy\day07\zhangyage_pass'  # raw string keeps the Windows path backslashes literal
#key = paramiko.RSAKey.from_private_key_file(filename, password)
key = paramiko.RSAKey.from_private_key_file(private_key_path, '12345678')  # private_key_path is the key file location; '12345678' is the key's passphrase
ssh = paramiko.SSHClient()  # instantiate a client
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # automatically accept unknown host keys instead of prompting for "yes" on the first connection
ssh.connect('192.168.75.133', 22, username='root', pkey=key)
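# For password-based authentication instead of a key, the connect call would look like
# (illustrative only, credentials are placeholders):
# ssh.connect('192.168.75.133', 22, username='root', password='your_password')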
stdin, stdout, stderr = ssh.exec_command('ifconfig')  # exec_command returns a tuple that is unpacked into the three stream objects
print stdout.read()
ssh.close() | zhangyage/Python-oldboy | day07/ssh_client2.py | Python | apache-2.0 | 880 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from textwrap import dedent
from pants.engine.internals.native_engine import FileDigest
from pants.jvm.resolve.common import ArtifactRequirement, Coordinate, Coordinates
from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile
from pants.jvm.resolve.coursier_test_util import TestCoursierWrapper
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
EMPTY_RESOLVE = """
# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
# {{
# "version": 1,
# "generated_with_requirements": [
# ]
# }}
# --- END PANTS LOCKFILE METADATA ---
"""
DEFAULT_LOCKFILE = (
TestCoursierWrapper(
CoursierResolvedLockfile(
(
CoursierLockfileEntry(
coord=Coordinate(
group="org.scala-lang", artifact="scala-library", version="2.13.6"
),
file_name="org.scala-lang_scala-library_2.13.6.jar",
direct_dependencies=Coordinates(),
dependencies=Coordinates(),
file_digest=FileDigest(
"f19ed732e150d3537794fd3fe42ee18470a3f707efd499ecd05a99e727ff6c8a", 5955737
),
),
)
)
)
.serialize(
[
ArtifactRequirement(
coordinate=Coordinate(
group="org.scala-lang", artifact="scala-library", version="2.13.6"
)
)
]
)
.replace("{", "{{")
.replace("}", "}}")
)
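# Note: the brace-doubling above is presumably required because setup_tmpdir() feeds each
# file body through str.format() (the other literals here use '{{'/'}}' the same way), so
# literal braces in the serialized lockfile must be escaped to survive formatting.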
DEFAULT_SCALA_LIBRARY_TARGET = textwrap.dedent(
"""\
jvm_artifact(
name="org.scala-lang_scala-library_2.13.6",
group="org.scala-lang",
artifact="scala-library",
version="2.13.6",
)
"""
)
def test_java() -> None:
sources = {
"src/org/pantsbuild/test/Hello.java": dedent(
"""\
package org.pantsbuild.test;
public class Hello {{
public static void main(String[] args) {{
System.out.println("Hello, World!");
}}
}}
"""
),
"src/org/pantsbuild/test/BUILD": dedent(
"""\
java_sources()
deploy_jar(
name="test_deploy_jar",
main="org.pantsbuild.test.Hello",
dependencies=[":test"],
)
"""
),
"lockfile": EMPTY_RESOLVE,
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.experimental.java",
f"--source-root-patterns=['{tmpdir}/src']",
"--pants-ignore=__pycache__",
f'--jvm-resolves={{"empty": "{tmpdir}/lockfile"}}',
"--jvm-default-resolve=empty",
"run",
f"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar",
]
result = run_pants(args)
assert result.stdout.strip() == "Hello, World!"
def test_scala() -> None:
sources = {
"src/org/pantsbuild/test/Hello.scala": dedent(
"""\
package org.pantsbuild.test;
object Hello {{
def main(args: Array[String]): Unit = {{
println("Hello, World!")
}}
}}
"""
),
"src/org/pantsbuild/test/BUILD": dedent(
"""\
scala_sources()
deploy_jar(
name="test_deploy_jar",
main="org.pantsbuild.test.Hello",
dependencies=[":test"],
)
"""
),
"BUILD": DEFAULT_SCALA_LIBRARY_TARGET,
"lockfile": DEFAULT_LOCKFILE,
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.experimental.scala",
f"--source-root-patterns=['{tmpdir}/src']",
"--pants-ignore=__pycache__",
f'--jvm-resolves={{"jvm-default": "{tmpdir}/lockfile"}}',
"--jvm-default-resolve=jvm-default",
"run",
f"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar",
]
result = run_pants(args)
assert result.stdout.strip() == "Hello, World!"
| pantsbuild/pants | src/python/pants/jvm/run_deploy_jar_intergration_test.py | Python | apache-2.0 | 4,405 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.docformatter.skip_field import SkipDocformatterField
from pants.backend.python.lint.docformatter.subsystem import Docformatter
from pants.backend.python.lint.python_fmt import PythonFmtRequest
from pants.backend.python.target_types import PythonSources
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import PexRequest, PexRequirements, VenvPex, VenvPexProcess
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintRequest, LintResult, LintResults
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import Digest
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class DocformatterFieldSet(FieldSet):
required_fields = (PythonSources,)
sources: PythonSources
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipDocformatterField).value
class DocformatterRequest(PythonFmtRequest, LintRequest):
field_set_type = DocformatterFieldSet
@dataclass(frozen=True)
class SetupRequest:
request: DocformatterRequest
check_only: bool
@dataclass(frozen=True)
class Setup:
process: Process
original_digest: Digest
def generate_args(
*, source_files: SourceFiles, docformatter: Docformatter, check_only: bool
) -> Tuple[str, ...]:
return ("--check" if check_only else "--in-place", *docformatter.args, *source_files.files)
@rule(level=LogLevel.DEBUG)
async def setup_docformatter(setup_request: SetupRequest, docformatter: Docformatter) -> Setup:
docformatter_pex_request = Get(
VenvPex,
PexRequest(
output_filename="docformatter.pex",
internal_only=True,
requirements=PexRequirements(docformatter.all_requirements),
interpreter_constraints=InterpreterConstraints(docformatter.interpreter_constraints),
main=docformatter.main,
),
)
source_files_request = Get(
SourceFiles,
SourceFilesRequest(field_set.sources for field_set in setup_request.request.field_sets),
)
source_files, docformatter_pex = await MultiGet(source_files_request, docformatter_pex_request)
source_files_snapshot = (
source_files.snapshot
if setup_request.request.prior_formatter_result is None
else setup_request.request.prior_formatter_result
)
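    # The conditional above chains formatters: if a prior formatter already produced a
    # result digest, docformatter runs on that output instead of the original sources.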
process = await Get(
Process,
VenvPexProcess(
docformatter_pex,
argv=generate_args(
source_files=source_files,
docformatter=docformatter,
check_only=setup_request.check_only,
),
input_digest=source_files_snapshot.digest,
output_files=source_files_snapshot.files,
description=(
f"Run Docformatter on {pluralize(len(setup_request.request.field_sets), 'file')}."
),
level=LogLevel.DEBUG,
),
)
return Setup(process, original_digest=source_files_snapshot.digest)
@rule(desc="Format with docformatter", level=LogLevel.DEBUG)
async def docformatter_fmt(request: DocformatterRequest, docformatter: Docformatter) -> FmtResult:
if docformatter.skip:
return FmtResult.skip(formatter_name="Docformatter")
setup = await Get(Setup, SetupRequest(request, check_only=False))
result = await Get(ProcessResult, Process, setup.process)
return FmtResult.from_process_result(
result, original_digest=setup.original_digest, formatter_name="Docformatter"
)
@rule(desc="Lint with docformatter", level=LogLevel.DEBUG)
async def docformatter_lint(
request: DocformatterRequest, docformatter: Docformatter
) -> LintResults:
if docformatter.skip:
return LintResults([], linter_name="Docformatter")
setup = await Get(Setup, SetupRequest(request, check_only=True))
result = await Get(FallibleProcessResult, Process, setup.process)
return LintResults(
[LintResult.from_fallible_process_result(result)], linter_name="Docformatter"
)
def rules():
return [
*collect_rules(),
UnionRule(PythonFmtRequest, DocformatterRequest),
UnionRule(LintRequest, DocformatterRequest),
*pex.rules(),
]
| benjyw/pants | src/python/pants/backend/python/lint/docformatter/rules.py | Python | apache-2.0 | 4,834 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateDatabase
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-spanner-admin-database
# [START spanner_v1_generated_DatabaseAdmin_CreateDatabase_async]
from google.cloud import spanner_admin_database_v1
async def sample_create_database():
# Create a client
client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
# Initialize request argument(s)
request = spanner_admin_database_v1.CreateDatabaseRequest(
parent="parent_value",
create_statement="create_statement_value",
)
# Make the request
operation = client.create_database(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END spanner_v1_generated_DatabaseAdmin_CreateDatabase_async]
| googleapis/python-spanner | samples/generated_samples/spanner_v1_generated_database_admin_create_database_async.py | Python | apache-2.0 | 1,667 |
from ..broker import Broker
class BasicServicesBroker(Broker):
controller = "basic_services"
def authenticate(self, **kwargs):
"""Authenticates the user with NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param username: The username of the user as whom to login.
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param password: The password of the user as whom to login.
:type password: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` %Y-%m-%d %H:%M:%S
:param datetime_format: The format to use for date/time input and output.
:type datetime_format: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timezone: Date/time input and output will be performed in the specified timezone. Should be specified as HH:MM offset from GMT. For example, -05:00 specified US Eastern Time, whereas +09:00 specifies Tokyo time. Alternatively, a timezone name may be used. See the API Data Structures page for details. If omitted, the server's configured timezone will be used.
:type timezone: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("authenticate"), kwargs)
def base_uri(self, **kwargs):
"""Returns the base URI for the specified version.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param version: The API version for which the base_uri is needed.
:type version: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("base_uri"), kwargs)
def license_info(self, **kwargs):
"""Returns license information for this NetMRI server.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return serial_number: NetMRI serial number.
:rtype serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_id: NetMRI License identifier.
:rtype license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_expiration: NetMRI License expiration.
:rtype license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_type: NetMRI License type
:rtype license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mode: NetMRI operation mode. One of 'standalone', 'master' or 'collector'.
:rtype mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return maintenance_expiration: Maintenance expiration for appliance.
:rtype maintenance_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_limit: Licensed limit of devices.
:rtype device_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return interface_limit: Licensed limit of interfaces.
:rtype interface_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return spm_limit: Licensed limit of number of ports controlled by SPM.
:rtype spm_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_short_name: Short symbolic names of licensed features.
:rtype modules_short_name: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_support: Support statuses for corresponding modules in modules_short_names.
:rtype modules_support: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_expiration: Expiration times for corresponding modules in modules_short_names.
:rtype modules_expiration: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_name: Long names for corresponding modules in modules_short_names.
:rtype modules_name: Array of String
"""
return self.api_request(self._get_method_fullname("license_info"), kwargs)
def server_info(self, **kwargs):
"""Returns basic information regarding this NetMRI server.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param api_versions_only_ind: Only include API version information in the output.
:type api_versions_only_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return netmri_version: The NetMRI version number running on this appliance or virtual machine.
:rtype netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return latest_api_version: The most recent API version supported by this NetMRI.
:rtype latest_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return requested_api_version: The API version that executed this call.
:rtype requested_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return host_name: The configured host name of the NetMRI appliance.
:rtype host_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return operating_mode: Indicates if the NetMRI is running in standalone, collector, or operations center mode.
:rtype operating_mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mgmt_ip: The IPv4 management address of this NetMRI, if configured.
:rtype mgmt_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mgmt_ip6: The IPv6 management address of this NetMRI, if configured.
:rtype mgmt_ip6: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return scan_ip: The IPv4 SCAN (analysis) address of this NetMRI, if configured.
:rtype scan_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return scan_ip6: The IPv6 SCAN (analysis) address of this NetMRI, if configured.
:rtype scan_ip6: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return operational_status: The status of NetMRI. Usually ready, can also be upgrading. Values might change in the future.
:rtype operational_status: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return supported_api_versions: All API versions supported by this NetMRI.
:rtype supported_api_versions: Array of String
"""
return self.api_request(self._get_method_fullname("server_info"), kwargs)
def server_time(self, **kwargs):
"""Returns the current system time of this NetMRI server.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("server_time"), kwargs)
def restart(self, **kwargs):
"""Restarts the application.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("restart"), kwargs)
def consolidate(self, **kwargs):
"""Runs consolidation
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param managers: Comma-separated list of consolidator managers. Must be one of Aggregate, Config, Event, Group, Issue, Job, Normal, Policy, Routing, Settings, Stats, Subnet, Switching, Time, Topology, Voip, Vulnerability, Wireless
:type managers: Array
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param collector: Collector name. In case when this method called on OC this parameter is required
:type collector: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("consolidate"), kwargs)
def settings_generate(self, **kwargs):
"""Generates xml with current configuration data
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param version: The version of xml to be generated
:type version: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
"""
return self.api_request(self._get_method_fullname("settings_generate"), kwargs)
def settings_current(self, **kwargs):
"""Reports the status of an xml configuration file
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: A string representation of the status of the request. Will be one of; success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_current"), kwargs)
def settings_apply(self, **kwargs):
"""Parses the xml provided by config_id, then applies the changes. You should not need to call this directly!
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param config_id: The configuration id reported when the xml was uploaded to the unit
:type config_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param mods: Modifications for applying
:type mods: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return xml: A string containing the full xml as collected from the running config.
:rtype xml: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: A string representation of the status of the request. Will be one of; success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_apply"), kwargs)
def settings_status(self, **kwargs):
"""Reports the status of an xml configuration file
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param config_id: The configuration id reported when the xml was uploaded to the unit
:type config_id: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return messages: An array of hashes that contain details about the validation process
:rtype messages: Array of Hash
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: A string representation of the status of the validation. Will be one of; success, error, pending
:rtype status: String
"""
return self.api_request(self._get_method_fullname("settings_status"), kwargs)
def settings_info(self, **kwargs):
"""Shows probe info, running_config, candidate_config, and list of installed dsb
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return grid_members: Hash of grid members info including master and slaves (probes)
:rtype grid_members: String
"""
return self.api_request(self._get_method_fullname("settings_info"), kwargs)
def set_session_value(self, **kwargs):
"""save data in a cache that is session wise
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param key: key associated with that value - will be used to retrieve the same value
:type key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param value: value to save in the session cache
:type value: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("set_session_value"), kwargs)
def get_session_value(self, **kwargs):
"""retrieve data in the session cache that formerly saved
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param key: key associated with that value - will be used to retrieve the same value
:type key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param default_value: Default value in case key/value does not exist in session. If key does not exist and default value is nil the response is 400 with record not found message
:type default_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return value: value associated with that key
:rtype value: String
"""
return self.api_request(self._get_method_fullname("get_session_value"), kwargs)
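# Illustrative sketch (not part of the generated broker): one way the session-cache
# helpers above might be used together. It assumes `broker` is an already-constructed,
# authenticated instance of this broker class; the key and value below are invented.
def _example_session_cache_usage(broker):
    # Store a value under a key for the current API session.
    broker.set_session_value(key="last_config_id", value="12345")
    # Read it back later; default_value is returned if the key is absent.
    return broker.get_session_value(key="last_config_id", default_value="none")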
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_9_0/basic_services_broker.py | Python | apache-2.0 | 18,594 |
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
    :param addon_short_name: short name of the addon being examined
:param fileobj_metadata: file or folder metadata of current point of reference
in file tree
    :param user: archive initiator
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
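# Illustrative sketch only: a toy call to aggregate_file_tree_metadata with hand-built
# waterbutler-style metadata, showing how files roll up into an AggregateStatResult.
# The addon name, paths, and sizes below are invented.
def _example_aggregate_file_tree_metadata():
    fake_tree = {
        'kind': 'folder',
        'path': '/',
        'name': 'osfstorage',
        'children': [
            {'kind': 'file', 'path': '/abc123', 'name': 'data.csv', 'size': 1024},
            {'kind': 'file', 'path': '/def456', 'name': 'notes.txt', 'size': None},
        ],
    }
    # `user` is only threaded through the recursion, so None suffices here
    return aggregate_file_tree_metadata('osfstorage', fake_tree, None)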
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
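# Illustrative sketch only: a toy file tree showing how _do_get_file_map flattens
# nested folders into (sha256, metadata) pairs. The names and hashes are invented.
def _example_do_get_file_map():
    tree = {
        'kind': 'folder',
        'children': [
            {'kind': 'file', 'name': 'a.txt',
             'extra': {'hashes': {'sha256': 'aaa111'}}},
            {'kind': 'folder', 'children': [
                {'kind': 'file', 'name': 'b.txt',
                 'extra': {'hashes': {'sha256': 'bbb222'}}},
            ]},
        ],
    }
    # Returns [('aaa111', <a.txt metadata>), ('bbb222', <b.txt metadata>)]
    return _do_get_file_map(tree)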
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
    note:: file_map is injected implicitly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
"""
some annotations:
- `value` is the `extra` from a file upload in `registered_meta`
(see `Uploader.addFile` in website/static/js/registrationEditorExtensions.js)
- `node` is a Registration instance
- returns a `(file_info, node_id)` or `(None, None)` tuple, where `file_info` is from waterbutler's api
(see `addons.base.models.BaseStorageAddon._get_fileobj_child_metadata` and `waterbutler.core.metadata.BaseMetadata`)
"""
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
'<': '<',
'>': '>'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, file_info, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == file_info['name']:
return file_info, node_id
return None, None
def find_registration_files(values, node):
"""
some annotations:
- `values` is from `registered_meta`, e.g. `{ comments: [], value: '', extra: [] }`
- `node` is a Registration model instance
- returns a list of `(file_info, node_id, index)` or `(None, None, index)` tuples,
where `file_info` is from `find_registration_file` above
"""
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def get_title_for_question(schema, path):
path = path.split('.')
root = path.pop(0)
item = None
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if root in questions:
item = questions[root]
title = item.get('title')
while len(path):
item = item.get(path.pop(0), {})
title = item.get('title', title)
return title
def find_selected_files(schema, metadata):
"""
some annotations:
- `schema` is a RegistrationSchema instance
- `metadata` is from `registered_meta` (for the given schema)
- returns a dict that maps from each `osf-upload` question id (`.`-delimited path) to its chunk of metadata,
e.g. `{ 'q1.uploader': { comments: [], extra: [...], value: 'foo.pdf' } }`
"""
targets = []
paths = [('', p) for p in schema.schema['pages']]
while len(paths):
prefix, path = paths.pop(0)
if path.get('questions'):
paths = paths + [('', q) for q in path['questions']]
elif path.get('type'):
qid = path.get('qid', path.get('id'))
if path['type'] == 'object':
paths = paths + [('{}.{}.value'.format(prefix, qid), p) for p in path['properties']]
elif path['type'] == 'osf-upload':
targets.append('{}.{}'.format(prefix, qid).lstrip('.'))
selected = {}
for t in targets:
parts = t.split('.')
value = metadata.get(parts.pop(0))
while value and len(parts):
value = value.get(parts.pop(0))
if value:
selected[t] = value
return selected
VIEW_FILE_URL_TEMPLATE = '/project/{node_id}/files/osfstorage/{file_id}/'
def deep_get(obj, path):
parts = path.split('.')
item = obj
key = None
while len(parts):
key = parts.pop(0)
item[key] = item.get(key, {})
item = item[key]
return item
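# Illustrative sketch only: deep_get walks a '.'-delimited path, inserting empty dicts
# for any missing keys along the way (so it mutates its argument). Toy data below.
def _example_deep_get():
    meta = {'q1': {'value': {'uploader': {'extra': []}}}}
    present = deep_get(meta, 'q1.value.uploader')   # -> {'extra': []}
    missing = deep_get(meta, 'q2.value')            # -> {} (and 'q2' is now in meta)
    return present, missing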
def migrate_file_metadata(dst, schema):
metadata = dst.registered_meta[schema._id]
missing_files = []
selected_files = find_selected_files(schema, metadata)
for path, selected in selected_files.items():
target = deep_get(metadata, path)
for archived_file_info, node_id, index in find_registration_files(selected, dst):
if not archived_file_info:
missing_files.append({
'file_name': selected['extra'][index]['selectedFileName'],
'question_title': get_title_for_question(schema.schema, path)
})
continue
archived_file_id = archived_file_info['path'].lstrip('/')
target['extra'][index]['viewUrl'] = VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, file_id=archived_file_id)
if missing_files:
from website.archiver.tasks import ArchivedFileNotFound
raise ArchivedFileNotFound(
registration=dst,
missing_files=missing_files
)
dst.registered_meta[schema._id] = metadata
dst.registration_responses = dst.flatten_registration_metadata()
dst.save()
| baylee-d/osf.io | website/archiver/utils.py | Python | apache-2.0 | 11,737 |
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from vitrage_dashboard.entities import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
]
| openstack/vitrage-dashboard | vitrage_dashboard/entities/urls.py | Python | apache-2.0 | 728 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Densely Connected Convolutional Networks.
Reference [
Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow_examples.models.densenet import distributed_train
from tensorflow_examples.models.densenet import utils
class DenseNetDistributedBenchmark(tf.test.Benchmark):
def __init__(self, output_dir=None, **kwargs):
self.output_dir = output_dir
def benchmark_with_function_custom_loops(self):
kwargs = utils.get_cifar10_kwargs()
self._run_and_report_benchmark(**kwargs)
def benchmark_with_function_custom_loops_300_epochs_2_gpus(self):
kwargs = utils.get_cifar10_kwargs()
kwargs.update({'epochs': 300, 'data_format': 'channels_first',
'bottleneck': False, 'compression': 1., 'num_gpu': 2,
'batch_size': 128})
self._run_and_report_benchmark(**kwargs)
def benchmark_with_function_custom_loops_300_epochs_8_gpus(self):
kwargs = utils.get_cifar10_kwargs()
kwargs.update({'epochs': 300, 'data_format': 'channels_first',
'bottleneck': False, 'compression': 1., 'num_gpu': 8,
'batch_size': 512})
self._run_and_report_benchmark(**kwargs)
def _run_and_report_benchmark(self, top_1_min=.944, top_1_max=.949, **kwargs):
"""Run the benchmark and report metrics.report.
Args:
top_1_min: Min value for top_1 accuracy. Default range is SOTA.
top_1_max: Max value for top_1 accuracy.
**kwargs: All args passed to the test.
"""
start_time_sec = time.time()
train_loss, train_acc, _, test_acc = distributed_train.main(**kwargs)
wall_time_sec = time.time() - start_time_sec
metrics = []
metrics.append({'name': 'accuracy_top_1',
'value': test_acc,
'min_value': top_1_min,
'max_value': top_1_max})
metrics.append({'name': 'training_accuracy_top_1',
'value': train_acc})
metrics.append({'name': 'train_loss',
'value': train_loss})
self.report_benchmark(wall_time=wall_time_sec, metrics=metrics)
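# Illustrative sketch only: the benchmark methods above can also be invoked directly,
# outside the tf.test harness; this would run the default CIFAR-10 configuration.
def _example_run_benchmark_directly():
    bench = DenseNetDistributedBenchmark()
    bench.benchmark_with_function_custom_loops()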
if __name__ == '__main__':
tf.test.main()
| tensorflow/examples | tensorflow_examples/models/densenet/densenet_distributed_test.py | Python | apache-2.0 | 2,998 |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import gzip
import json
import os
import shlex
import sys
import tarfile
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import jinja2
import prettytable
import six
import swiftclient
import yaml
import zpmlib
from zpmlib import util
from zpmlib import zappbundler
from zpmlib import zapptemplate
_DEFAULT_UI_TEMPLATES = ['index.html.tmpl', 'style.css', 'zerocloud.js']
_ZAPP_YAML = 'python-zapp.yaml'
_ZAPP_WITH_UI_YAML = 'python-zapp-with-ui.yaml'
LOG = zpmlib.get_logger(__name__)
BUFFER_SIZE = 65536
#: path/filename of the system.map (job description) in every zapp
SYSTEM_MAP_ZAPP_PATH = 'boot/system.map'
#: Message displayed if insufficient auth settings are specified, either on the
#: command line or in environment variables. Shamelessly copied from
#: ``python-swiftclient``.
NO_AUTH_MSG = """\
Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables
to be set or overridden with -A, -U, or -K.
Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and
OS_TENANT_NAME OS_TENANT_ID to be set or overridden with --os-auth-url,
--os-username, --os-password, --os-tenant-name or os-tenant-id. Note:
adding "-V 2" is necessary for this."""
#: Column labels for the execution summary table
EXEC_TABLE_HEADER = [
'Node',
'Status',
'Retcode',
'NodeT',
'SysT',
'UserT',
'DiskReads',
'DiskBytesR',
'DiskWrites',
'DiskBytesW',
'NetworkReads',
'NetworkBytesR',
'NetworkWrites',
'NetworkBytesW',
]
def create_project(location, with_ui=False, template=None):
"""
Create a ZeroVM application project by writing a default `zapp.yaml` in the
specified directory `location`.
:param location:
Directory location to place project files.
:param with_ui:
Defaults to `False`. If `True`, add basic UI template files as well to
``location``.
:param template:
Default: ``None``. If no template is specified, use the default project
template. (See `zpmlib.zapptemplate`.)
:returns: List of created project files.
"""
if os.path.exists(location):
if not os.path.isdir(location):
# target must be an empty directory
raise RuntimeError("Target `location` must be a directory")
else:
os.makedirs(location)
    # Run the template builder and create additional files for the project based on
    # the template type. If ``template`` is None, this is essentially a NOP.
# TODO: just use the afc._created_files
created_files = []
with util.AtomicFileCreator() as afc:
for file_type, path, contents in zapptemplate.template(
location, template, with_ui=with_ui):
afc.create_file(file_type, path, contents)
created_files.append(path)
return created_files
def find_project_root():
"""
Starting from the `cwd`, search up the file system hierarchy until a
``zapp.yaml`` file is found. Once the file is found, return the directory
containing it. If no file is found, raise a `RuntimeError`.
"""
root = os.getcwd()
while not os.path.isfile(os.path.join(root, 'zapp.yaml')):
oldroot, root = root, os.path.dirname(root)
if root == oldroot:
raise RuntimeError("no zapp.yaml file found")
return root
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`dict` of the job description
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job
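# Illustrative sketch only: a minimal zapp dict (the shape zapp.yaml parses into) run
# through _generate_job_desc. The group below mirrors the hello example used elsewhere
# in this module and is not a real project.
def _example_generate_job_desc():
    zapp = {
        'execution': {
            'groups': [
                {'name': 'hello',
                 'path': 'file://python2.7:python',
                 'args': 'hello.py',
                 'devices': [{'name': 'python2.7'}, {'name': 'stdout'}]},
            ],
        },
    }
    # Produces [{'name': 'hello', 'devices': [...],
    #            'exec': {'path': 'file://python2.7:python',
    #                     'args': 'hello.py', 'name': 'python'}}]
    return _generate_job_desc(zapp)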
def _get_swift_zapp_url(swift_service_url, zapp_path):
"""
:param str swift_service_url:
The Swift service URL returned from a Keystone service catalog.
Example: http://localhost:8080/v1/AUTH_469a9cd20b5a4fc5be9438f66bb5ee04
:param str zapp_path:
<container>/<zapp-file-name>. Example:
test_container/myapp.zapp
Here's a typical usage example, with typical input and output:
>>> swift_service_url = ('http://localhost:8080/v1/'
... 'AUTH_469a9cd20b5a4fc5be9438f66bb5ee04')
>>> zapp_path = 'test_container/myapp.zapp'
>>> _get_swift_zapp_url(swift_service_url, zapp_path)
'swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/test_container/myapp.zapp'
"""
swift_path = urlparse.urlparse(swift_service_url).path
# TODO(larsbutler): Why do we need to check if the path contains '/v1/'?
# This is here due to legacy reasons, but it's not clear to me why this is
# needed.
if swift_path.startswith('/v1/'):
swift_path = swift_path[4:]
return 'swift://%s/%s' % (swift_path, zapp_path)
def _prepare_job(tar, zapp, zapp_swift_url):
"""
:param tar:
The application .zapp file, as a :class:`tarfile.TarFile` object.
:param dict zapp:
Parsed contents of the application `zapp.yaml` specification, as a
`dict`.
:param str zapp_swift_url:
Path of the .zapp in Swift, which looks like this::
'swift://AUTH_abcdef123/test_container/hello.zapp'
See :func:`_get_swift_zapp_url`.
:returns:
Extracted contents of the boot/system.map with the swift
path to the .zapp added to the `devices` for each `group`.
So if the job looks like this::
[{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'hello'}]
the output will look like something like this::
[{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'image',
'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
],
'name': 'hello'}]
"""
fp = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
# NOTE(larsbutler): the `decode` is needed for python3
# compatibility
job = json.loads(fp.read().decode('utf-8'))
device = {'name': 'image', 'path': zapp_swift_url}
for group in job:
group['devices'].append(device)
return job
def bundle_project(root, refresh_deps=False):
"""
Bundle the project under root.
"""
zapp_yaml = os.path.join(root, 'zapp.yaml')
zapp = yaml.safe_load(open(zapp_yaml))
zapp_name = zapp['meta']['name'] + '.zapp'
zapp_tar_path = os.path.join(root, zapp_name)
tar = tarfile.open(zapp_tar_path, 'w:gz')
job = _generate_job_desc(zapp)
job_json = json.dumps(job)
info = tarfile.TarInfo(name='boot/system.map')
# This size is only correct because json.dumps uses
# ensure_ascii=True by default and we thus have a 1-1
# correspondence between Unicode characters and bytes.
info.size = len(job_json)
LOG.info('adding %s' % info.name)
# In Python 3, we cannot use a str or bytes object with addfile,
# we need a BytesIO object. In Python 2, BytesIO is just StringIO.
# Since json.dumps produces an ASCII-only Unicode string in Python
# 3, it is safe to encode it to ASCII.
tar.addfile(info, BytesIO(job_json.encode('ascii')))
_add_file_to_tar(root, 'zapp.yaml', tar)
sections = ('bundling', 'ui')
# Keep track of the files we add, given the configuration in the zapp.yaml.
file_add_count = 0
for section in sections:
for pattern in zapp.get(section, []):
paths = glob.glob(os.path.join(root, pattern))
if len(paths) == 0:
LOG.warning(
"pattern '%(pat)s' in section '%(sec)s' matched no files",
dict(pat=pattern, sec=section)
)
else:
for path in paths:
_add_file_to_tar(root, path, tar)
file_add_count += len(paths)
if file_add_count == 0:
# None of the files specified in the "bundling" or "ui" sections were
# found. Something is wrong.
raise zpmlib.ZPMException(
"None of the files specified in the 'bundling' or 'ui' sections of"
" the zapp.yaml matched anything."
)
# Do template-specific bundling
zappbundler.bundle(root, zapp, tar, refresh_deps=refresh_deps)
tar.close()
print('created %s' % zapp_name)
def _add_file_to_tar(root, path, tar, arcname=None):
"""
:param root:
Root working directory.
:param path:
File path.
:param tar:
        Open :class:`tarfile.TarFile` object to add the file to.
"""
# TODO(larsbutler): document ``arcname``
LOG.info('adding %s' % path)
path = os.path.join(root, path)
relpath = os.path.relpath(path, root)
if arcname is None:
# In the archive, give the file the same name and path.
arcname = relpath
tar.add(path, arcname=arcname)
def _find_ui_uploads(zapp, tar):
matches = set()
names = tar.getnames()
for pattern in zapp.get('ui', []):
matches.update(fnmatch.filter(names, pattern))
return sorted(matches)
def _post_job(url, token, data, http_conn=None, response_dict=None,
content_type='application/json', content_length=None,
response_body_buffer=None):
# Modelled after swiftclient.client.post_account.
headers = {'X-Auth-Token': token,
'X-Zerovm-Execute': '1.0',
'Content-Type': content_type}
if content_length:
headers['Content-Length'] = str(content_length)
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = swiftclient.http_connection(url)
conn.request('POST', parsed.path, data, headers)
resp = conn.getresponse()
body = resp.read()
swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
swiftclient.store_response(resp, response_dict)
if response_body_buffer is not None:
response_body_buffer.write(body)
class ZeroCloudConnection(swiftclient.Connection):
"""
An extension of the `swiftclient.Connection` which has the capability of
posting ZeroVM jobs to an instance of ZeroCloud (running on Swift).
"""
def authenticate(self):
"""
Authenticate with the provided credentials and cache the storage URL
and auth token as `self.url` and `self.token`, respectively.
"""
self.url, self.token = self.get_auth()
def post_job(self, job, response_dict=None, response_body_buffer=None):
"""Start a ZeroVM job, using a pre-uploaded zapp
:param object job:
Job description. This will be encoded as JSON and sent to
ZeroCloud.
"""
json_data = json.dumps(job)
LOG.debug('JOB: %s' % json_data)
return self._retry(None, _post_job, json_data,
response_dict=response_dict,
response_body_buffer=response_body_buffer)
def post_zapp(self, data, response_dict=None, content_length=None,
response_body_buffer=None):
return self._retry(None, _post_job, data,
response_dict=response_dict,
content_type='application/x-gzip',
content_length=content_length,
response_body_buffer=response_body_buffer)
def _get_zerocloud_conn(args):
version = args.auth_version
# no version was explicitly requested; try to guess it:
if version is None:
version = _guess_auth_version(args)
if version == '1.0':
if any([arg is None for arg in (args.auth, args.user, args.key)]):
raise zpmlib.ZPMException(
"Version 1 auth requires `--auth`, `--user`, and `--key`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.auth, args.user, args.key)
elif version == '2.0':
if any([arg is None for arg in
(args.os_auth_url, args.os_username, args.os_tenant_name,
args.os_password)]):
raise zpmlib.ZPMException(
"Version 2 auth requires `--os-auth-url`, `--os-username`, "
"`--os-password`, and `--os-tenant-name`."
"\nSee `zpm deploy --help` for more information."
)
conn = ZeroCloudConnection(args.os_auth_url, args.os_username,
args.os_password,
tenant_name=args.os_tenant_name,
auth_version='2.0')
else:
raise zpmlib.ZPMException(NO_AUTH_MSG)
return conn
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
"""Upload all of the necessary files for a zapp.
    Returns the name of an uploaded index file, or the target if no
index.html file was uploaded.
:param bool force:
Force deployment, even if the target container is not empty. This means
that files could be overwritten and could cause consistency problems
with these objects in Swift.
"""
base_container = target.split('/')[0]
try:
_, objects = conn.get_container(base_container)
if not len(objects) == 0:
if not force:
raise zpmlib.ZPMException(
"Target container ('%s') is not empty.\nDeploying to a "
"non-empty container can cause consistency problems with "
"overwritten objects.\nSpecify the flag `--force/-f` to "
"overwrite anyway."
% base_container
)
except swiftclient.exceptions.ClientException:
# container doesn't exist; create it
LOG.info("Container '%s' not found. Creating it...", base_container)
conn.put_container(base_container)
# If we get here, everything with the container is fine.
index = target + '/'
uploads = _generate_uploads(conn, target, zapp_path, auth_opts)
for path, data, content_type in uploads:
if path.endswith('/index.html'):
index = path
container, obj = path.split('/', 1)
conn.put_object(container, obj, data, content_type=content_type)
return index
def _generate_uploads(conn, target, zapp_path, auth_opts):
"""Generate sequence of (container-and-file-path, data, content-type)
tuples.
"""
tar = tarfile.open(zapp_path, 'r:gz')
zapp_config = yaml.safe_load(tar.extractfile('zapp.yaml'))
remote_zapp_path = '%s/%s' % (target, os.path.basename(zapp_path))
swift_url = _get_swift_zapp_url(conn.url, remote_zapp_path)
job = _prepare_job(tar, zapp_config, swift_url)
yield (remote_zapp_path, gzip.open(zapp_path).read(), 'application/x-tar')
yield ('%s/%s' % (target, SYSTEM_MAP_ZAPP_PATH), json.dumps(job),
'application/json')
for path in _find_ui_uploads(zapp_config, tar):
output = tar.extractfile(path).read()
if path.endswith('.tmpl'):
tmpl = jinja2.Template(output.decode('utf-8'))
output = tmpl.render(auth_opts=auth_opts, zapp=zapp_config)
# drop the .tmpl extension
path = os.path.splitext(path)[0]
ui_path = '%s/%s' % (target, path)
yield (ui_path, output, None)
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth
def _guess_auth_version(args):
"""Guess the auth version from first the command line args and/or envvars.
Command line arguments override environment variables, so we check those
first.
Auth v1 arguments:
* ``--auth``
* ``--user``
* ``--key``
Auth v2 arguments:
* ``--os-auth-url``
* ``--os-username``
* ``--os-password``
* ``--os-tenant-name``
If all of the v1 and v2 arguments are specified, default to 1.0 (this is
how ``python-swiftclient`` behaves).
If no auth version can be determined from the command line args, we check
environment variables.
Auth v1 vars:
* ``ST_AUTH``
* ``ST_USER``
* ``ST_KEY``
Auth v2 vars:
* ``OS_AUTH_URL``
* ``OS_USERNAME``
* ``OS_PASSWORD``
* ``OS_TENANT_NAME``
The same rule above applies; if both sets of variables are specified,
default to 1.0.
If no auth version can be determined, return `None`.
:param args:
:class:`argparse.Namespace`, representing the args specified on the
command line.
:returns: '1.0', '2.0', or ``None``
"""
v1 = (args.auth, args.user, args.key)
v2 = (args.os_auth_url, args.os_username, args.os_password,
args.os_tenant_name)
if all(v1) and not all(v2):
return '1.0'
elif all(v2) and not all(v1):
return '2.0'
elif all(v1) and all(v2):
# All vars for v1 and v2 auth are set, so we follow the
# `python-swiftclient` behavior and default to 1.0.
return '1.0'
else:
# deduce from envvars
env = os.environ
v1_env = (env.get('ST_AUTH'), env.get('ST_USER'), env.get('ST_KEY'))
v2_env = (env.get('OS_AUTH_URL'), env.get('OS_USERNAME'),
env.get('OS_PASSWORD'), env.get('OS_TENANT_NAME'))
if all(v1_env) and not all(v2_env):
return '1.0'
if all(v2_env) and not all(v1_env):
return '2.0'
elif all(v1_env) and all(v2_env):
# Same as above, if all v1 and v2 vars are set, default to 1.0.
return '1.0'
else:
# Insufficient auth details have been specified.
return None
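# Illustrative sketch only: how the guessing rules above play out for a v1-style
# command line. argparse.Namespace stands in for the real parsed args; all values
# are invented.
def _example_guess_auth_version():
    import argparse
    args = argparse.Namespace(
        auth='http://localhost:8080/auth/v1.0', user='tester', key='secret',
        os_auth_url=None, os_username=None, os_password=None, os_tenant_name=None,
    )
    # All v1 args are set and no v2 args are set, so this returns '1.0'
    return _guess_auth_version(args)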
def deploy_project(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
ui_auth_version = conn.auth_version
# We can now reset the auth for the web UI, if needed
if args.no_ui_auth:
ui_auth_version = '0.0'
auth = _prepare_auth(ui_auth_version, args, conn)
auth_opts = jinja2.Markup(json.dumps(auth))
deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
force=args.force)
print('app deployed to\n %s/%s' % (conn.url, deploy_index))
if args.execute:
# for compatibility with the option name in 'zpm execute'
args.container = args.target
resp_body_buffer = BytesIO()
resp = execute(args, response_body_buffer=resp_body_buffer)
resp_body_buffer.seek(0)
if resp['status'] < 200 or resp['status'] >= 300:
raise zpmlib.ZPMException(resp_body_buffer.read())
if args.summary:
total_time, exec_table = _get_exec_table(resp)
print('Execution summary:')
print(exec_table)
print('Total time: %s' % total_time)
sys.stdout.write(resp_body_buffer.read())
def _get_exec_table(resp):
"""Build an execution summary table from a job execution response.
:param dict resp:
Response dictionary from job execution. Must contain a ``headers`` key
at least (and will typically contain ``status`` and ``reason`` as
well).
:returns:
Tuple of total execution time (`str`),
``prettytable.PrettyTable`` containing the summary of all node
executions in the job.
"""
headers = resp['headers']
total_time, table_data = _get_exec_table_data(headers)
table = prettytable.PrettyTable(EXEC_TABLE_HEADER)
for row in table_data:
table.add_row(row)
return total_time, table
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data
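# Illustrative sketch only: hand-built headers in the shape the parser above expects
# (comma-separated per-node values; the cdr line is "<total>, <node time>, <10 stats>"
# repeated for each node). All numbers are invented.
def _example_get_exec_table_data():
    headers = {
        'x-nexe-system': 'hello-1,hello-2',
        'x-nexe-status': 'ok,ok',
        'x-nexe-retcode': '0,1',
        'x-nexe-cdr-line': ('1.500, '
                            '0.700, 0.10 0.20 1 4096 2 8192 0 0 0 0, '
                            '0.800, 0.15 0.25 1 4096 1 4096 0 0 0 0'),
    }
    total_time, rows = _get_exec_table_data(headers)
    # total_time == '1.500'; each row has 14 fields matching EXEC_TABLE_HEADER
    return total_time, rows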
def execute(args, response_body_buffer=None):
"""Execute a zapp remotely on a ZeroCloud deployment.
:returns:
A `dict` with response data, including the keys 'status', 'reason', and
'headers'.
"""
conn = _get_zerocloud_conn(args)
resp = dict()
if args.container:
job_filename = SYSTEM_MAP_ZAPP_PATH
try:
headers, content = conn.get_object(args.container, job_filename)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise zpmlib.ZPMException("Could not find %s" % exc.http_path)
else:
raise zpmlib.ZPMException(str(exc))
job = json.loads(content)
conn.post_job(job, response_dict=resp,
response_body_buffer=response_body_buffer)
LOG.debug('RESP STATUS: %s %s', resp['status'], resp['reason'])
LOG.debug('RESP HEADERS: %s', resp['headers'])
else:
size = os.path.getsize(args.zapp)
zapp_file = open(args.zapp, 'rb')
data_reader = iter(lambda: zapp_file.read(BUFFER_SIZE), b'')
conn.post_zapp(data_reader, response_dict=resp, content_length=size,
response_body_buffer=response_body_buffer)
zapp_file.close()
return resp
def auth(args):
conn = _get_zerocloud_conn(args)
conn.authenticate()
print('Auth token: %s' % conn.token)
print('Storage URL: %s' % conn.url)
| zerovm/zerovm-cli | zpmlib/zpm.py | Python | apache-2.0 | 26,716 |
import torch
from torch import autograd, nn
batch_size = 1
seq_len = 7
input_size = 6
hidden_size = 4
example = [3, 2, 0, 0, 4, 5, 1, 1]
# input = autograd.Variable(torch.rand(seq_len, batch_size, input_size))
# print('input.size()', input.size())
embedding = nn.Embedding(input_size, hidden_size)
rnn = torch.nn.RNN(
input_size=hidden_size,
hidden_size=hidden_size,
num_layers=1,
nonlinearity='tanh'
)
# criterion = torch.nn.
# print('rnn', rnn)
input = autograd.Variable(
torch.LongTensor(example[:-1]).view(seq_len, batch_size)
)
target = autograd.Variable(
torch.LongTensor(example[1:]).view(seq_len, batch_size)
)
print('input', input)
parameters = [p for p in rnn.parameters()] + [p for p in embedding.parameters()]
optimizer = torch.optim.Adam(parameters)
epoch = 0
while True:
embedded_input = embedding(input)
state = autograd.Variable(torch.zeros(1, batch_size, hidden_size))
out, state = rnn(embedded_input, state)
# print('out.size()', out.size())
# print('embedding.weight.transpose(0, 1).size()', embedding.weight.transpose(0, 1).size())
out_unembedded = out.view(-1, hidden_size) @ embedding.weight.transpose(0, 1)
_, pred = out_unembedded.max(1)
# out_unembedded = out_unembedded.view(seq_len, batch_size, input_size)
# print('out_unembedded.size()', out_unembedded.size())
# print('target.size()', target.size())
    # nll_loss expects log-probabilities; cross_entropy applies log_softmax internally
    loss = torch.nn.functional.cross_entropy(out_unembedded, target.view(-1))
# print('epoch %s loss %s' % (epoch, loss.data[0]))
if epoch % 500 == 0:
print('epoch', epoch)
print('input', input.data.view(1, -1))
print('target', target.data.view(1, -1))
print('pred', pred.data.view(1, -1))
# print('out', out.data.view(1, -1))
rnn.zero_grad()
embedding.zero_grad()
loss.backward()
optimizer.step()
# print('out.size()', out.size())
# print('state.size()', state.size())
epoch += 1
| hughperkins/pub-prototyping | py/pytorch/test_rnn.py | Python | apache-2.0 | 1,950 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnsessionpolicy_vpnvserver_binding(base_resource) :
""" Binding class showing the vpnvserver that can be bound to vpnsessionpolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
"""Name of the session policy to display.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the session policy to display.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnsessionpolicy_vpnvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnsessionpolicy_vpnvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch vpnsessionpolicy_vpnvserver_binding resources.
"""
try :
obj = vpnsessionpolicy_vpnvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of vpnsessionpolicy_vpnvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnsessionpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count vpnsessionpolicy_vpnvserver_binding resources configued on NetScaler.
"""
try :
obj = vpnsessionpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of vpnsessionpolicy_vpnvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnsessionpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class vpnsessionpolicy_vpnvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnsessionpolicy_vpnvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnsessionpolicy_vpnvserver_binding = [vpnsessionpolicy_vpnvserver_binding() for _ in range(length)]
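# Illustrative sketch only (not part of the generated SDK): fetching the vserver
# bindings of a session policy via the classmethods above. It assumes `client` is an
# already logged-in nitro_service instance; the policy name is invented.
def _example_fetch_vserver_bindings(client):
    bindings = vpnsessionpolicy_vpnvserver_binding.get(client, "sess_pol_example")
    count = vpnsessionpolicy_vpnvserver_binding.count(client, "sess_pol_example")
    return bindings, count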
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnsessionpolicy_vpnvserver_binding.py | Python | apache-2.0 | 5,197 |
from backend import photos, boards
p = photos()
#print p.new('asdf',1,1)
print p.get(1)
b = boards()
print p.all(1)
print b.get(1)
| teriyakichild/photoboard | photoboard/tests/test.py | Python | apache-2.0 | 133 |
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import getpass
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def get_credentials(gateway):
"""
Returns username and trading mode (paper/live) for IB Gateway.
Parameters
----------
gateway : str, required
name of IB Gateway service to get credentials for (for example, 'ibg1')
Returns
-------
dict
credentials
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
response = houston.get("/{0}/credentials".format(gateway))
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def set_credentials(gateway, username=None, password=None, trading_mode=None):
"""
Set username/password and trading mode (paper/live) for IB Gateway.
Can be used to set new credentials or switch between paper and live trading
(must have previously entered live credentials). Setting new credentials will
restart IB Gateway and takes a moment to complete.
Credentials are encrypted at rest and never leave your deployment.
Parameters
----------
gateway : str, required
name of IB Gateway service to set credentials for (for example, 'ibg1')
username : str, optional
IBKR username (optional if only modifying trading environment)
password : str, optional
IBKR password (if omitted and username is provided, will be prompted
for password)
trading_mode : str, optional
the trading mode to use ('paper' or 'live')
Returns
-------
dict
status message
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
if username and not password:
password = getpass.getpass(prompt="Enter IBKR Password: ")
data = {}
if username:
data["username"] = username
if password:
data["password"] = password
if trading_mode:
data["trading_mode"] = trading_mode
response = houston.put("/{0}/credentials".format(gateway), data=data, timeout=180)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_get_or_set_credentials(*args, **kwargs):
if kwargs.get("username", None) or kwargs.get("password", None) or kwargs.get("trading_mode", None):
return json_to_cli(set_credentials, *args, **kwargs)
else:
return json_to_cli(get_credentials, gateway=kwargs.get("gateway", None))
def list_gateway_statuses(status=None, gateways=None):
"""
Query statuses of IB Gateways.
Parameters
----------
status : str, optional
limit to IB Gateways in this status. Possible choices: running, stopped, error
gateways : list of str, optional
limit to these IB Gateways
Returns
-------
dict of gateway:status (if status arg not provided), or list of gateways (if status arg provided)
"""
params = {}
if gateways:
params["gateways"] = gateways
if status:
params["status"] = status
response = houston.get("/ibgrouter/gateways", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_gateway_statuses(*args, **kwargs):
return json_to_cli(list_gateway_statuses, *args, **kwargs)
def start_gateways(gateways=None, wait=False):
"""
Start one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to start before returning (default is to start
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.post("/ibgrouter/gateways", params=params, timeout=120)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_start_gateways(*args, **kwargs):
return json_to_cli(start_gateways, *args, **kwargs)
def stop_gateways(gateways=None, wait=False):
"""
Stop one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to stop before returning (default is to stop
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.delete("/ibgrouter/gateways", params=params, timeout=60)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_stop_gateways(*args, **kwargs):
return json_to_cli(stop_gateways, *args, **kwargs)
def load_ibg_config(filename):
"""
Upload a new IB Gateway permissions config.
Permission configs are only necessary when running multiple IB Gateways with
differing market data permissions.
Parameters
----------
filename : str, required
the config file to upload
Returns
-------
dict
status message
"""
with open(filename) as file:
response = houston.put("/ibgrouter/config", data=file.read())
houston.raise_for_status_with_json(response)
return response.json()
def get_ibg_config():
"""
Returns the current IB Gateway permissions config.
Returns
-------
dict
the config as a dict
"""
response = houston.get("/ibgrouter/config")
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def _cli_load_or_show_config(filename=None):
if filename:
return json_to_cli(load_ibg_config, filename)
else:
return json_to_cli(get_ibg_config)
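# Illustrative sketch only: one way the module-level helpers above might be combined
# to switch a gateway to paper trading and start it. The gateway name 'ibg1' is
# invented; real names come from your deployment.
def _example_switch_to_paper_and_start():
    set_credentials("ibg1", trading_mode="paper")
    start_gateways(gateways=["ibg1"], wait=True)
    # Returns e.g. {'ibg1': 'running'} once the gateway is up
    return list_gateway_statuses(gateways=["ibg1"])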
| quantrocket-llc/quantrocket-client | quantrocket/ibg.py | Python | apache-2.0 | 6,653 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
import h2o
class H2OXGBoostEstimator(H2OEstimator):
"""
XGBoost
    Builds an eXtreme Gradient Boosting model using the native XGBoost backend.
"""
algo = "xgboost"
def __init__(self, **kwargs):
super(H2OXGBoostEstimator, self).__init__()
self._parms = {}
names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models",
"keep_cross_validation_predictions", "keep_cross_validation_fold_assignment",
"score_each_iteration", "fold_assignment", "fold_column", "response_column", "ignored_columns",
"ignore_const_cols", "offset_column", "weights_column", "stopping_rounds", "stopping_metric",
"stopping_tolerance", "max_runtime_secs", "seed", "distribution", "tweedie_power",
"categorical_encoding", "quiet_mode", "export_checkpoints_dir", "ntrees", "max_depth", "min_rows",
"min_child_weight", "learn_rate", "eta", "sample_rate", "subsample", "col_sample_rate",
"colsample_bylevel", "col_sample_rate_per_tree", "colsample_bytree", "max_abs_leafnode_pred",
"max_delta_step", "monotone_constraints", "score_tree_interval", "min_split_improvement", "gamma",
"nthread", "max_bins", "max_leaves", "min_sum_hessian_in_leaf", "min_data_in_leaf", "sample_type",
"normalize_type", "rate_drop", "one_drop", "skip_drop", "tree_method", "grow_policy", "booster",
"reg_lambda", "reg_alpha", "dmatrix_type", "backend", "gpu_id"}
if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
for pname, pvalue in kwargs.items():
if pname == 'model_id':
self._id = pvalue
self._parms["model_id"] = pvalue
elif pname in names_list:
# Using setattr(...) will invoke type-checking of the arguments
setattr(self, pname, pvalue)
else:
raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
assert_is_type(training_frame, None, H2OFrame)
self._parms["training_frame"] = training_frame
@property
def validation_frame(self):
"""
Id of the validation data frame.
Type: ``H2OFrame``.
"""
return self._parms.get("validation_frame")
@validation_frame.setter
def validation_frame(self, validation_frame):
assert_is_type(validation_frame, None, H2OFrame)
self._parms["validation_frame"] = validation_frame
@property
def nfolds(self):
"""
Number of folds for K-fold cross-validation (0 to disable or >= 2).
Type: ``int`` (default: ``0``).
"""
return self._parms.get("nfolds")
@nfolds.setter
def nfolds(self, nfolds):
assert_is_type(nfolds, None, int)
self._parms["nfolds"] = nfolds
@property
def keep_cross_validation_models(self):
"""
Whether to keep the cross-validation models.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("keep_cross_validation_models")
@keep_cross_validation_models.setter
def keep_cross_validation_models(self, keep_cross_validation_models):
assert_is_type(keep_cross_validation_models, None, bool)
self._parms["keep_cross_validation_models"] = keep_cross_validation_models
@property
def keep_cross_validation_predictions(self):
"""
Whether to keep the predictions of the cross-validation models.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_predictions")
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, keep_cross_validation_predictions):
assert_is_type(keep_cross_validation_predictions, None, bool)
self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions
@property
def keep_cross_validation_fold_assignment(self):
"""
Whether to keep the cross-validation fold assignment.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("keep_cross_validation_fold_assignment")
@keep_cross_validation_fold_assignment.setter
def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment):
assert_is_type(keep_cross_validation_fold_assignment, None, bool)
self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def fold_assignment(self):
"""
Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
the folds based on the response variable, for classification problems.
One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"`` (default: ``"auto"``).
"""
return self._parms.get("fold_assignment")
@fold_assignment.setter
def fold_assignment(self, fold_assignment):
assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
self._parms["fold_assignment"] = fold_assignment
@property
def fold_column(self):
"""
Column with cross-validation fold index assignment per observation.
Type: ``str``.
"""
return self._parms.get("fold_column")
@fold_column.setter
def fold_column(self, fold_column):
assert_is_type(fold_column, None, str)
self._parms["fold_column"] = fold_column
@property
def response_column(self):
"""
Response variable column.
Type: ``str``.
"""
return self._parms.get("response_column")
@response_column.setter
def response_column(self, response_column):
assert_is_type(response_column, None, str)
self._parms["response_column"] = response_column
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def offset_column(self):
"""
Offset column. This will be added to the combination of columns before applying the link function.
Type: ``str``.
"""
return self._parms.get("offset_column")
@offset_column.setter
def offset_column(self, offset_column):
assert_is_type(offset_column, None, str)
self._parms["offset_column"] = offset_column
@property
def weights_column(self):
"""
Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the
dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data
frame. This is typically the number of times a row is repeated, but non-integer values are supported as well.
During training, rows with higher weights matter more, due to the larger loss function pre-factor.
Type: ``str``.
"""
return self._parms.get("weights_column")
@weights_column.setter
def weights_column(self, weights_column):
assert_is_type(weights_column, None, str)
self._parms["weights_column"] = weights_column
@property
def stopping_rounds(self):
"""
Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Type: ``int`` (default: ``0``).
"""
return self._parms.get("stopping_rounds")
@stopping_rounds.setter
def stopping_rounds(self, stopping_rounds):
assert_is_type(stopping_rounds, None, int)
self._parms["stopping_rounds"] = stopping_rounds
@property
def stopping_metric(self):
"""
Metric to use for early stopping (AUTO: logloss for classification, deviance for regression). Note that custom
and custom_increasing can only be used in GBM and DRF with the Python client.
One of: ``"auto"``, ``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``,
``"lift_top_group"``, ``"misclassification"``, ``"mean_per_class_error"``, ``"custom"``, ``"custom_increasing"``
(default: ``"auto"``).
"""
return self._parms.get("stopping_metric")
@stopping_metric.setter
def stopping_metric(self, stopping_metric):
assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"))
self._parms["stopping_metric"] = stopping_metric
@property
def stopping_tolerance(self):
"""
Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
Type: ``float`` (default: ``0.001``).
"""
return self._parms.get("stopping_tolerance")
@stopping_tolerance.setter
def stopping_tolerance(self, stopping_tolerance):
assert_is_type(stopping_tolerance, None, numeric)
self._parms["stopping_tolerance"] = stopping_tolerance
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def seed(self):
"""
Seed for pseudo random number generator (if applicable)
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def distribution(self):
"""
Distribution function
One of: ``"auto"``, ``"bernoulli"``, ``"multinomial"``, ``"gaussian"``, ``"poisson"``, ``"gamma"``,
``"tweedie"``, ``"laplace"``, ``"quantile"``, ``"huber"`` (default: ``"auto"``).
"""
return self._parms.get("distribution")
@distribution.setter
def distribution(self, distribution):
assert_is_type(distribution, None, Enum("auto", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"))
self._parms["distribution"] = distribution
@property
def tweedie_power(self):
"""
Tweedie power for Tweedie regression, must be between 1 and 2.
Type: ``float`` (default: ``1.5``).
"""
return self._parms.get("tweedie_power")
@tweedie_power.setter
def tweedie_power(self, tweedie_power):
assert_is_type(tweedie_power, None, numeric)
self._parms["tweedie_power"] = tweedie_power
@property
def categorical_encoding(self):
"""
Encoding scheme for categorical features
One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``).
"""
return self._parms.get("categorical_encoding")
@categorical_encoding.setter
def categorical_encoding(self, categorical_encoding):
assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
self._parms["categorical_encoding"] = categorical_encoding
@property
def quiet_mode(self):
"""
Enable quiet mode
Type: ``bool`` (default: ``True``).
"""
return self._parms.get("quiet_mode")
@quiet_mode.setter
def quiet_mode(self, quiet_mode):
assert_is_type(quiet_mode, None, bool)
self._parms["quiet_mode"] = quiet_mode
@property
def export_checkpoints_dir(self):
"""
Automatically export generated models to this directory.
Type: ``str``.
"""
return self._parms.get("export_checkpoints_dir")
@export_checkpoints_dir.setter
def export_checkpoints_dir(self, export_checkpoints_dir):
assert_is_type(export_checkpoints_dir, None, str)
self._parms["export_checkpoints_dir"] = export_checkpoints_dir
@property
def ntrees(self):
"""
(same as n_estimators) Number of trees.
Type: ``int`` (default: ``50``).
"""
return self._parms.get("ntrees")
@ntrees.setter
def ntrees(self, ntrees):
assert_is_type(ntrees, None, int)
self._parms["ntrees"] = ntrees
@property
def max_depth(self):
"""
Maximum tree depth.
Type: ``int`` (default: ``6``).
"""
return self._parms.get("max_depth")
@max_depth.setter
def max_depth(self, max_depth):
assert_is_type(max_depth, None, int)
self._parms["max_depth"] = max_depth
@property
def min_rows(self):
"""
(same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("min_rows")
@min_rows.setter
def min_rows(self, min_rows):
assert_is_type(min_rows, None, numeric)
self._parms["min_rows"] = min_rows
@property
def min_child_weight(self):
"""
(same as min_rows) Fewest allowed (weighted) observations in a leaf.
Type: ``float`` (default: ``1``).
"""
return self._parms.get("min_child_weight")
@min_child_weight.setter
def min_child_weight(self, min_child_weight):
assert_is_type(min_child_weight, None, numeric)
self._parms["min_child_weight"] = min_child_weight
@property
def learn_rate(self):
"""
(same as eta) Learning rate (from 0.0 to 1.0)
Type: ``float`` (default: ``0.3``).
"""
return self._parms.get("learn_rate")
@learn_rate.setter
def learn_rate(self, learn_rate):
assert_is_type(learn_rate, None, numeric)
self._parms["learn_rate"] = learn_rate
@property
def eta(self):
"""
(same as learn_rate) Learning rate (from 0.0 to 1.0)
Type: ``float`` (default: ``0.3``).
"""
return self._parms.get("eta")
@eta.setter
def eta(self, eta):
assert_is_type(eta, None, numeric)
self._parms["eta"] = eta
@property
def sample_rate(self):
"""
(same as subsample) Row sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("sample_rate")
@sample_rate.setter
def sample_rate(self, sample_rate):
assert_is_type(sample_rate, None, numeric)
self._parms["sample_rate"] = sample_rate
@property
def subsample(self):
"""
(same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("subsample")
@subsample.setter
def subsample(self, subsample):
assert_is_type(subsample, None, numeric)
self._parms["subsample"] = subsample
@property
def col_sample_rate(self):
"""
(same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate")
@col_sample_rate.setter
def col_sample_rate(self, col_sample_rate):
assert_is_type(col_sample_rate, None, numeric)
self._parms["col_sample_rate"] = col_sample_rate
@property
def colsample_bylevel(self):
"""
(same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("colsample_bylevel")
@colsample_bylevel.setter
def colsample_bylevel(self, colsample_bylevel):
assert_is_type(colsample_bylevel, None, numeric)
self._parms["colsample_bylevel"] = colsample_bylevel
@property
def col_sample_rate_per_tree(self):
"""
(same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("col_sample_rate_per_tree")
@col_sample_rate_per_tree.setter
def col_sample_rate_per_tree(self, col_sample_rate_per_tree):
assert_is_type(col_sample_rate_per_tree, None, numeric)
self._parms["col_sample_rate_per_tree"] = col_sample_rate_per_tree
@property
def colsample_bytree(self):
"""
(same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
Type: ``float`` (default: ``1``).
"""
return self._parms.get("colsample_bytree")
@colsample_bytree.setter
def colsample_bytree(self, colsample_bytree):
assert_is_type(colsample_bytree, None, numeric)
self._parms["colsample_bytree"] = colsample_bytree
@property
def max_abs_leafnode_pred(self):
"""
(same as max_delta_step) Maximum absolute value of a leaf node prediction
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_abs_leafnode_pred")
@max_abs_leafnode_pred.setter
def max_abs_leafnode_pred(self, max_abs_leafnode_pred):
assert_is_type(max_abs_leafnode_pred, None, float)
self._parms["max_abs_leafnode_pred"] = max_abs_leafnode_pred
@property
def max_delta_step(self):
"""
(same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
Type: ``float`` (default: ``0``).
"""
return self._parms.get("max_delta_step")
@max_delta_step.setter
def max_delta_step(self, max_delta_step):
assert_is_type(max_delta_step, None, float)
self._parms["max_delta_step"] = max_delta_step
@property
def monotone_constraints(self):
"""
A mapping representing monotonic constraints. Use +1 to enforce an increasing constraint and -1 to specify a
decreasing constraint.
Type: ``dict``.
"""
return self._parms.get("monotone_constraints")
@monotone_constraints.setter
def monotone_constraints(self, monotone_constraints):
assert_is_type(monotone_constraints, None, dict)
self._parms["monotone_constraints"] = monotone_constraints
@property
def score_tree_interval(self):
"""
Score the model after every so many trees. Disabled if set to 0.
Type: ``int`` (default: ``0``).
"""
return self._parms.get("score_tree_interval")
@score_tree_interval.setter
def score_tree_interval(self, score_tree_interval):
assert_is_type(score_tree_interval, None, int)
self._parms["score_tree_interval"] = score_tree_interval
@property
def min_split_improvement(self):
"""
(same as gamma) Minimum relative improvement in squared error reduction for a split to happen
Type: ``float`` (default: ``0``).
"""
return self._parms.get("min_split_improvement")
@min_split_improvement.setter
def min_split_improvement(self, min_split_improvement):
assert_is_type(min_split_improvement, None, float)
self._parms["min_split_improvement"] = min_split_improvement
@property
def gamma(self):
"""
(same as min_split_improvement) Minimum relative improvement in squared error reduction for a split to happen
Type: ``float`` (default: ``0``).
"""
return self._parms.get("gamma")
@gamma.setter
def gamma(self, gamma):
assert_is_type(gamma, None, float)
self._parms["gamma"] = gamma
@property
def nthread(self):
"""
Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits (-nthreads
parameter). Defaults to maximum available
Type: ``int`` (default: ``-1``).
"""
return self._parms.get("nthread")
@nthread.setter
def nthread(self, nthread):
assert_is_type(nthread, None, int)
self._parms["nthread"] = nthread
@property
def max_bins(self):
"""
For tree_method=hist only: maximum number of bins
Type: ``int`` (default: ``256``).
"""
return self._parms.get("max_bins")
@max_bins.setter
def max_bins(self, max_bins):
assert_is_type(max_bins, None, int)
self._parms["max_bins"] = max_bins
@property
def max_leaves(self):
"""
For tree_method=hist only: maximum number of leaves
Type: ``int`` (default: ``0``).
"""
return self._parms.get("max_leaves")
@max_leaves.setter
def max_leaves(self, max_leaves):
assert_is_type(max_leaves, None, int)
self._parms["max_leaves"] = max_leaves
@property
def min_sum_hessian_in_leaf(self):
"""
        For tree_method=hist only: the minimum sum of hessian in a leaf to keep splitting
Type: ``float`` (default: ``100``).
"""
return self._parms.get("min_sum_hessian_in_leaf")
@min_sum_hessian_in_leaf.setter
def min_sum_hessian_in_leaf(self, min_sum_hessian_in_leaf):
assert_is_type(min_sum_hessian_in_leaf, None, float)
self._parms["min_sum_hessian_in_leaf"] = min_sum_hessian_in_leaf
@property
def min_data_in_leaf(self):
"""
        For tree_method=hist only: the minimum data in a leaf to keep splitting
Type: ``float`` (default: ``0``).
"""
return self._parms.get("min_data_in_leaf")
@min_data_in_leaf.setter
def min_data_in_leaf(self, min_data_in_leaf):
assert_is_type(min_data_in_leaf, None, float)
self._parms["min_data_in_leaf"] = min_data_in_leaf
@property
def sample_type(self):
"""
For booster=dart only: sample_type
One of: ``"uniform"``, ``"weighted"`` (default: ``"uniform"``).
"""
return self._parms.get("sample_type")
@sample_type.setter
def sample_type(self, sample_type):
assert_is_type(sample_type, None, Enum("uniform", "weighted"))
self._parms["sample_type"] = sample_type
@property
def normalize_type(self):
"""
For booster=dart only: normalize_type
One of: ``"tree"``, ``"forest"`` (default: ``"tree"``).
"""
return self._parms.get("normalize_type")
@normalize_type.setter
def normalize_type(self, normalize_type):
assert_is_type(normalize_type, None, Enum("tree", "forest"))
self._parms["normalize_type"] = normalize_type
@property
def rate_drop(self):
"""
For booster=dart only: rate_drop (0..1)
Type: ``float`` (default: ``0``).
"""
return self._parms.get("rate_drop")
@rate_drop.setter
def rate_drop(self, rate_drop):
assert_is_type(rate_drop, None, float)
self._parms["rate_drop"] = rate_drop
@property
def one_drop(self):
"""
For booster=dart only: one_drop
Type: ``bool`` (default: ``False``).
"""
return self._parms.get("one_drop")
@one_drop.setter
def one_drop(self, one_drop):
assert_is_type(one_drop, None, bool)
self._parms["one_drop"] = one_drop
@property
def skip_drop(self):
"""
For booster=dart only: skip_drop (0..1)
Type: ``float`` (default: ``0``).
"""
return self._parms.get("skip_drop")
@skip_drop.setter
def skip_drop(self, skip_drop):
assert_is_type(skip_drop, None, float)
self._parms["skip_drop"] = skip_drop
@property
def tree_method(self):
"""
Tree method
One of: ``"auto"``, ``"exact"``, ``"approx"``, ``"hist"`` (default: ``"auto"``).
"""
return self._parms.get("tree_method")
@tree_method.setter
def tree_method(self, tree_method):
assert_is_type(tree_method, None, Enum("auto", "exact", "approx", "hist"))
self._parms["tree_method"] = tree_method
@property
def grow_policy(self):
"""
Grow policy - depthwise is standard GBM, lossguide is LightGBM
One of: ``"depthwise"``, ``"lossguide"`` (default: ``"depthwise"``).
"""
return self._parms.get("grow_policy")
@grow_policy.setter
def grow_policy(self, grow_policy):
assert_is_type(grow_policy, None, Enum("depthwise", "lossguide"))
self._parms["grow_policy"] = grow_policy
@property
def booster(self):
"""
Booster type
One of: ``"gbtree"``, ``"gblinear"``, ``"dart"`` (default: ``"gbtree"``).
"""
return self._parms.get("booster")
@booster.setter
def booster(self, booster):
assert_is_type(booster, None, Enum("gbtree", "gblinear", "dart"))
self._parms["booster"] = booster
@property
def reg_lambda(self):
"""
L2 regularization
Type: ``float`` (default: ``1``).
"""
return self._parms.get("reg_lambda")
@reg_lambda.setter
def reg_lambda(self, reg_lambda):
assert_is_type(reg_lambda, None, float)
self._parms["reg_lambda"] = reg_lambda
@property
def reg_alpha(self):
"""
L1 regularization
Type: ``float`` (default: ``0``).
"""
return self._parms.get("reg_alpha")
@reg_alpha.setter
def reg_alpha(self, reg_alpha):
assert_is_type(reg_alpha, None, float)
self._parms["reg_alpha"] = reg_alpha
@property
def dmatrix_type(self):
"""
Type of DMatrix. For sparse, NAs and 0 are treated equally.
One of: ``"auto"``, ``"dense"``, ``"sparse"`` (default: ``"auto"``).
"""
return self._parms.get("dmatrix_type")
@dmatrix_type.setter
def dmatrix_type(self, dmatrix_type):
assert_is_type(dmatrix_type, None, Enum("auto", "dense", "sparse"))
self._parms["dmatrix_type"] = dmatrix_type
@property
def backend(self):
"""
Backend. By default (auto), a GPU is used if available.
One of: ``"auto"``, ``"gpu"``, ``"cpu"`` (default: ``"auto"``).
"""
return self._parms.get("backend")
@backend.setter
def backend(self, backend):
assert_is_type(backend, None, Enum("auto", "gpu", "cpu"))
self._parms["backend"] = backend
@property
def gpu_id(self):
"""
Which GPU to use.
Type: ``int`` (default: ``0``).
"""
return self._parms.get("gpu_id")
@gpu_id.setter
def gpu_id(self, gpu_id):
assert_is_type(gpu_id, None, int)
self._parms["gpu_id"] = gpu_id
    # Ask the H2O server whether an XGBoost model can be built (depends on availability of native backends)
@staticmethod
def available():
"""
        Returns True if an XGBoost model can be built, or False otherwise.
"""
if "XGBoost" not in h2o.cluster().list_core_extensions():
print("Cannot build an XGBoost model - no backend found.")
return False
else:
return True
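# Minimal end-to-end sketch (hedged): assumes the enclosing class is H2OXGBoostEstimator,
# that an H2O cluster can be started with h2o.init(), and that the CSV path and column
# name below are placeholders for the reader's own data.
if __name__ == "__main__":
    import h2o
    from h2o.estimators.xgboost import H2OXGBoostEstimator

    h2o.init()
    if H2OXGBoostEstimator.available():
        frame = h2o.import_file("path/to/data.csv")  # hypothetical path
        model = H2OXGBoostEstimator(ntrees=50, max_depth=6, seed=42,
                                    stopping_rounds=3, stopping_metric="auc")
        model.train(y="response", training_frame=frame)  # "response" is illustrative
        print(model.model_performance(frame))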
| h2oai/h2o-dev | h2o-py/h2o/estimators/xgboost.py | Python | apache-2.0 | 29,759 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import index
from google.cloud.aiplatform_v1beta1.types import index_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexServiceGrpcTransport
class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport):
"""gRPC AsyncIO backend transport for IndexService.
A service for creating and managing Vertex AI's Index
resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_index(
self,
) -> Callable[
[index_service.CreateIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create index method over gRPC.
Creates an Index.
Returns:
Callable[[~.CreateIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index" not in self._stubs:
self._stubs["create_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex",
request_serializer=index_service.CreateIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index"]
@property
def get_index(
self,
) -> Callable[[index_service.GetIndexRequest], Awaitable[index.Index]]:
r"""Return a callable for the get index method over gRPC.
Gets an Index.
Returns:
Callable[[~.GetIndexRequest],
Awaitable[~.Index]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index" not in self._stubs:
self._stubs["get_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexService/GetIndex",
request_serializer=index_service.GetIndexRequest.serialize,
response_deserializer=index.Index.deserialize,
)
return self._stubs["get_index"]
@property
def list_indexes(
self,
) -> Callable[
[index_service.ListIndexesRequest], Awaitable[index_service.ListIndexesResponse]
]:
r"""Return a callable for the list indexes method over gRPC.
Lists Indexes in a Location.
Returns:
Callable[[~.ListIndexesRequest],
Awaitable[~.ListIndexesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_indexes" not in self._stubs:
self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes",
request_serializer=index_service.ListIndexesRequest.serialize,
response_deserializer=index_service.ListIndexesResponse.deserialize,
)
return self._stubs["list_indexes"]
@property
def update_index(
self,
) -> Callable[
[index_service.UpdateIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update index method over gRPC.
Updates an Index.
Returns:
Callable[[~.UpdateIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index" not in self._stubs:
self._stubs["update_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex",
request_serializer=index_service.UpdateIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_index"]
@property
def delete_index(
self,
) -> Callable[
[index_service.DeleteIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete index method over gRPC.
Deletes an Index. An Index can only be deleted when all its
[DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
had been undeployed.
Returns:
Callable[[~.DeleteIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index" not in self._stubs:
self._stubs["delete_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex",
request_serializer=index_service.DeleteIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index"]
__all__ = ("IndexServiceGrpcAsyncIOTransport",)
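# Minimal construction sketch (hedged): assumes Application Default Credentials are
# configured; the project and location in the request are placeholders. Each RPC
# property above returns a callable that performs the corresponding gRPC call.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        transport = IndexServiceGrpcAsyncIOTransport(host="aiplatform.googleapis.com")
        request = index_service.ListIndexesRequest(
            parent="projects/my-project/locations/us-central1"  # hypothetical resource name
        )
        response = await transport.list_indexes(request)
        print(response)

    asyncio.run(_demo())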
| sasha-gitg/python-aiplatform | google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py | Python | apache-2.0 | 16,914 |
# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try:
return func(*args, **kwargs)
except n_exc.NotFound:
pass
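# Minimal usage sketch (hedged): assumes the neutron/neutron_lib imports at the top of
# this module resolve; the callable below is illustrative. In the mixin above,
# ignore_notfound wraps delete calls in _cleanup so concurrent teardown attempts that
# race each other do not abort the cleanup.
if __name__ == "__main__":
    def _fake_delete(resource_id):
        raise n_exc.NotFound()
    ignore_notfound(_fake_delete, "res-1")  # NotFound is swallowed; returns None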
| huntxu/neutron | neutron/services/auto_allocate/db.py | Python | apache-2.0 | 17,330 |
#-*- coding:utf-8 -*-
'''
====================================================================================
Copyright 2013, 2014 Windy Darian (大地无敌), Studio "Sekai no Kagami"
(世界之镜制作组) of Seven Ocean Game Arts (七海游戏文化社
, 北京航空航天大学学生七海游戏文化社) @ http://sogarts.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
====================================================================================
Created on Jul 27, 2013
@author: Windy Darian (大地无敌)
'''
import os,sys
from StringIO import StringIO
from datetime import datetime
from panda3d.core import loadPrcFile,WindowProperties,loadPrcFileData# @UnresolvedImport
from direct.gui.OnscreenImage import OnscreenImage
from direct.showbase.ShowBase import ShowBase
from direct.filter.FilterManager import FilterManager
from direct.stdpy.file import open,exists
from direct.stdpy.threading import Lock
from direct.stdpy import pickle
from story_manager import StoryManager
from runtime_data import game_settings,read_text,restoreRuntimeData, getCurrentStyle as rgetStyle, setCurrentStyle as rsetStyle,restoreReadText
from runtime_data import global_data,restoreGlobalData, MAX_AUTOSAVE, MAX_QUICKSAVE
from runtime_data import loadDefaultSettings, restoreSettings
from audio_player import AudioPlayer
from save_load_form import SaveForm,SavingInfo,LoadForm
import gui.color_themes as color_themes
from safeprint import safeprint
from main_menu import MainMenu
from config_form import ConfigForm
_savingloadinglock = Lock()
def save_data(file_name, data, mode = 2):
_savingloadinglock.acquire()
try:
f = open(file_name,'wb')
pickle.dump(data, f, mode)
f.close()
except Exception as exp:
raise exp
finally:
_savingloadinglock.release()
def load_data(file_name):
_savingloadinglock.acquire()
try:
f = open(file_name,'rb')
loaded = pickle.load(f)
f.close()
except Exception as exp:
raise exp
finally:
_savingloadinglock.release()
return loaded
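# Round-trip sketch for the helpers above (illustrative; the file name is hypothetical):
#   save_data(game_settings['save_folder'] + 'demo.dat', {'chapter': 3})
#   restored = load_data(game_settings['save_folder'] + 'demo.dat')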
class SogalBase(ShowBase):
"""The ShowBase of the sogal
Attributes:
supportedResolutions: player's system supported resolutions, Note that it is constructed after the initialization of ShowBase.
"""
def __init__(self):
"初始化"
dir = os.path.dirname(game_settings['save_folder'])
if not os.path.exists(dir):
os.makedirs(dir)
self.initGameSettings()
        # Load the configuration file
loadPrcFile("config/PandaConfig.prc")
loadPrcFileData('', 'win-size ' + str(game_settings['screen_resolution'][0]) + ' ' + str(game_settings['screen_resolution'][1]) )
        # Construct Panda3D's ShowBase
ShowBase.__init__(self)
#Retrieving available resolutions
#self.makeDefaultPipe()
di = self.pipe.getDisplayInformation()
self.supportedResolutions = []
for index in range(di.getTotalDisplayModes()):
self.supportedResolutions.append((di.getDisplayModeWidth(index), di.getDisplayModeHeight(index)))
color_themes.initStyles()
props = WindowProperties( self.win.getProperties() )
props.setSize(int(game_settings['screen_resolution'][0]),int(game_settings['screen_resolution'][1]))
if game_settings['full_screen'] and not props.getFullscreen():
props.setFullscreen(True)
props.setTitle(game_settings['window_title'])
self.win.requestProperties(props)
self.cam2dp.node().getDisplayRegion(0).setSort(-20) #Set render2dp to background
self.disableMouse() #Disable panda3d's default mouse control
self.cam.node().getDisplayRegion(0).setActive(0) #disable default camera
self.audioPlayer = AudioPlayer()
        self.focusStack = [] # a stack of focusable windows; the top window gets focus
self._loadReadText()
self._loadGlobalData()
#add event handlers
self.accept('alt-enter', self.toggleFullScreen)
self.accept('save_data', self.save)
self.accept('load_data', self.load)
self.accept('load_memory', self.loadMemory)
self.accept('request_focus', self.grantFocus)
self.accept('remove_focus', self.cancelFocus)
self.accept('return_to_title', self.returnToTitle)
self.accept('start_game', self.startGame)
self.accept('load_game', self.loadGame)
self.accept('config_form', self.showConfig)
self.accept('exit_game', self.exit)
self.accept('quick_save', self.quickSave)
self.accept('quick_load', self.quickLoad)
self.accept('auto_save', self.autoSave)
self.accept('print_screen', self.takeScrnShot)
self.accept('f10', self.takeScrnShot)
#Font setting
self.textFont = color_themes.default_font
        # Background settings
self.setBackgroundColor(0,0,0,1);
self.backgroundImage = None
self.initGameWindows()
self.mainMenu = None
self.storyManager = None
def initGameWindows(self):
'''
        Initialize the common save, load and config forms.
        Override this if you need to customize them further.
'''
self.saveForm = SaveForm()
self.loadForm = LoadForm()
self.configForm = ConfigForm()
def initGameSettings(self):
'''
        Initialize game settings.
        More complex game settings can be set up here.
        This runs before the Panda3D ShowBase is constructed.
'''
loadDefaultSettings('config/default.sconf')
self._loadSettings()
def initMainMenu(self,customMainMenu = None):
        '''Call this to initialize and show the main menu'''
if not self.mainMenu:
if not customMainMenu:
self.mainMenu = MainMenu()
else: self.mainMenu = customMainMenu
self.mainMenu.open()
def isStarted(self):
return bool(self.storyManager)
def getCurrentFocus(self):
if len(self.focusStack) > 0:
return self.focusStack[-1]
else: return None
def hasFocus(self,obj):
'''returns whether the object is the current focus'''
return self.getCurrentFocus() == obj
def grantFocus(self,obj):
pre = self.getCurrentFocus()
if obj in self.focusStack:
self.focusStack.remove(obj)
self.focusStack.append(obj)
else:
self.focusStack.append(obj)
if pre != obj:
if pre:
pre.defocused()
obj.focused()
def cancelFocus(self,obj):
if obj in self.focusStack:
self.focusStack.remove(obj)
obj.defocused()
cur = self.getCurrentFocus()
if cur != obj and cur:
cur.focused()
def setGameBackgroundImage(self,path):
        ''' Load a full-window background image '''
if self.backgroundImage:
self.backgroundImage.destroy()
self.backgroundImage = OnscreenImage(parent=aspect2dp, image=path) # @UndefinedVariable
def save(self,saving,fileName,message):
info = SavingInfo(message,datetime.now())
try:
save_data(game_settings['save_folder'] + fileName + game_settings['save_type'], saving)
save_data(game_settings['save_folder'] + fileName + game_settings['save_infotype'], info)
except Exception as error:
safeprint(error)
return
self.saveForm.reloadMember(fileName)
self.loadForm.reloadMember(fileName)
self._saveReadText()
self._saveGlobalData()
def quickSave(self, saving, message):
global_data['currentQuicksave'] += 1
if global_data['currentQuicksave'] > MAX_QUICKSAVE:
global_data['currentQuicksave'] = 1
currentqs = global_data['currentQuicksave']
self.save(saving, 'quick_save' + str(currentqs), message)
def autoSave(self, saving, message):
global_data['currentAutosave'] += 1
if global_data['currentAutosave'] > MAX_AUTOSAVE:
global_data['currentAutosave'] = 1
currentas = global_data['currentAutosave']
self.save(saving, 'auto_save' + str(currentas), message)
def load(self,fileName):
try:
savedData = load_data(game_settings['save_folder'] + fileName + game_settings['save_type'])
except Exception as error:
safeprint(error)
return
if self.mainMenu:
self.mainMenu.close()
if self.storyManager:
self.storyManager.destroy()
self.audioPlayer.stopAll(0.5)
restoreRuntimeData(savedData)
self.audioPlayer.reload()
self.storyManager = StoryManager()
def quickLoad(self):
if self.hasQuickData():
self.load('quick_save' + str(global_data['currentQuicksave']))
def hasQuickData(self):
return exists(game_settings['save_folder'] + 'quick_save' + str(global_data['currentQuicksave']) + game_settings['save_type'])
def loadMemory(self,dumped):
try:
loaded = pickle.loads(dumped)
except Exception as exp:
safeprint(exp)
return
self.storyManager.destroy()
self.audioPlayer.stopAll(0.5)
restoreRuntimeData(loaded)
self.audioPlayer.reload()
self.storyManager = StoryManager()
def getStyle(self, sheet = None):
return rgetStyle(sheet)
def setStyle(self,value):
return rsetStyle(value)
def toggleFullScreen(self):
props = WindowProperties( self.win.getProperties() )
if not props.getFullscreen():
props.setSize(int(game_settings['screen_resolution'][0]),int(game_settings['screen_resolution'][1]))
props.setFullscreen(True)
else:
props.setFullscreen(False)
self.win.requestProperties(props)
game_settings['full_screen'] = not game_settings['full_screen']
if self.configForm:
self.configForm.refreshSettings()
messenger.send('window-event', [self])
def exitfunc(self, *args, **kwargs):
self._saveReadText()
self._saveGlobalData()
self._saveSettings()
return ShowBase.exitfunc(self, *args, **kwargs)
def startGame(self,scene):
if self.mainMenu:
self.mainMenu.close()
if self.storyManager:
self.storyManager.destroy()
self.audioPlayer.stopAll(0.5)
self.storyManager = StoryManager()
self.storyManager.beginScene(scene)
def loadGame(self):
self.loadForm.show()
def showConfig(self):
self.configForm.show()
def exit(self):
sys.exit()
def returnToTitle(self):
if self.storyManager:
self.storyManager.destroy()
self.audioPlayer.stopAll(0.5)
if self.mainMenu:
self.mainMenu.open()
def takeScrnShot(self):
'''Take a screenshot'''
dir = os.path.dirname('screenshots/')
if not os.path.exists(dir):
os.makedirs(dir)
self.screenshot(namePrefix = 'screenshots/screenshot', defaultFilename = 1)
def setScreenResolution(self, resolution = None, fullscreen = None):
game_settings['screen_resolution'] = resolution or game_settings['screen_resolution']
if fullscreen is not None:
game_settings['full_screen'] = fullscreen
self._applyScreenResolution()
def _loadReadText(self):
if not exists(game_settings['save_folder']+ 'read.dat'):
return
try:
read = load_data(game_settings['save_folder']+ 'read.dat')
except Exception as exp:
safeprint(exp)
return
restoreReadText(read)
def _loadGlobalData(self):
if not exists(game_settings['save_folder']+ 'global.dat'):
return
try:
gdata = load_data(game_settings['save_folder']+ 'global.dat')
except Exception as exp:
safeprint(exp)
return
restoreGlobalData(gdata)
def _loadSettings(self):
if not exists(game_settings['save_folder']+ 'config.dat'):
return
try:
settings = load_data(game_settings['save_folder'] + 'config.dat')
except Exception as error:
safeprint(error)
return
restoreSettings(settings)
def _saveReadText(self):
try:
save_data(game_settings['save_folder']+ 'read.dat', read_text)
except Exception as exp:
safeprint(exp)
def _saveGlobalData(self):
try:
save_data(game_settings['save_folder']+ 'global.dat', global_data)
except Exception as exp:
safeprint(exp)
def _saveSettings(self):
try:
save_data(game_settings['save_folder']+ 'config.dat', game_settings)
except Exception as exp:
safeprint(exp)
def _applyScreenResolution(self):
props = WindowProperties( self.win.getProperties() )
        if not props.getFullscreen():
props.setSize(int(game_settings['screen_resolution'][0]),int(game_settings['screen_resolution'][1]))
props.setFullscreen(game_settings['full_screen'])
else:
props.setFullscreen(game_settings['full_screen'])
props.setSize(int(game_settings['screen_resolution'][0]),int(game_settings['screen_resolution'][1]))
self.win.requestProperties(props)
if self.configForm:
self.configForm.refreshSettings()
messenger.send('window-event', [self])
| WindyDarian/Sogal | sogasys/sogal_base.py | Python | apache-2.0 | 15,210 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20151124_1135'),
]
operations = [
migrations.AddField(
model_name='service',
name='short_name',
field=models.CharField(max_length=20, null=True),
),
migrations.AlterField(
model_name='product',
name='unit',
field=models.IntegerField(choices=[(1, b'kg'), (2, b'L')]),
),
migrations.AlterField(
model_name='supplierservice',
name='service',
field=models.ForeignKey(related_name='service_suppliers', to='core.Service'),
),
migrations.AlterField(
model_name='supplierservice',
name='supplier',
field=models.ForeignKey(related_name='supplier_services', to='core.Supplier'),
),
]
| kanarelo/dairy | dairy/core/migrations/0009_auto_20151128_1236.py | Python | apache-2.0 | 992 |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .allow_trust_op import AllowTrustOp
from .begin_sponsoring_future_reserves_op import BeginSponsoringFutureReservesOp
from .bump_sequence_op import BumpSequenceOp
from .change_trust_op import ChangeTrustOp
from .claim_claimable_balance_op import ClaimClaimableBalanceOp
from .clawback_claimable_balance_op import ClawbackClaimableBalanceOp
from .clawback_op import ClawbackOp
from .create_account_op import CreateAccountOp
from .create_claimable_balance_op import CreateClaimableBalanceOp
from .create_passive_sell_offer_op import CreatePassiveSellOfferOp
from .liquidity_pool_deposit_op import LiquidityPoolDepositOp
from .liquidity_pool_withdraw_op import LiquidityPoolWithdrawOp
from .manage_buy_offer_op import ManageBuyOfferOp
from .manage_data_op import ManageDataOp
from .manage_sell_offer_op import ManageSellOfferOp
from .muxed_account import MuxedAccount
from .operation_type import OperationType
from .path_payment_strict_receive_op import PathPaymentStrictReceiveOp
from .path_payment_strict_send_op import PathPaymentStrictSendOp
from .payment_op import PaymentOp
from .revoke_sponsorship_op import RevokeSponsorshipOp
from .set_options_op import SetOptionsOp
from .set_trust_line_flags_op import SetTrustLineFlagsOp
__all__ = ["OperationBody"]
@type_checked
class OperationBody:
"""
XDR Source Code::
union switch (OperationType type)
{
case CREATE_ACCOUNT:
CreateAccountOp createAccountOp;
case PAYMENT:
PaymentOp paymentOp;
case PATH_PAYMENT_STRICT_RECEIVE:
PathPaymentStrictReceiveOp pathPaymentStrictReceiveOp;
case MANAGE_SELL_OFFER:
ManageSellOfferOp manageSellOfferOp;
case CREATE_PASSIVE_SELL_OFFER:
CreatePassiveSellOfferOp createPassiveSellOfferOp;
case SET_OPTIONS:
SetOptionsOp setOptionsOp;
case CHANGE_TRUST:
ChangeTrustOp changeTrustOp;
case ALLOW_TRUST:
AllowTrustOp allowTrustOp;
case ACCOUNT_MERGE:
MuxedAccount destination;
case INFLATION:
void;
case MANAGE_DATA:
ManageDataOp manageDataOp;
case BUMP_SEQUENCE:
BumpSequenceOp bumpSequenceOp;
case MANAGE_BUY_OFFER:
ManageBuyOfferOp manageBuyOfferOp;
case PATH_PAYMENT_STRICT_SEND:
PathPaymentStrictSendOp pathPaymentStrictSendOp;
case CREATE_CLAIMABLE_BALANCE:
CreateClaimableBalanceOp createClaimableBalanceOp;
case CLAIM_CLAIMABLE_BALANCE:
ClaimClaimableBalanceOp claimClaimableBalanceOp;
case BEGIN_SPONSORING_FUTURE_RESERVES:
BeginSponsoringFutureReservesOp beginSponsoringFutureReservesOp;
case END_SPONSORING_FUTURE_RESERVES:
void;
case REVOKE_SPONSORSHIP:
RevokeSponsorshipOp revokeSponsorshipOp;
case CLAWBACK:
ClawbackOp clawbackOp;
case CLAWBACK_CLAIMABLE_BALANCE:
ClawbackClaimableBalanceOp clawbackClaimableBalanceOp;
case SET_TRUST_LINE_FLAGS:
SetTrustLineFlagsOp setTrustLineFlagsOp;
case LIQUIDITY_POOL_DEPOSIT:
LiquidityPoolDepositOp liquidityPoolDepositOp;
case LIQUIDITY_POOL_WITHDRAW:
LiquidityPoolWithdrawOp liquidityPoolWithdrawOp;
}
"""
def __init__(
self,
type: OperationType,
create_account_op: CreateAccountOp = None,
payment_op: PaymentOp = None,
path_payment_strict_receive_op: PathPaymentStrictReceiveOp = None,
manage_sell_offer_op: ManageSellOfferOp = None,
create_passive_sell_offer_op: CreatePassiveSellOfferOp = None,
set_options_op: SetOptionsOp = None,
change_trust_op: ChangeTrustOp = None,
allow_trust_op: AllowTrustOp = None,
destination: MuxedAccount = None,
manage_data_op: ManageDataOp = None,
bump_sequence_op: BumpSequenceOp = None,
manage_buy_offer_op: ManageBuyOfferOp = None,
path_payment_strict_send_op: PathPaymentStrictSendOp = None,
create_claimable_balance_op: CreateClaimableBalanceOp = None,
claim_claimable_balance_op: ClaimClaimableBalanceOp = None,
begin_sponsoring_future_reserves_op: BeginSponsoringFutureReservesOp = None,
revoke_sponsorship_op: RevokeSponsorshipOp = None,
clawback_op: ClawbackOp = None,
clawback_claimable_balance_op: ClawbackClaimableBalanceOp = None,
set_trust_line_flags_op: SetTrustLineFlagsOp = None,
liquidity_pool_deposit_op: LiquidityPoolDepositOp = None,
liquidity_pool_withdraw_op: LiquidityPoolWithdrawOp = None,
) -> None:
self.type = type
self.create_account_op = create_account_op
self.payment_op = payment_op
self.path_payment_strict_receive_op = path_payment_strict_receive_op
self.manage_sell_offer_op = manage_sell_offer_op
self.create_passive_sell_offer_op = create_passive_sell_offer_op
self.set_options_op = set_options_op
self.change_trust_op = change_trust_op
self.allow_trust_op = allow_trust_op
self.destination = destination
self.manage_data_op = manage_data_op
self.bump_sequence_op = bump_sequence_op
self.manage_buy_offer_op = manage_buy_offer_op
self.path_payment_strict_send_op = path_payment_strict_send_op
self.create_claimable_balance_op = create_claimable_balance_op
self.claim_claimable_balance_op = claim_claimable_balance_op
self.begin_sponsoring_future_reserves_op = begin_sponsoring_future_reserves_op
self.revoke_sponsorship_op = revoke_sponsorship_op
self.clawback_op = clawback_op
self.clawback_claimable_balance_op = clawback_claimable_balance_op
self.set_trust_line_flags_op = set_trust_line_flags_op
self.liquidity_pool_deposit_op = liquidity_pool_deposit_op
self.liquidity_pool_withdraw_op = liquidity_pool_withdraw_op
def pack(self, packer: Packer) -> None:
self.type.pack(packer)
if self.type == OperationType.CREATE_ACCOUNT:
if self.create_account_op is None:
raise ValueError("create_account_op should not be None.")
self.create_account_op.pack(packer)
return
if self.type == OperationType.PAYMENT:
if self.payment_op is None:
raise ValueError("payment_op should not be None.")
self.payment_op.pack(packer)
return
if self.type == OperationType.PATH_PAYMENT_STRICT_RECEIVE:
if self.path_payment_strict_receive_op is None:
raise ValueError("path_payment_strict_receive_op should not be None.")
self.path_payment_strict_receive_op.pack(packer)
return
if self.type == OperationType.MANAGE_SELL_OFFER:
if self.manage_sell_offer_op is None:
raise ValueError("manage_sell_offer_op should not be None.")
self.manage_sell_offer_op.pack(packer)
return
if self.type == OperationType.CREATE_PASSIVE_SELL_OFFER:
if self.create_passive_sell_offer_op is None:
raise ValueError("create_passive_sell_offer_op should not be None.")
self.create_passive_sell_offer_op.pack(packer)
return
if self.type == OperationType.SET_OPTIONS:
if self.set_options_op is None:
raise ValueError("set_options_op should not be None.")
self.set_options_op.pack(packer)
return
if self.type == OperationType.CHANGE_TRUST:
if self.change_trust_op is None:
raise ValueError("change_trust_op should not be None.")
self.change_trust_op.pack(packer)
return
if self.type == OperationType.ALLOW_TRUST:
if self.allow_trust_op is None:
raise ValueError("allow_trust_op should not be None.")
self.allow_trust_op.pack(packer)
return
if self.type == OperationType.ACCOUNT_MERGE:
if self.destination is None:
raise ValueError("destination should not be None.")
self.destination.pack(packer)
return
if self.type == OperationType.INFLATION:
return
if self.type == OperationType.MANAGE_DATA:
if self.manage_data_op is None:
raise ValueError("manage_data_op should not be None.")
self.manage_data_op.pack(packer)
return
if self.type == OperationType.BUMP_SEQUENCE:
if self.bump_sequence_op is None:
raise ValueError("bump_sequence_op should not be None.")
self.bump_sequence_op.pack(packer)
return
if self.type == OperationType.MANAGE_BUY_OFFER:
if self.manage_buy_offer_op is None:
raise ValueError("manage_buy_offer_op should not be None.")
self.manage_buy_offer_op.pack(packer)
return
if self.type == OperationType.PATH_PAYMENT_STRICT_SEND:
if self.path_payment_strict_send_op is None:
raise ValueError("path_payment_strict_send_op should not be None.")
self.path_payment_strict_send_op.pack(packer)
return
if self.type == OperationType.CREATE_CLAIMABLE_BALANCE:
if self.create_claimable_balance_op is None:
raise ValueError("create_claimable_balance_op should not be None.")
self.create_claimable_balance_op.pack(packer)
return
if self.type == OperationType.CLAIM_CLAIMABLE_BALANCE:
if self.claim_claimable_balance_op is None:
raise ValueError("claim_claimable_balance_op should not be None.")
self.claim_claimable_balance_op.pack(packer)
return
if self.type == OperationType.BEGIN_SPONSORING_FUTURE_RESERVES:
if self.begin_sponsoring_future_reserves_op is None:
raise ValueError(
"begin_sponsoring_future_reserves_op should not be None."
)
self.begin_sponsoring_future_reserves_op.pack(packer)
return
if self.type == OperationType.END_SPONSORING_FUTURE_RESERVES:
return
if self.type == OperationType.REVOKE_SPONSORSHIP:
if self.revoke_sponsorship_op is None:
raise ValueError("revoke_sponsorship_op should not be None.")
self.revoke_sponsorship_op.pack(packer)
return
if self.type == OperationType.CLAWBACK:
if self.clawback_op is None:
raise ValueError("clawback_op should not be None.")
self.clawback_op.pack(packer)
return
if self.type == OperationType.CLAWBACK_CLAIMABLE_BALANCE:
if self.clawback_claimable_balance_op is None:
raise ValueError("clawback_claimable_balance_op should not be None.")
self.clawback_claimable_balance_op.pack(packer)
return
if self.type == OperationType.SET_TRUST_LINE_FLAGS:
if self.set_trust_line_flags_op is None:
raise ValueError("set_trust_line_flags_op should not be None.")
self.set_trust_line_flags_op.pack(packer)
return
if self.type == OperationType.LIQUIDITY_POOL_DEPOSIT:
if self.liquidity_pool_deposit_op is None:
raise ValueError("liquidity_pool_deposit_op should not be None.")
self.liquidity_pool_deposit_op.pack(packer)
return
if self.type == OperationType.LIQUIDITY_POOL_WITHDRAW:
if self.liquidity_pool_withdraw_op is None:
raise ValueError("liquidity_pool_withdraw_op should not be None.")
self.liquidity_pool_withdraw_op.pack(packer)
return
@classmethod
def unpack(cls, unpacker: Unpacker) -> "OperationBody":
type = OperationType.unpack(unpacker)
if type == OperationType.CREATE_ACCOUNT:
create_account_op = CreateAccountOp.unpack(unpacker)
return cls(type=type, create_account_op=create_account_op)
if type == OperationType.PAYMENT:
payment_op = PaymentOp.unpack(unpacker)
return cls(type=type, payment_op=payment_op)
if type == OperationType.PATH_PAYMENT_STRICT_RECEIVE:
path_payment_strict_receive_op = PathPaymentStrictReceiveOp.unpack(unpacker)
return cls(
type=type, path_payment_strict_receive_op=path_payment_strict_receive_op
)
if type == OperationType.MANAGE_SELL_OFFER:
manage_sell_offer_op = ManageSellOfferOp.unpack(unpacker)
return cls(type=type, manage_sell_offer_op=manage_sell_offer_op)
if type == OperationType.CREATE_PASSIVE_SELL_OFFER:
create_passive_sell_offer_op = CreatePassiveSellOfferOp.unpack(unpacker)
return cls(
type=type, create_passive_sell_offer_op=create_passive_sell_offer_op
)
if type == OperationType.SET_OPTIONS:
set_options_op = SetOptionsOp.unpack(unpacker)
return cls(type=type, set_options_op=set_options_op)
if type == OperationType.CHANGE_TRUST:
change_trust_op = ChangeTrustOp.unpack(unpacker)
return cls(type=type, change_trust_op=change_trust_op)
if type == OperationType.ALLOW_TRUST:
allow_trust_op = AllowTrustOp.unpack(unpacker)
return cls(type=type, allow_trust_op=allow_trust_op)
if type == OperationType.ACCOUNT_MERGE:
destination = MuxedAccount.unpack(unpacker)
return cls(type=type, destination=destination)
if type == OperationType.INFLATION:
return cls(type=type)
if type == OperationType.MANAGE_DATA:
manage_data_op = ManageDataOp.unpack(unpacker)
return cls(type=type, manage_data_op=manage_data_op)
if type == OperationType.BUMP_SEQUENCE:
bump_sequence_op = BumpSequenceOp.unpack(unpacker)
return cls(type=type, bump_sequence_op=bump_sequence_op)
if type == OperationType.MANAGE_BUY_OFFER:
manage_buy_offer_op = ManageBuyOfferOp.unpack(unpacker)
return cls(type=type, manage_buy_offer_op=manage_buy_offer_op)
if type == OperationType.PATH_PAYMENT_STRICT_SEND:
path_payment_strict_send_op = PathPaymentStrictSendOp.unpack(unpacker)
return cls(
type=type, path_payment_strict_send_op=path_payment_strict_send_op
)
if type == OperationType.CREATE_CLAIMABLE_BALANCE:
create_claimable_balance_op = CreateClaimableBalanceOp.unpack(unpacker)
return cls(
type=type, create_claimable_balance_op=create_claimable_balance_op
)
if type == OperationType.CLAIM_CLAIMABLE_BALANCE:
claim_claimable_balance_op = ClaimClaimableBalanceOp.unpack(unpacker)
return cls(type=type, claim_claimable_balance_op=claim_claimable_balance_op)
if type == OperationType.BEGIN_SPONSORING_FUTURE_RESERVES:
begin_sponsoring_future_reserves_op = (
BeginSponsoringFutureReservesOp.unpack(unpacker)
)
return cls(
type=type,
begin_sponsoring_future_reserves_op=begin_sponsoring_future_reserves_op,
)
if type == OperationType.END_SPONSORING_FUTURE_RESERVES:
return cls(type=type)
if type == OperationType.REVOKE_SPONSORSHIP:
revoke_sponsorship_op = RevokeSponsorshipOp.unpack(unpacker)
return cls(type=type, revoke_sponsorship_op=revoke_sponsorship_op)
if type == OperationType.CLAWBACK:
clawback_op = ClawbackOp.unpack(unpacker)
return cls(type=type, clawback_op=clawback_op)
if type == OperationType.CLAWBACK_CLAIMABLE_BALANCE:
clawback_claimable_balance_op = ClawbackClaimableBalanceOp.unpack(unpacker)
return cls(
type=type, clawback_claimable_balance_op=clawback_claimable_balance_op
)
if type == OperationType.SET_TRUST_LINE_FLAGS:
set_trust_line_flags_op = SetTrustLineFlagsOp.unpack(unpacker)
return cls(type=type, set_trust_line_flags_op=set_trust_line_flags_op)
if type == OperationType.LIQUIDITY_POOL_DEPOSIT:
liquidity_pool_deposit_op = LiquidityPoolDepositOp.unpack(unpacker)
return cls(type=type, liquidity_pool_deposit_op=liquidity_pool_deposit_op)
if type == OperationType.LIQUIDITY_POOL_WITHDRAW:
liquidity_pool_withdraw_op = LiquidityPoolWithdrawOp.unpack(unpacker)
return cls(type=type, liquidity_pool_withdraw_op=liquidity_pool_withdraw_op)
return cls(type=type)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "OperationBody":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "OperationBody":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.type == other.type
and self.create_account_op == other.create_account_op
and self.payment_op == other.payment_op
and self.path_payment_strict_receive_op
== other.path_payment_strict_receive_op
and self.manage_sell_offer_op == other.manage_sell_offer_op
and self.create_passive_sell_offer_op == other.create_passive_sell_offer_op
and self.set_options_op == other.set_options_op
and self.change_trust_op == other.change_trust_op
and self.allow_trust_op == other.allow_trust_op
and self.destination == other.destination
and self.manage_data_op == other.manage_data_op
and self.bump_sequence_op == other.bump_sequence_op
and self.manage_buy_offer_op == other.manage_buy_offer_op
and self.path_payment_strict_send_op == other.path_payment_strict_send_op
and self.create_claimable_balance_op == other.create_claimable_balance_op
and self.claim_claimable_balance_op == other.claim_claimable_balance_op
and self.begin_sponsoring_future_reserves_op
== other.begin_sponsoring_future_reserves_op
and self.revoke_sponsorship_op == other.revoke_sponsorship_op
and self.clawback_op == other.clawback_op
and self.clawback_claimable_balance_op
== other.clawback_claimable_balance_op
and self.set_trust_line_flags_op == other.set_trust_line_flags_op
and self.liquidity_pool_deposit_op == other.liquidity_pool_deposit_op
and self.liquidity_pool_withdraw_op == other.liquidity_pool_withdraw_op
)
def __str__(self):
out = []
out.append(f"type={self.type}")
out.append(
f"create_account_op={self.create_account_op}"
) if self.create_account_op is not None else None
out.append(
f"payment_op={self.payment_op}"
) if self.payment_op is not None else None
out.append(
f"path_payment_strict_receive_op={self.path_payment_strict_receive_op}"
) if self.path_payment_strict_receive_op is not None else None
out.append(
f"manage_sell_offer_op={self.manage_sell_offer_op}"
) if self.manage_sell_offer_op is not None else None
out.append(
f"create_passive_sell_offer_op={self.create_passive_sell_offer_op}"
) if self.create_passive_sell_offer_op is not None else None
out.append(
f"set_options_op={self.set_options_op}"
) if self.set_options_op is not None else None
out.append(
f"change_trust_op={self.change_trust_op}"
) if self.change_trust_op is not None else None
out.append(
f"allow_trust_op={self.allow_trust_op}"
) if self.allow_trust_op is not None else None
out.append(
f"destination={self.destination}"
) if self.destination is not None else None
out.append(
f"manage_data_op={self.manage_data_op}"
) if self.manage_data_op is not None else None
out.append(
f"bump_sequence_op={self.bump_sequence_op}"
) if self.bump_sequence_op is not None else None
out.append(
f"manage_buy_offer_op={self.manage_buy_offer_op}"
) if self.manage_buy_offer_op is not None else None
out.append(
f"path_payment_strict_send_op={self.path_payment_strict_send_op}"
) if self.path_payment_strict_send_op is not None else None
out.append(
f"create_claimable_balance_op={self.create_claimable_balance_op}"
) if self.create_claimable_balance_op is not None else None
out.append(
f"claim_claimable_balance_op={self.claim_claimable_balance_op}"
) if self.claim_claimable_balance_op is not None else None
out.append(
f"begin_sponsoring_future_reserves_op={self.begin_sponsoring_future_reserves_op}"
) if self.begin_sponsoring_future_reserves_op is not None else None
out.append(
f"revoke_sponsorship_op={self.revoke_sponsorship_op}"
) if self.revoke_sponsorship_op is not None else None
out.append(
f"clawback_op={self.clawback_op}"
) if self.clawback_op is not None else None
out.append(
f"clawback_claimable_balance_op={self.clawback_claimable_balance_op}"
) if self.clawback_claimable_balance_op is not None else None
out.append(
f"set_trust_line_flags_op={self.set_trust_line_flags_op}"
) if self.set_trust_line_flags_op is not None else None
out.append(
f"liquidity_pool_deposit_op={self.liquidity_pool_deposit_op}"
) if self.liquidity_pool_deposit_op is not None else None
out.append(
f"liquidity_pool_withdraw_op={self.liquidity_pool_withdraw_op}"
) if self.liquidity_pool_withdraw_op is not None else None
return f"<OperationBody {[', '.join(out)]}>"
| StellarCN/py-stellar-base | stellar_sdk/xdr/operation_body.py | Python | apache-2.0 | 23,177 |
"""
Support for displaying the current CPU speed.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cpuspeed/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['py-cpuinfo==0.2.6']
_LOGGER = logging.getLogger(__name__)
ATTR_BRAND = 'Brand'
ATTR_HZ = 'GHz Advertised'
ATTR_VENDOR = 'Vendor ID'
DEFAULT_NAME = 'CPU speed'
ICON = 'mdi:pulse'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the CPU speed sensor."""
name = config.get(CONF_NAME)
add_devices([CpuSpeedSensor(name)])
class CpuSpeedSensor(Entity):
"""Representation of a CPU sensor."""
def __init__(self, name):
"""Initialize the sensor."""
self._name = name
self._state = None
self._unit_of_measurement = 'GHz'
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.info is not None:
return {
ATTR_VENDOR: self.info['vendor_id'],
ATTR_BRAND: self.info['brand'],
ATTR_HZ: round(self.info['hz_advertised_raw'][0]/10**9, 2)
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the state."""
from cpuinfo import cpuinfo
self.info = cpuinfo.get_cpu_info()
self._state = round(float(self.info['hz_actual_raw'][0])/10**9, 2)
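# Illustrative configuration sketch (not part of the component): in
# configuration.yaml the platform above would typically be enabled as
#
#   sensor:
#     - platform: cpuspeed
#       name: CPU speed
#
# where name is optional and falls back to DEFAULT_NAME; py-cpuinfo (pinned in
# REQUIREMENTS above) supplies the raw frequency data.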
| morphis/home-assistant | homeassistant/components/sensor/cpuspeed.py | Python | apache-2.0 | 2,269 |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2.proto import language_service_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        # Avoid a shared mutable default; each stub gets its own response queue.
        self.responses = responses if responses is not None else []
        self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestLanguageServiceClient(object):
def test_analyze_sentiment(self):
# Setup Expected Response
language = 'language-1613589672'
expected_response = {'language': language}
expected_response = language_service_pb2.AnalyzeSentimentResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
response = client.analyze_sentiment(document)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.AnalyzeSentimentRequest(
document=document)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_analyze_sentiment_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
with pytest.raises(CustomException):
client.analyze_sentiment(document)
def test_analyze_entities(self):
# Setup Expected Response
language = 'language-1613589672'
expected_response = {'language': language}
expected_response = language_service_pb2.AnalyzeEntitiesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
response = client.analyze_entities(document)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.AnalyzeEntitiesRequest(
document=document)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_analyze_entities_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
with pytest.raises(CustomException):
client.analyze_entities(document)
def test_analyze_entity_sentiment(self):
# Setup Expected Response
language = 'language-1613589672'
expected_response = {'language': language}
expected_response = language_service_pb2.AnalyzeEntitySentimentResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
response = client.analyze_entity_sentiment(document)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.AnalyzeEntitySentimentRequest(
document=document)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_analyze_entity_sentiment_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
with pytest.raises(CustomException):
client.analyze_entity_sentiment(document)
def test_analyze_syntax(self):
# Setup Expected Response
language = 'language-1613589672'
expected_response = {'language': language}
expected_response = language_service_pb2.AnalyzeSyntaxResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
response = client.analyze_syntax(document)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.AnalyzeSyntaxRequest(
document=document)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_analyze_syntax_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
with pytest.raises(CustomException):
client.analyze_syntax(document)
def test_classify_text(self):
# Setup Expected Response
expected_response = {}
expected_response = language_service_pb2.ClassifyTextResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
response = client.classify_text(document)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.ClassifyTextRequest(
document=document)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_classify_text_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
with pytest.raises(CustomException):
client.classify_text(document)
def test_annotate_text(self):
# Setup Expected Response
language = 'language-1613589672'
expected_response = {'language': language}
expected_response = language_service_pb2.AnnotateTextResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup Request
document = {}
features = {}
response = client.annotate_text(document, features)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = language_service_pb2.AnnotateTextRequest(
document=document, features=features)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_annotate_text_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = language_v1beta2.LanguageServiceClient(channel=channel)
# Setup request
document = {}
features = {}
with pytest.raises(CustomException):
client.annotate_text(document, features)
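# Illustrative sketch (not part of the generated tests): the stub pattern above
# can be exercised on its own -- ChannelStub records every (method, request)
# pair and replays canned responses, which is what lets the tests assert on the
# exact request protos without opening a real gRPC channel.
if __name__ == "__main__":
    channel = ChannelStub(responses=["canned-response"])
    call = channel.unary_unary("/LanguageService/AnalyzeSentiment")
    assert call("fake-request") == "canned-response"
    assert channel.requests == [("/LanguageService/AnalyzeSentiment", "fake-request")]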
| tseaver/gcloud-python | language/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py | Python | apache-2.0 | 8,919 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine.resources.openstack.heat import resource_group
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as stackm
from heat.tests import common
from heat.tests import utils
template = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
}
}
template2 = {
"heat_template_version": "2013-05-23",
"resources": {
"dummy": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "baz"
}
},
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": {"get_attr": ["dummy", "Foo"]}
}
}
}
}
}
}
template_repl = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_%index%",
"listprop": [
"%index%_0",
"%index%_1",
"%index%_2"
]
}
}
}
}
}
}
template_attr = {
"heat_template_version": "2014-10-16",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithComplexAttributesType",
"properties": {
}
}
}
}
},
"outputs": {
"nested_strings": {
"value": {"get_attr": ["group1", "nested_dict", "string"]}
}
}
}
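# Note on the fixtures above: `template` is the plain two-member group,
# `template2` feeds a get_attr reference from a sibling resource into the group
# definition, `template_repl` exercises the %index% placeholder substitution,
# and `template_attr` targets nested/complex attribute aggregation.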
class ResourceGroupTest(common.HeatTestCase):
def setUp(self):
common.HeatTestCase.setUp(self)
self.m.StubOutWithMock(stackm.Stack, 'validate')
def test_assemble_nested(self):
"""Tests nested stack creation based on props.
Tests that the nested stack that implements the group is created
appropriately based on properties.
"""
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
templ = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"2": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
self.assertEqual(templ, resg._assemble_nested(['0', '1', '2']).t)
def test_assemble_nested_include(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
expect['resources']["0"]['properties'] = {"Foo": None}
self.assertEqual(
expect, resg._assemble_nested(['0'], include_all=True).t)
def test_assemble_nested_include_zero(self):
templ = copy.deepcopy(template)
templ['resources']['group1']['properties']['count'] = 0
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
}
self.assertEqual(expect, resg._assemble_nested([]).t)
def test_assemble_nested_with_metadata(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
res_def['metadata'] = {
'priority': 'low',
'role': 'webserver'
}
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {},
"metadata": {
'priority': 'low',
'role': 'webserver'
}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
def test_assemble_nested_rolling_update(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "baz"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
def test_assemble_nested_rolling_update_none(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 0).t)
def test_assemble_nested_rolling_update_failed_resource(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "baz"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
res0 = resg._nested['0']
res0.status = res0.FAILED
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
def test_assemble_nested_missing_param(self):
# Setup
# Change the standard testing template to use a get_param lookup
# within the resource definition
templ = copy.deepcopy(template)
res_def = templ['resources']['group1']['properties']['resource_def']
res_def['properties']['Foo'] = {'get_param': 'bar'}
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
# Test - This should not raise a ValueError about "bar" not being
# provided
nested_tmpl = resg._assemble_nested(['0', '1'])
# Verify
expected = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
}
}
self.assertEqual(expected, nested_tmpl.t)
def test_index_var(self):
stack = utils.parse_stack(template_repl)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_0",
"listprop": [
"0_0", "0_1", "0_2"
]
}
},
"1": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_1",
"listprop": [
"1_0", "1_1", "1_2"
]
}
},
"2": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_2",
"listprop": [
"2_0", "2_1", "2_2"
]
}
}
}
}
nested = resg._assemble_nested(['0', '1', '2']).t
for res in nested['resources']:
res_prop = nested['resources'][res]['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
def test_custom_index_var(self):
templ = copy.deepcopy(template_repl)
templ['resources']['group1']['properties']['index_var'] = "__foo__"
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_%index%",
"listprop": [
"%index%_0", "%index%_1", "%index%_2"
]
}
}
}
}
nested = resg._assemble_nested(['0']).t
res_prop = nested['resources']['0']['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
props = copy.deepcopy(templ['resources']['group1']['properties'])
res_def = props['resource_def']
res_def['properties']['Foo'] = "Bar___foo__"
res_def['properties']['listprop'] = ["__foo___0",
"__foo___1",
"__foo___2"]
res_def['type'] = "ResourceWithListProp__foo__"
snip = snip.freeze(properties=props)
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp__foo__",
"properties": {
"Foo": "Bar_0",
"listprop": [
"0_0", "0_1", "0_2"
]
}
}
}
}
nested = resg._assemble_nested(['0']).t
res_prop = nested['resources']['0']['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
def test_assemble_no_properties(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
del res_def['properties']
stack = utils.parse_stack(templ)
resg = stack.resources['group1']
self.assertIsNone(resg.validate())
def test_invalid_res_type(self):
"""Test that error raised for unknown resource type."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['resource_def']['type'] = "idontexist"
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
exp_msg = 'The Resource Type (idontexist) could not be found.'
self.assertIn(exp_msg, six.text_type(exc))
def test_reference_attr(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertIsNone(resgrp.validate())
def test_invalid_removal_policies_nolist(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = 'notallowed'
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = "removal_policies: \"'notallowed'\" is not a list"
self.assertIn(errstr, six.text_type(exc))
def test_invalid_removal_policies_nomap(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = ['notallowed']
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = '"notallowed" is not a map'
self.assertIn(errstr, six.text_type(exc))
def test_child_template(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
def check_res_names(names):
self.assertEqual(list(names), ['0', '1'])
return 'tmpl'
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._assemble_nested = mock.Mock()
resgrp._assemble_nested.side_effect = check_res_names
resgrp.properties.data[resgrp.COUNT] = 2
self.assertEqual('tmpl', resgrp.child_template())
self.assertEqual(1, resgrp._assemble_nested.call_count)
def test_child_params(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertEqual({}, resgrp.child_params())
def test_handle_create(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.create_with_template = mock.Mock(return_value=None)
self.assertIsNone(resgrp.handle_create())
self.assertEqual(1, resgrp.create_with_template.call_count)
def test_handle_create_with_batching(self):
stack = utils.parse_stack(tmpl_with_default_updt_policy())
defn = stack.t.resource_definitions(stack)['group1']
props = stack.t.t['resources']['group1']['properties'].copy()
props['count'] = 10
update_policy = {'batch_create': {'max_batch_size': 3}}
snip = defn.freeze(properties=props, update_policy=update_policy)
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.patchobject(scheduler.TaskRunner, 'start')
checkers = resgrp.handle_create()
self.assertEqual(4, len(checkers))
def test_run_to_completion(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._check_status_complete = mock.Mock(side_effect=[False, True])
resgrp.update_with_template = mock.Mock(return_value=None)
next(resgrp._run_to_completion(snip, 200))
self.assertEqual(1, resgrp.update_with_template.call_count)
def test_update_in_failed(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.state_set('CREATE', 'FAILED')
resgrp._assemble_nested = mock.Mock(return_value='tmpl')
resgrp.properties.data[resgrp.COUNT] = 2
self.patchobject(scheduler.TaskRunner, 'start')
resgrp.handle_update(snip, None, None)
self.assertTrue(resgrp._assemble_nested.called)
def test_handle_delete(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.delete_nested = mock.Mock(return_value=None)
resgrp.handle_delete()
resgrp.delete_nested.assert_called_once_with()
def test_handle_update_size(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._assemble_nested = mock.Mock(return_value=None)
resgrp.properties.data[resgrp.COUNT] = 5
self.patchobject(scheduler.TaskRunner, 'start')
resgrp.handle_update(snip, None, None)
self.assertTrue(resgrp._assemble_nested.called)
class ResourceGroupBlackList(common.HeatTestCase):
"""This class tests ResourceGroup._name_blacklist()."""
# 1) no resource_list, empty blacklist
# 2) no resource_list, existing blacklist
# 3) resource_list not in nested()
# 4) resource_list (refid) not in nested()
# 5) resource_list in nested() -> saved
# 6) resource_list (refid) in nested() -> saved
scenarios = [
('1', dict(data_in=None, rm_list=[],
nested_rsrcs=[], expected=[],
saved=False)),
('2', dict(data_in='0,1,2', rm_list=[],
nested_rsrcs=[], expected=['0', '1', '2'],
saved=False)),
('3', dict(data_in='1,3', rm_list=['6'],
nested_rsrcs=['0', '1', '3'],
expected=['1', '3'],
saved=False)),
('4', dict(data_in='0,1', rm_list=['id-7'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1'],
saved=False)),
('5', dict(data_in='0,1', rm_list=['3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
saved=True)),
('6', dict(data_in='0,1', rm_list=['id-3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
saved=True)),
]
def test_blacklist(self):
stack = utils.parse_stack(template)
resg = stack['group1']
# mock properties
resg.properties = mock.MagicMock()
resg.properties.__getitem__.return_value = [
{'resource_list': self.rm_list}]
# mock data get/set
resg.data = mock.Mock()
resg.data.return_value.get.return_value = self.data_in
resg.data_set = mock.Mock()
# mock nested access
def stack_contains(name):
return name in self.nested_rsrcs
def by_refid(name):
rid = name.replace('id-', '')
if rid not in self.nested_rsrcs:
return None
res = mock.Mock()
res.name = rid
return res
nested = mock.MagicMock()
nested.__contains__.side_effect = stack_contains
nested.__iter__.side_effect = iter(self.nested_rsrcs)
nested.resource_by_refid.side_effect = by_refid
resg.nested = mock.Mock(return_value=nested)
blacklist = resg._name_blacklist()
self.assertEqual(set(self.expected), blacklist)
if self.saved:
resg.data_set.assert_called_once_with('name_blacklist',
','.join(blacklist))
class ResourceGroupEmptyParams(common.HeatTestCase):
"""This class tests ResourceGroup.build_resource_definition()."""
scenarios = [
('non_empty', dict(value='Bar', expected={'Foo': 'Bar'},
expected_include={'Foo': 'Bar'})),
('empty_None', dict(value=None, expected={},
expected_include={'Foo': None})),
('empty_boolean', dict(value=False, expected={'Foo': False},
expected_include={'Foo': False})),
('empty_string', dict(value='', expected={'Foo': ''},
expected_include={'Foo': ''})),
('empty_number', dict(value=0, expected={'Foo': 0},
expected_include={'Foo': 0})),
('empty_json', dict(value={}, expected={'Foo': {}},
expected_include={'Foo': {}})),
('empty_list', dict(value=[], expected={'Foo': []},
expected_include={'Foo': []}))
]
def test_definition(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = self.value
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exp1 = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
self.expected)
exp2 = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
self.expected_include)
rdef = resg.get_resource_def()
self.assertEqual(exp1, resg.build_resource_definition('0', rdef))
rdef = resg.get_resource_def(include_all=True)
self.assertEqual(
exp2, resg.build_resource_definition('0', rdef))
class ResourceGroupNameListTest(common.HeatTestCase):
"""This class tests ResourceGroup._resource_names()."""
# 1) no blacklist, 0 count
# 2) no blacklist, x count
    # 3) blacklist (not affecting)
# 4) blacklist with pruning
scenarios = [
('1', dict(blacklist=[], count=0,
expected=[])),
('2', dict(blacklist=[], count=4,
expected=['0', '1', '2', '3'])),
('3', dict(blacklist=['5', '6'], count=3,
expected=['0', '1', '2'])),
('4', dict(blacklist=['2', '4'], count=4,
expected=['0', '1', '3', '5'])),
]
def test_names(self):
stack = utils.parse_stack(template)
resg = stack['group1']
resg.properties = mock.MagicMock()
resg.properties.get.return_value = self.count
resg._name_blacklist = mock.MagicMock(return_value=self.blacklist)
self.assertEqual(self.expected, list(resg._resource_names()))
class ResourceGroupAttrTest(common.HeatTestCase):
def test_aggregate_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
expected = ['0', '1']
self.assertEqual(expected, resg.FnGetAtt('foo'))
self.assertEqual(expected, resg.FnGetAtt('Foo'))
def test_index_dotted_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
self.assertEqual('0', resg.FnGetAtt('resource.0.Foo'))
self.assertEqual('1', resg.FnGetAtt('resource.1.Foo'))
def test_index_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
self.assertEqual('0', resg.FnGetAtt('resource.0', 'Foo'))
self.assertEqual('1', resg.FnGetAtt('resource.1', 'Foo'))
def test_index_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack(template_attr,
expect_attrs={'0': 2, '1': 2})
self.assertEqual(2, resg.FnGetAtt('resource.0',
'nested_dict', 'dict', 'b'))
self.assertEqual(2, resg.FnGetAtt('resource.1',
'nested_dict', 'dict', 'b'))
def test_aggregate_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack(template_attr,
expect_attrs={'0': 3, '1': 3})
expected = [3, 3]
self.assertEqual(expected, resg.FnGetAtt('nested_dict', 'list', 2))
def test_aggregate_refs(self):
"""Test resource id aggregation."""
resg = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected, resg.FnGetAtt("refs"))
def test_aggregate_refs_with_index(self):
"""Test resource id aggregation with index."""
resg = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected[0], resg.FnGetAtt("refs", 0))
self.assertEqual(expected[1], resg.FnGetAtt("refs", 1))
self.assertIsNone(resg.FnGetAtt("refs", 2))
def test_aggregate_refs_map(self):
resg = self._create_dummy_stack()
found = resg.FnGetAtt("refs_map")
expected = {'0': 'ID-0', '1': 'ID-1'}
self.assertEqual(expected, found)
def test_aggregate_outputs(self):
"""Test outputs aggregation."""
expected = {'0': ['foo', 'bar'], '1': ['foo', 'bar']}
resg = self._create_dummy_stack(template_attr, expect_attrs=expected)
self.assertEqual(expected, resg.FnGetAtt('attributes', 'list'))
def test_aggregate_outputs_no_path(self):
"""Test outputs aggregation with missing path."""
resg = self._create_dummy_stack(template_attr)
self.assertRaises(exception.InvalidTemplateAttribute,
resg.FnGetAtt, 'attributes')
def test_index_refs(self):
"""Tests getting ids of individual resources."""
resg = self._create_dummy_stack()
self.assertEqual("ID-0", resg.FnGetAtt('resource.0'))
self.assertEqual("ID-1", resg.FnGetAtt('resource.1'))
self.assertRaises(exception.InvalidTemplateAttribute, resg.FnGetAtt,
'resource.2')
@mock.patch.object(grouputils, 'get_rsrc_id')
def test_get_attribute(self, mock_get_rsrc_id):
stack = utils.parse_stack(template)
mock_get_rsrc_id.side_effect = ['0', '1']
rsrc = stack['group1']
self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
def test_get_attribute_convg(self):
cache_data = {'group1': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'attrs': {'refs': ['rsrc1', 'rsrc2']}
}}
stack = utils.parse_stack(template, cache_data=cache_data)
rsrc = stack['group1']
self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt(rsrc.REFS))
def _create_dummy_stack(self, template_data=template, expect_count=2,
expect_attrs=None):
stack = utils.parse_stack(template_data)
resg = stack['group1']
fake_res = {}
if expect_attrs is None:
expect_attrs = {}
for resc in range(expect_count):
res = str(resc)
fake_res[res] = mock.Mock()
fake_res[res].stack = stack
fake_res[res].FnGetRefId.return_value = 'ID-%s' % res
if res in expect_attrs:
fake_res[res].FnGetAtt.return_value = expect_attrs[res]
else:
fake_res[res].FnGetAtt.return_value = res
resg.nested = mock.Mock(return_value=fake_res)
names = [str(name) for name in range(expect_count)]
resg._resource_names = mock.Mock(return_value=names)
return resg
class ReplaceTest(common.HeatTestCase):
# 1. no min_in_service
# 2. min_in_service > count and existing with no blacklist
# 3. min_in_service > count and existing with blacklist
# 4. existing > count and min_in_service with blacklist
# 5. existing > count and min_in_service with no blacklist
# 6. all existing blacklisted
# 7. count > existing and min_in_service with no blacklist
# 8. count > existing and min_in_service with blacklist
# 9. count < existing - blacklisted
# 10. pause_sec > 0
scenarios = [
('1', dict(min_in_service=0, count=2,
existing=['0', '1'], black_listed=['0'],
batch_size=1, pause_sec=0, tasks=2)),
('2', dict(min_in_service=3, count=2,
existing=['0', '1'], black_listed=[],
batch_size=2, pause_sec=0, tasks=3)),
('3', dict(min_in_service=3, count=2,
existing=['0', '1'], black_listed=['0'],
batch_size=2, pause_sec=0, tasks=3)),
('4', dict(min_in_service=3, count=2,
existing=['0', '1', '2', '3'], black_listed=['2', '3'],
batch_size=1, pause_sec=0, tasks=4)),
('5', dict(min_in_service=2, count=2,
existing=['0', '1', '2', '3'], black_listed=[],
batch_size=2, pause_sec=0, tasks=2)),
('6', dict(min_in_service=2, count=3,
existing=['0', '1'], black_listed=['0', '1'],
batch_size=2, pause_sec=0, tasks=2)),
('7', dict(min_in_service=0, count=5,
existing=['0', '1'], black_listed=[],
batch_size=1, pause_sec=0, tasks=5)),
('8', dict(min_in_service=0, count=5,
existing=['0', '1'], black_listed=['0'],
batch_size=1, pause_sec=0, tasks=5)),
('9', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
black_listed=['0'],
batch_size=2, pause_sec=0, tasks=2)),
('10', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
black_listed=['0'],
batch_size=2, pause_sec=10, tasks=3))]
def setUp(self):
super(ReplaceTest, self).setUp()
templ = copy.deepcopy(template)
self.stack = utils.parse_stack(templ)
snip = self.stack.t.resource_definitions(self.stack)['group1']
self.group = resource_group.ResourceGroup('test', snip, self.stack)
self.group.update_with_template = mock.Mock()
self.group.check_update_complete = mock.Mock()
def test_rolling_updates(self):
self.group._nested = get_fake_nested_stack(self.existing)
self.group.get_size = mock.Mock(return_value=self.count)
self.group._name_blacklist = mock.Mock(
return_value=set(self.black_listed))
tasks = self.group._replace(self.min_in_service, self.batch_size,
self.pause_sec)
self.assertEqual(self.tasks,
len(tasks))
def tmpl_with_bad_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"foo": {}}
return t
def tmpl_with_default_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"rolling_update": {}}
return t
def tmpl_with_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"rolling_update": {
"min_in_service": "1",
"max_batch_size": "2",
"pause_time": "1"
}}
return t
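# For reference (summarising the policy used above): min_in_service is the
# number of group members kept untouched at any time, max_batch_size caps how
# many members are replaced per batch, and pause_time is the delay in seconds
# between batches -- which is why the timeout tests below check that the stack
# timeout leaves room for the accumulated pause_time across batches.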
def get_fake_nested_stack(names):
nested_t = '''
heat_template_version: 2015-04-30
description: Resource Group
resources:
'''
resource_snip = '''
'%s':
type: OverwrittenFnGetRefIdType
properties:
foo: bar
'''
resources = [nested_t]
for res_name in names:
resources.extend([resource_snip % res_name])
nested_t = ''.join(resources)
return utils.parse_stack(template_format.parse(nested_t))
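# For names=['0', '1'] the helper above assembles a nested template equivalent
# to:
#
#   heat_template_version: 2015-04-30
#   description: Resource Group
#   resources:
#     '0':
#       type: OverwrittenFnGetRefIdType
#       properties:
#         foo: bar
#     '1':
#       type: OverwrittenFnGetRefIdType
#       properties:
#         foo: bar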
class RollingUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyTest, self).setUp()
def test_parse_without_update_policy(self):
stack = utils.parse_stack(template)
stack.validate()
grp = stack['group1']
self.assertFalse(grp.update_policy['rolling_update'])
def test_parse_with_update_policy(self):
tmpl = tmpl_with_updt_policy()
stack = utils.parse_stack(tmpl)
stack.validate()
tmpl_grp = tmpl['resources']['group1']
tmpl_policy = tmpl_grp['update_policy']['rolling_update']
tmpl_batch_sz = int(tmpl_policy['max_batch_size'])
grp = stack['group1']
self.assertTrue(grp.update_policy)
self.assertEqual(2, len(grp.update_policy))
self.assertIn('rolling_update', grp.update_policy)
policy = grp.update_policy['rolling_update']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(1, int(policy['min_in_service']))
self.assertEqual(tmpl_batch_sz, int(policy['max_batch_size']))
self.assertEqual(1, policy['pause_time'])
def test_parse_with_default_update_policy(self):
tmpl = tmpl_with_default_updt_policy()
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['group1']
self.assertTrue(grp.update_policy)
self.assertEqual(2, len(grp.update_policy))
self.assertIn('rolling_update', grp.update_policy)
policy = grp.update_policy['rolling_update']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(0, int(policy['min_in_service']))
self.assertEqual(1, int(policy['max_batch_size']))
self.assertEqual(0, policy['pause_time'])
def test_parse_with_bad_update_policy(self):
tmpl = tmpl_with_bad_updt_policy()
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("foo", six.text_type(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyDiffTest, self).setUp()
def validate_update_policy_diff(self, current, updated):
# load current stack
current_stack = utils.parse_stack(current)
current_grp = current_stack['group1']
current_grp_json = current_grp.frozen_definition()
updated_stack = utils.parse_stack(updated)
updated_grp = updated_stack['group1']
updated_grp_json = updated_grp.t.freeze()
# identify the template difference
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
self.assertTrue(tmpl_diff.update_policy_changed())
# test application of the new update policy in handle_update
current_grp._try_rolling_update = mock.Mock()
current_grp._assemble_nested_for_size = mock.Mock()
self.patchobject(scheduler.TaskRunner, 'start')
current_grp.handle_update(updated_grp_json, tmpl_diff, None)
self.assertEqual(updated_grp_json._update_policy or {},
current_grp.update_policy.data)
def test_update_policy_added(self):
self.validate_update_policy_diff(template,
tmpl_with_updt_policy())
def test_update_policy_updated(self):
updt_template = tmpl_with_updt_policy()
grp = updt_template['resources']['group1']
policy = grp['update_policy']['rolling_update']
policy['min_in_service'] = '2'
policy['max_batch_size'] = '4'
policy['pause_time'] = '90'
self.validate_update_policy_diff(tmpl_with_updt_policy(),
updt_template)
def test_update_policy_removed(self):
self.validate_update_policy_diff(tmpl_with_updt_policy(),
template)
class RollingUpdateTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdateTest, self).setUp()
def check_with_update(self, with_policy=False, with_diff=False):
current = copy.deepcopy(template)
self.current_stack = utils.parse_stack(current)
self.current_grp = self.current_stack['group1']
current_grp_json = self.current_grp.frozen_definition()
prop_diff, tmpl_diff = None, None
updated = tmpl_with_updt_policy() if (
with_policy) else copy.deepcopy(template)
if with_diff:
res_def = updated['resources']['group1'][
'properties']['resource_def']
res_def['properties']['Foo'] = 'baz'
prop_diff = dict(
{'count': 2,
'resource_def': {'properties': {'Foo': 'baz'},
'type': 'OverwrittenFnGetRefIdType'}})
updated_stack = utils.parse_stack(updated)
updated_grp = updated_stack['group1']
updated_grp_json = updated_grp.t.freeze()
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
self.current_grp._replace = mock.Mock(return_value=[])
self.current_grp._assemble_nested = mock.Mock()
self.patchobject(scheduler.TaskRunner, 'start')
self.current_grp.handle_update(updated_grp_json, tmpl_diff, prop_diff)
def test_update_without_policy_prop_diff(self):
self.check_with_update(with_diff=True)
self.assertTrue(self.current_grp._assemble_nested.called)
def test_update_with_policy_prop_diff(self):
self.check_with_update(with_policy=True, with_diff=True)
self.current_grp._replace.assert_called_once_with(1, 2, 1)
self.assertTrue(self.current_grp._assemble_nested.called)
def test_update_time_not_sufficient(self):
current = copy.deepcopy(template)
self.stack = utils.parse_stack(current)
self.current_grp = self.stack['group1']
self.stack.timeout_secs = mock.Mock(return_value=200)
err = self.assertRaises(ValueError, self.current_grp._update_timeout,
3, 100)
self.assertIn('The current update policy will result in stack update '
'timeout.', six.text_type(err))
def test_update_time_sufficient(self):
current = copy.deepcopy(template)
self.stack = utils.parse_stack(current)
self.current_grp = self.stack['group1']
self.stack.timeout_secs = mock.Mock(return_value=400)
self.assertEqual(200, self.current_grp._update_timeout(3, 100))
class TestUtils(common.HeatTestCase):
    # 1. No existing, no blacklist
    # 2. Existing with no blacklist
    # 3. Existing with blacklist
    # 4. Existing with blacklist entries that are not all in the group
scenarios = [
('1', dict(existing=[], black_listed=[], count=0)),
('2', dict(existing=['0', '1'], black_listed=[], count=0)),
('3', dict(existing=['0', '1'], black_listed=['0'], count=1)),
('4', dict(existing=['0', '1'], black_listed=['1', '2'], count=1))
]
def setUp(self):
super(TestUtils, self).setUp()
def test_count_black_listed(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._nested = get_fake_nested_stack(self.existing)
resgrp._name_blacklist = mock.Mock(return_value=set(self.black_listed))
rcount = resgrp._count_black_listed()
self.assertEqual(self.count, rcount)
class TestGetBatches(common.HeatTestCase):
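    # Scenario names encode targ_cap, init_cap, bat_size and min_serv; each
    # expected batch is a (group size, number of members updated in the batch,
    # updated member names) tuple, checked against _get_batches() and
    # _assemble_for_rolling_update() below.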
scenarios = [
('4_4_1_0', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=0,
batches=[
(4, 1, ['4']),
(4, 1, ['3']),
(4, 1, ['2']),
(4, 1, ['1']),
])),
('4_4_1_4', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=4,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_1_5', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=5,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_2_0', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=0,
batches=[
(4, 2, ['4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_2_4', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(4, 0, []),
])),
('5_5_2_0', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=0,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(5, 1, ['1']),
])),
('5_5_2_4', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(5, 0, []),
])),
('3_3_2_0', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=0,
batches=[
(3, 2, ['3', '2']),
(3, 1, ['1']),
])),
('3_3_2_4', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=4,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(4, 1, ['1']),
(3, 0, []),
])),
('4_4_4_0', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_5_0', dict(targ_cap=4, init_cap=4, bat_size=5, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_4_1', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_6_1', dict(targ_cap=4, init_cap=4, bat_size=6, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_4_2', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=2,
batches=[
(6, 4, ['6', '5', '4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_4_4', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(4, 0, []),
])),
('4_4_5_6', dict(targ_cap=4, init_cap=4, bat_size=5, min_serv=6,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(4, 0, []),
])),
('4_7_1_0', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=0,
batches=[
(4, 1, ['4']),
(4, 1, ['3']),
(4, 1, ['2']),
(4, 1, ['1']),
])),
('4_7_1_4', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=4,
batches=[
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_1_5', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=5,
batches=[
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_2_0', dict(targ_cap=4, init_cap=7, bat_size=2, min_serv=0,
batches=[
(4, 2, ['4', '3']),
(4, 2, ['2', '1']),
])),
('4_7_2_4', dict(targ_cap=4, init_cap=7, bat_size=2, min_serv=4,
batches=[
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(4, 0, []),
])),
('5_7_2_0', dict(targ_cap=5, init_cap=7, bat_size=2, min_serv=0,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(5, 1, ['1']),
])),
('5_7_2_4', dict(targ_cap=5, init_cap=7, bat_size=2, min_serv=4,
batches=[
(6, 2, ['5', '4']),
(6, 2, ['3', '2']),
(5, 1, ['1']),
])),
('4_7_4_4', dict(targ_cap=4, init_cap=7, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '4', '3', '2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_5_6', dict(targ_cap=4, init_cap=7, bat_size=5, min_serv=6,
batches=[
(8, 4, ['8', '4', '3', '2']),
(5, 1, ['1']),
(4, 0, []),
])),
('6_4_1_0', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=0,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_1_4', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=4,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_1_5', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=5,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_2_0', dict(targ_cap=6, init_cap=4, bat_size=2, min_serv=0,
batches=[
(6, 2, ['5', '6']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_2_4', dict(targ_cap=6, init_cap=4, bat_size=2, min_serv=4,
batches=[
(6, 2, ['5', '6']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_5_2_0', dict(targ_cap=6, init_cap=5, bat_size=2, min_serv=0,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_5_2_4', dict(targ_cap=6, init_cap=5, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_3_2_0', dict(targ_cap=6, init_cap=3, bat_size=2, min_serv=0,
batches=[
(5, 2, ['4', '5']),
(6, 2, ['6', '3']),
(6, 2, ['2', '1']),
])),
('6_3_2_4', dict(targ_cap=6, init_cap=3, bat_size=2, min_serv=4,
batches=[
(5, 2, ['4', '5']),
(6, 2, ['6', '3']),
(6, 2, ['2', '1']),
])),
('6_4_4_0', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=0,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_5_0', dict(targ_cap=6, init_cap=4, bat_size=5, min_serv=0,
batches=[
(6, 5, ['5', '6', '4', '3', '2']),
(6, 1, ['1']),
])),
('6_4_4_1', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=1,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_6_1', dict(targ_cap=6, init_cap=4, bat_size=6, min_serv=1,
batches=[
(7, 6, ['5', '6', '7', '4', '3', '2']),
(6, 1, ['1']),
])),
('6_4_4_2', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=2,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_4_4', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(6, 0, []),
])),
('6_4_5_6', dict(targ_cap=6, init_cap=4, bat_size=5, min_serv=6,
batches=[
(9, 5, ['9', '8', '7', '6', '5']),
(10, 4, ['10', '4', '3', '2']),
(7, 1, ['1']),
(6, 0, []),
])),
]
def setUp(self):
super(TestGetBatches, self).setUp()
self.stack = utils.parse_stack(template)
self.grp = self.stack['group1']
self.grp._name_blacklist = mock.Mock(return_value={'0'})
def test_get_batches(self):
batches = list(self.grp._get_batches(self.targ_cap,
self.init_cap,
self.bat_size,
self.min_serv))
self.assertEqual([(s, u) for s, u, n in self.batches], batches)
def test_assemble(self):
old_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
new_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "bar"})
resources = [(str(i), old_def) for i in range(self.init_cap + 1)]
self.grp.get_size = mock.Mock(return_value=self.targ_cap)
self.patchobject(grouputils, 'get_member_definitions',
return_value=resources)
self.grp.build_resource_definition = mock.Mock(return_value=new_def)
all_updated_names = set()
for size, max_upd, names in self.batches:
template = self.grp._assemble_for_rolling_update(size,
max_upd,
names)
res_dict = template.resource_definitions(self.stack)
expected_names = set(map(str, range(1, size + 1)))
self.assertEqual(expected_names, set(res_dict))
all_updated_names &= expected_names
all_updated_names |= set(names)
updated = set(n for n, v in res_dict.items() if v != old_def)
self.assertEqual(all_updated_names, updated)
resources[:] = sorted(res_dict.items(), key=lambda i: int(i[0]))
| steveb/heat | heat/tests/openstack/heat/test_resource_group.py | Python | apache-2.0 | 58,016 |
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
# Explicit import for the DirectObject.DirectObject base class used below.
from direct.showbase import DirectObject
from direct.gui.DirectScrolledList import *
from direct.distributed.ClockDelta import *
from toontown.toontowngui import TTDialog
import math
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.toon import Toon
from direct.showbase import RandomNumGen
from toontown.toonbase import TTLocalizer
import random
import cPickle
from direct.showbase import PythonUtil
import GameSprite
from math import pi
import GardenProgressMeter
class GardenDropGame(DirectObject.DirectObject):
def __init__(self):
self.acceptErrorDialog = None
self.doneEvent = 'game Done'
self.sprites = []
self.load()
thing = self.model.find('**/item_board')
self.block = self.model1.find('**/minnieCircle')
self.colorRed = (1, 0, 0, 1)
self.colorBlue = (0, 0, 1, 1)
self.colorGreen = (0, 1, 0, 1)
self.colorGhostRed = (1, 0, 0, 0.5)
self.colorGhostBlue = (0, 0, 1, 0.5)
self.colorGhostGreen = (0, 1, 0, 0.5)
self.colorWhite = (1, 1, 1, 1)
self.colorBlack = (0, 0, 0, 1.0)
self.colorShadow = (0, 0, 0, 0.5)
self.lastTime = None
self.running = 0
self.massCount = 0
self.foundCount = 0
self.maxX = 0.47
self.minX = -0.47
self.maxZ = 0.65
self.minZ = -0.1
self.newBallX = 0.0
self.newBallZ = 0.6
self.rangeX = self.maxX - self.minX
self.rangeZ = self.maxZ - self.minZ
size = 0.085
sizeZ = size * 0.8
gX = int(self.rangeX / size)
gZ = int(self.rangeZ / sizeZ)
self.maxX = self.minX + gX * size
self.maxZ = self.minZ + gZ * sizeZ
self.controlOffsetX = 0.0
self.controlOffsetZ = 0.0
self.queExtent = 3
print 'Grid Dimensions X%s Z%s' % (gX, gZ)
self.grid = []
self.gridDimX = gX
self.gridDimZ = gZ
self.gridBrick = False
base.gardenGame = self
for countX in range(self.gridDimX):
newRow = []
for countZ in range(self.gridDimZ):
offset = 0
if countZ % 2 == 0:
offset = size / 2
newRow.append([None, countX * size + self.minX + offset, countZ * sizeZ + self.minZ])
self.grid.append(newRow)
self.controlSprite = None
self.cogSprite = self.addUnSprite(self.block, posX=0.25, posZ=0.5)
self.cogSprite.setColor(self.colorShadow)
for ball in range(0, 3):
place = random.random() * self.rangeX
newSprite = self.addSprite(self.block, size=0.5, posX=self.minX + place, posZ=0.0, found=1)
self.stickInGrid(newSprite, 1)
self.queBall = self.addSprite(self.block, posX=0.25, posZ=0.5, found=0)
self.queBall.setColor(self.colorWhite)
self.queBall.isQue = 1
self.matchList = []
self.newBallTime = 1.0
self.newBallCountUp = 0.0
self.cogX = 0
self.cogZ = 0
self.__run()
return
def findGrid(self, x, z, force = 0):
currentClosest = None
currentDist = 10000000
for countX in range(self.gridDimX):
for countZ in range(self.gridDimZ):
testDist = self.testPointDistanceSquare(x, z, self.grid[countX][countZ][1], self.grid[countX][countZ][2])
if self.grid[countX][countZ][0] == None and testDist < currentDist and (force or self.hasNeighbor(countX, countZ)):
currentClosest = self.grid[countX][countZ]
self.closestX = countX
self.closestZ = countZ
currentDist = testDist
return currentClosest
def hasNeighbor(self, cellX, cellZ):
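        # The grid rows are staggered: even rows are shifted half a cell in X
        # (see the grid construction in __init__), so their diagonal neighbours
        # sit at x and x + 1, while odd rows' diagonal neighbours sit at x - 1
        # and x.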
gotNeighbor = 0
if cellZ % 2 == 0:
if self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ - 1)):
gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
            gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
            gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
            gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ + 1)):
            gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
            gotNeighbor = 1
        elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ - 1)):
            gotNeighbor = 1
return gotNeighbor
def clearMatchList(self):
for entry in self.matchList:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
gridEntry[0] = None
sprite.markedForDeath = 1
return
def createMatchList(self, x, z):
self.matchList = []
self.fillMatchList(x, z)
def fillMatchList(self, cellX, cellZ):
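        # Recursive flood fill: collect every connected cell that shares this
        # cell's colour into self.matchList, using the same staggered-row
        # neighbour offsets as hasNeighbor().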
if (cellX, cellZ) in self.matchList:
return
self.matchList.append((cellX, cellZ))
colorType = self.grid[cellX][cellZ][0].colorType
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX + 1, cellZ + 1) == colorType:
self.fillMatchList(cellX + 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX + 1, cellZ - 1) == colorType:
self.fillMatchList(cellX + 1, cellZ - 1)
else:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX - 1, cellZ + 1) == colorType:
self.fillMatchList(cellX - 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX - 1, cellZ - 1) == colorType:
self.fillMatchList(cellX - 1, cellZ - 1)
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
return
def getValidGrid(self, x, z):
if x < 0 or x >= self.gridDimX:
return None
elif z < 0 or z >= self.gridDimZ:
return None
else:
return self.grid[x][z]
return None
def getColorType(self, x, z):
if x < 0 or x >= self.gridDimX:
return -1
elif z < 0 or z >= self.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][z][0].colorType
return
def findGridCog(self):
self.cogX = 0
self.cogZ = 0
self.massCount = 0
for row in self.grid:
for cell in row:
if cell[0] != None:
self.cogX += cell[1]
self.cogZ += cell[2]
self.massCount += 1
if self.massCount > 0:
self.cogX = self.cogX / self.massCount
self.cogZ = self.cogZ / self.massCount
self.cogSprite.setX(self.cogX)
self.cogSprite.setZ(self.cogZ)
else:
self.doOnClearGrid()
return
def doOnClearGrid(self):
secondSprite = self.addSprite(self.block, posX=self.newBallX, posZ=0.0, found=1)
secondSprite.addForce(0, 1.55 * pi)
self.stickInGrid(secondSprite, 1)
def findGrid2(self, x, z):
rangeX = self.maxX - self.minX
rangeZ = self.maxZ - self.minZ
framedX = x - self.minX
framedZ = z - self.minZ
tileDimX = rangeX / self.gridDimX
tileDimZ = rangeZ / self.gridDimZ
tileX = int(framedX / tileDimX)
tileZ = int(framedZ / tileDimZ)
print 'find Grid tileX%s tileZ%s' % (tileX, tileZ)
return (tileX, tileZ)
def findPos(self, x, z):
rangeX = self.maxX - self.minX
rangeZ = self.maxZ - self.minZ
tileDimX = rangeX / self.gridDimX
tileDimZ = rangeZ / self.gridDimZ
posX = tileDimX * x + self.minX
posZ = tileDimZ * z + self.minZ
print 'find Pos X%s Z%s' % (posX, posZ)
return (posX, posZ)
def placeIntoGrid(self, sprite, x, z):
if self.grid[x][z][0] == None:
self.grid[x][z][0] = sprite
sprite.setActive(0)
newX, newZ = self.findPos(x, z)
sprite.setX(newX)
sprite.setZ(newZ)
print 'Setting Final Pos X%s Z%s' % (newX, newZ)
else:
self.placeIntoGrid(sprite, x + 1, z - 1)
return
def stickInGrid(self, sprite, force = 0):
if sprite.isActive and not sprite.isQue:
gridCell = self.findGrid(sprite.getX(), sprite.getZ(), force)
if gridCell:
gridCell[0] = sprite
sprite.setActive(0)
sprite.setX(gridCell[1])
sprite.setZ(gridCell[2])
self.createMatchList(self.closestX, self.closestZ)
if len(self.matchList) >= 3:
self.clearMatchList()
self.findGridCog()
def stickInGrid2(self, sprite):
if sprite.isActive and not sprite.isQue:
tileX, tileZ = self.findGrid(sprite.getX(), sprite.getZ())
self.placeIntoGrid(sprite, tileX, tileZ)
sprite.isActive = 0
def load(self):
model = loader.loadModel('phase_5.5/models/gui/package_delivery_panel')
model1 = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
self.model = model
self.model1 = model1
background = model.find('**/bg')
itemBoard = model.find('**/item_board')
self.frame = DirectFrame(scale=1.1, relief=DGG.FLAT, frameSize=(-0.5,
0.5,
-0.45,
-0.05), frameColor=(0.737, 0.573, 0.345, 1.0))
self.background = DirectFrame(self.frame, image=background, image_scale=0.05, relief=None, pos=(0, 1, 0))
self.itemBoard = DirectFrame(parent=self.frame, image=itemBoard, image_scale=0.05, image_color=(0.922, 0.922, 0.753, 1), relief=None, pos=(0, 1, 0))
gui2 = loader.loadModel('phase_3/models/gui/quit_button')
self.quitButton = DirectButton(parent=self.frame, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.5, 1.0, -0.42), scale=0.9, text='Exit Mini Game', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01), command=self.__handleExit)
return
def unload(self):
self.frame.destroy()
del self.frame
if self.acceptErrorDialog:
self.acceptErrorDialog.cleanup()
self.acceptErrorDialog = None
taskMgr.remove('gameTask')
self.ignoreAll()
return
def show(self):
self.frame.show()
def hide(self):
self.frame.hide()
def __handleExit(self):
self.__acceptExit()
def __acceptExit(self, buttonValue = None):
if hasattr(self, 'frame'):
self.hide()
self.unload()
messenger.send(self.doneEvent)
def addSprite(self, image, size = 0.5, posX = 0, posZ = 0, found = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
colorChoice = random.choice(range(0, 3))
newSprite = GameSprite.GameSprite(nodeObj, colorChoice, found)
self.sprites.append(newSprite)
if found:
self.foundCount += 1
return newSprite
def addUnSprite(self, image, size = 0.5, posX = 0, posZ = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
        newSprite = GameSprite.GameSprite(nodeObj)
return newSprite
def __run(self, cont = 1):
if self.lastTime == None:
self.lastTime = globalClock.getRealTime()
timeDelta = globalClock.getRealTime() - self.lastTime
self.lastTime = globalClock.getRealTime()
self.newBallCountUp += timeDelta
if base.mouseWatcherNode.hasMouse():
x = base.mouseWatcherNode.getMouseX()
y = base.mouseWatcherNode.getMouseY()
self.queBall.setX(x)
self.queBall.setZ(y)
for sprite in self.sprites:
sprite.run(timeDelta)
if sprite.getX() > self.maxX:
sprite.setX(self.maxX)
sprite.velX = -sprite.velX
if sprite.getX() < self.minX:
sprite.setX(self.minX)
sprite.velX = -sprite.velX
if sprite.getZ() > self.maxZ:
sprite.setZ(self.maxZ)
sprite.velZ = -sprite.velZ
if sprite.getZ() < self.minZ:
self.stickInGrid(sprite, 1)
if sprite.isActive:
sprite.addForce(timeDelta * 0.9, pi * 1.5)
self.queBall.velX = (self.queBall.getX() - self.queBall.prevX) / timeDelta
self.queBall.velZ = (self.queBall.getZ() - self.queBall.prevZ) / timeDelta
self.__colTest()
for sprite in self.sprites:
if sprite.markedForDeath:
if sprite.foundation:
self.foundCount -= 1
self.sprites.remove(sprite)
sprite.delete()
if self.controlSprite == None:
self.addControlSprite(self.newBallX, self.newBallZ)
self.newBallCountUp = 0.0
if self.newBallCountUp >= self.newBallTime:
self.addControlSprite(self.newBallX, self.newBallZ)
self.newBallCountUp = 0.0
if not self.controlSprite.isActive:
self.controlSprite = None
if self.foundCount <= 0:
self.__handleWin()
if cont and not self.running:
taskMgr.add(self.__run, 'gameTask')
self.running = 1
return Task.cont
def __handleWin(self):
GardenProgressMeter.GardenProgressMeter()
self.__handleExit()
def addControlSprite(self, x = 0.0, z = 0.0):
newSprite = self.addSprite(self.block, posX=x, posZ=z)
self.controlSprite = newSprite
def __colTest(self):
if not hasattr(self, 'tick'):
self.tick = 0
self.tick += 1
if self.tick > 5:
self.tick = 0
sizeSprites = len(self.sprites)
for movingSpriteIndex in range(len(self.sprites)):
for testSpriteIndex in range(movingSpriteIndex, len(self.sprites)):
movingSprite = self.getSprite(movingSpriteIndex)
testSprite = self.getSprite(testSpriteIndex)
if testSprite and movingSprite:
if movingSpriteIndex != testSpriteIndex and (movingSprite.isActive or testSprite.isActive):
if movingSprite.isQue or testSprite.isQue:
if self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < self.queExtent * (movingSprite.size + testSprite.size):
self.push(movingSprite, testSprite)
elif self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < movingSprite.size + testSprite.size:
if not (movingSprite.isActive and testSprite.isActive):
self.__collide(movingSprite, testSprite)
if self.tick == 5:
pass
def getSprite(self, spriteIndex):
if spriteIndex >= len(self.sprites) or self.sprites[spriteIndex].markedForDeath:
return None
else:
return self.sprites[spriteIndex]
return None
def testDistance(self, nodeA, nodeB):
distX = nodeA.getX() - nodeB.getX()
distZ = nodeA.getZ() - nodeB.getZ()
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
return dist
def testPointDistance(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
if dist == 0:
dist = 1e-10
return dist
def testPointDistanceSquare(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
if distC == 0:
distC = 1e-10
return distC
def angleTwoSprites(self, sprite1, sprite2):
x1 = sprite1.getX()
z1 = sprite1.getZ()
x2 = sprite2.getX()
z2 = sprite2.getZ()
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
def angleTwoPoints(self, x1, z1, x2, z2):
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
def __collide(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
else:
test.velX = 0
test.velZ = 0
move.velX = 0
move.velZ = 0
test.collide()
move.collide()
self.stickInGrid(move)
self.stickInGrid(test)
if queHit:
forceM = 0.1
distX = que.getX() - hit.getX()
distZ = que.getZ() - hit.getZ()
def push(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
if queHit:
forceM = 0.1
dist = self.testDistance(move.nodeObj, test.nodeObj)
if abs(dist) < self.queExtent * que.size and abs(dist) > 0:
scaleSize = self.queExtent * que.size * 0.5
distFromPara = abs(abs(dist) - scaleSize)
force = (scaleSize - distFromPara) / scaleSize * (dist / abs(dist))
angle = self.angleTwoSprites(que, hit)
if angle < 0:
angle = angle + 2 * pi
if angle > pi * 2.0:
angle = angle - 2 * pi
newAngle = pi * 1.0
if angle > pi * 1.5 or angle < pi * 0.5:
newAngle = pi * 0.0
hit.addForce(forceM * force, newAngle)
| silly-wacky-3-town-toon/SOURCE-COD | toontown/estate/GardenDropGame.py | Python | apache-2.0 | 20,215 |
# Run this in the Django shell
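# One way to do that (assuming a standard manage.py project layout; adjust the
# path to wherever this script lives):
#   python manage.py shell
#   >>> execfile('populatedb.py')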
from clinicalsearch.models import ClinicalTrial
import csv
with open('clinicalsearch/trials_ranked.csv', 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
print row
t = ClinicalTrial(id=row[0], sponsor=row[1], published=(row[2]=="TRUE"), state=row[3], url=row[4], ongoing=(row[5]=="TRUE"), title=row[6], condition=row[7], intervention=row[8], locations=row[9], last_changed=row[10], min_age=int(row[11]), max_age=int(row[12]), genders=row[13], health=(row[14] == "True"), ranking=int(row[15]))
t.save() | adam2392/clinicaltrials | populatedb.py | Python | apache-2.0 | 574 |
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Sylvain Afchain <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import messaging
from neutron.common import constants
from neutron.common import rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class MeteringAgentNotifyAPI(object):
"""API for plugin to notify L3 metering agent."""
def __init__(self, topic=topics.METERING_AGENT):
super(MeteringAgentNotifyAPI, self).__init__()
target = messaging.Target(topic=topic, version='1.0')
self.client = rpc.get_client(target)
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
adminContext = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_plugin()
l3_routers = {}
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
adminContext, [router['id']],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify metering agent at %(topic)s.%(host)s '
'the message %(method)s'),
{'topic': self.client.target.topic,
'host': l3_agent.host,
'method': method})
l3_router = l3_routers.get(l3_agent.host, [])
l3_router.append(router)
l3_routers[l3_agent.host] = l3_router
for host, routers in l3_routers.iteritems():
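            # Suffix the topic with the hostname so that only the metering
            # agent running on that host consumes the cast.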
topic = '%s.%s' % (self.client.target.topic, host)
cctxt = self.client.prepare(topic=topic)
cctxt.cast(context, method, routers=routers)
def _notification_fanout(self, context, method, router_id):
LOG.debug(_('Fanout notify metering agent at %(topic)s the message '
'%(method)s on router %(router_id)s'),
{'topic': self.client.target.topic,
'method': method,
'router_id': router_id})
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, router_id=router_id)
def _notification(self, context, method, routers):
"""Notify all the agents that are hosting the routers."""
plugin = manager.NeutronManager.get_plugin()
if utils.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
self._agent_notification(context, method, routers)
else:
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, routers=routers)
def router_deleted(self, context, router_id):
self._notification_fanout(context, 'router_deleted', router_id)
def routers_updated(self, context, routers):
if routers:
self._notification(context, 'routers_updated', routers)
def update_metering_label_rules(self, context, routers):
self._notification(context, 'update_metering_label_rules', routers)
def add_metering_label(self, context, routers):
self._notification(context, 'add_metering_label', routers)
def remove_metering_label(self, context, routers):
self._notification(context, 'remove_metering_label', routers)
| beagles/neutron_hacking | neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py | Python | apache-2.0 | 4,002 |
from keystone import utils
from keystone.common import wsgi
import keystone.config as config
from keystone.logic.types.tenant import Tenant
from . import get_marker_limit_and_url
class TenantController(wsgi.Controller):
"""Controller for Tenant related operations"""
def __init__(self, options, is_service_operation=None):
self.options = options
self.is_service_operation = is_service_operation
@utils.wrap_error
def create_tenant(self, req):
tenant = utils.get_normalized_request_content(Tenant, req)
return utils.send_result(201, req,
config.SERVICE.create_tenant(utils.get_auth_token(req), tenant))
@utils.wrap_error
def get_tenants(self, req):
tenant_name = req.GET["name"] if "name" in req.GET else None
if tenant_name:
tenant = config.SERVICE.get_tenant_by_name(
utils.get_auth_token(req),
tenant_name)
return utils.send_result(200, req, tenant)
else:
marker, limit, url = get_marker_limit_and_url(req)
tenants = config.SERVICE.get_tenants(utils.get_auth_token(req),
marker, limit, url, self.is_service_operation)
return utils.send_result(200, req, tenants)
@utils.wrap_error
def get_tenant(self, req, tenant_id):
tenant = config.SERVICE.get_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(200, req, tenant)
@utils.wrap_error
def update_tenant(self, req, tenant_id):
tenant = utils.get_normalized_request_content(Tenant, req)
rval = config.SERVICE.update_tenant(utils.get_auth_token(req),
tenant_id, tenant)
return utils.send_result(200, req, rval)
@utils.wrap_error
def delete_tenant(self, req, tenant_id):
rval = config.SERVICE.delete_tenant(utils.get_auth_token(req),
tenant_id)
return utils.send_result(204, req, rval)
| genius1611/Keystone | keystone/controllers/tenant.py | Python | apache-2.0 | 1,975 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import p2p_primary_path_
class p2p_primary_path(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Primary paths associated with the LSP
"""
__slots__ = ("_path_helper", "_extmethods", "__p2p_primary_path")
_yang_name = "p2p-primary-path"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__p2p_primary_path = YANGDynClass(
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"p2p-tunnel-attributes",
"p2p-primary-path",
]
def _get_p2p_primary_path(self):
"""
Getter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
YANG Description: List of p2p primary paths for a tunnel
"""
return self.__p2p_primary_path
def _set_p2p_primary_path(self, v, load=False):
"""
Setter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_p2p_primary_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_p2p_primary_path() directly.
YANG Description: List of p2p primary paths for a tunnel
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """p2p_primary_path must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__p2p_primary_path = t
if hasattr(self, "_set"):
self._set()
def _unset_p2p_primary_path(self):
self.__p2p_primary_path = YANGDynClass(
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
p2p_primary_path = __builtin__.property(
_get_p2p_primary_path, _set_p2p_primary_path
)
_pyangbind_elements = OrderedDict([("p2p_primary_path", p2p_primary_path)])
from . import p2p_primary_path_
class p2p_primary_path(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Primary paths associated with the LSP
"""
__slots__ = ("_path_helper", "_extmethods", "__p2p_primary_path")
_yang_name = "p2p-primary-path"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__p2p_primary_path = YANGDynClass(
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"p2p-tunnel-attributes",
"p2p-primary-path",
]
def _get_p2p_primary_path(self):
"""
Getter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
YANG Description: List of p2p primary paths for a tunnel
"""
return self.__p2p_primary_path
def _set_p2p_primary_path(self, v, load=False):
"""
Setter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_p2p_primary_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_p2p_primary_path() directly.
YANG Description: List of p2p primary paths for a tunnel
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """p2p_primary_path must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__p2p_primary_path = t
if hasattr(self, "_set"):
self._set()
def _unset_p2p_primary_path(self):
self.__p2p_primary_path = YANGDynClass(
base=YANGListType(
"name",
p2p_primary_path_.p2p_primary_path,
yang_name="p2p-primary-path",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="p2p-primary-path",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
p2p_primary_path = __builtin__.property(
_get_p2p_primary_path, _set_p2p_primary_path
)
_pyangbind_elements = OrderedDict([("p2p_primary_path", p2p_primary_path)])
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/__init__.py | Python | apache-2.0 | 14,694 |
"""Cleanup script."""
from grr.lib import export_utils
# After you do this the UI complains a little, but creating a new hunt fixes it.
hunts = aff4.FACTORY.Open("aff4:/hunts/")
for hunt in hunts.ListChildren():
aff4.FACTORY.Delete(hunt)
# Delete clients that haven't polled in for 2hours
for fd in aff4.FACTORY.MultiOpen(export_utils.GetAllClients()):
cutoff = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("2h")
if fd.Get(fd.Schema.PING) < cutoff:
aff4.FACTORY.Delete(fd.urn)
# Delete all flows
for client in export_utils.GetAllClients():
aff4.FACTORY.Delete(client.Add("flows"))
| destijl/grr-workshop-setup | cleanup.py | Python | apache-2.0 | 602 |
from django import forms
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from guardian.admin import GuardedModelAdmin
from uploader.projects.models import FileSystem, Project
class FileSystemAdminForm(forms.ModelForm):
class Meta:
model = FileSystem
class ProjectAdmin(GuardedModelAdmin):
list_display = ('__unicode__', 'file_system', 'directory')
class FileSystemAdmin(ModelAdmin):
list_display = ('__unicode__', 'alias', 'mount_point')
form = FileSystemAdminForm
admin.site.register(FileSystem, admin_class=FileSystemAdmin)
admin.site.register(Project, admin_class=ProjectAdmin)
| stfc/cvmfs-stratum-uploader | uploader/projects/admin.py | Python | apache-2.0 | 642 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.model.base_pytorch_model import PytorchModelBuilder
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.model.Seq2Seq_pytorch import model_creator
from .base_automodel import BasePytorchAutomodel
class AutoSeq2Seq(BasePytorchAutomodel):
def __init__(self,
input_feature_num,
output_target_num,
past_seq_len,
future_seq_len,
optimizer,
loss,
metric,
lr=0.001,
lstm_hidden_dim=128,
lstm_layer_num=2,
dropout=0.25,
teacher_forcing=False,
backend="torch",
logs_dir="/tmp/auto_seq2seq",
cpus_per_trial=1,
name="auto_seq2seq",
remote_dir=None,
):
"""
Create an AutoSeq2Seq.
:param input_feature_num: Int. The number of features in the input
:param output_target_num: Int. The number of targets in the output
:param past_seq_len: Int. The number of historical steps used for forecasting.
:param future_seq_len: Int. The number of future steps to forecast.
:param optimizer: String or pyTorch optimizer creator function or
tf.keras optimizer instance.
:param loss: String or pytorch/tf.keras loss instance or pytorch loss creator function.
:param metric: String. The evaluation metric name to optimize. e.g. "mse"
:param lr: float or hp sampling function from a float space. Learning rate.
e.g. hp.choice([0.001, 0.003, 0.01])
:param lstm_hidden_dim: LSTM hidden channel for decoder and encoder.
hp.grid_search([32, 64, 128])
:param lstm_layer_num: LSTM layer number for decoder and encoder.
e.g. hp.randint(1, 4)
:param dropout: float or hp sampling function from a float space. Learning rate. Dropout
rate. e.g. hp.uniform(0.1, 0.3)
:param teacher_forcing: If use teacher forcing in training. e.g. hp.choice([True, False])
:param backend: The backend of the Seq2Seq model. We only support backend as "torch"
for now.
:param logs_dir: Local directory to save logs and results. It defaults to
"/tmp/auto_seq2seq"
:param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
:param name: name of the AutoSeq2Seq. It defaults to "auto_seq2seq"
:param remote_dir: String. Remote directory to sync training results and checkpoints. It
defaults to None and doesn't take effects while running in local. While running in
cluster, it defaults to "hdfs:///tmp/{name}".
"""
super().__init__()
# todo: support search for past_seq_len.
# todo: add input check.
if backend != "torch":
raise ValueError(f"We only support backend as torch. Got {backend}")
self.search_space = dict(
input_feature_num=input_feature_num,
output_feature_num=output_target_num,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len,
lstm_hidden_dim=lstm_hidden_dim,
lstm_layer_num=lstm_layer_num,
lr=lr,
dropout=dropout,
teacher_forcing=teacher_forcing
)
self.metric = metric
model_builder = PytorchModelBuilder(model_creator=model_creator,
optimizer_creator=optimizer,
loss_creator=loss,
)
self.auto_est = AutoEstimator(model_builder=model_builder,
logs_dir=logs_dir,
resources_per_trial={"cpu": cpus_per_trial},
remote_dir=remote_dir,
name=name)
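# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not exercised by this module). It
# assumes an Orca context has already been initialized, that x_train/y_train
# and x_val/y_val are numpy arrays shaped (samples, past_seq_len,
# input_feature_num) and (samples, future_seq_len, output_target_num), and it
# uses the hp sampling helpers referenced in the docstring above.
#
#   from zoo.orca.automl import hp
#
#   auto_s2s = AutoSeq2Seq(input_feature_num=4,
#                          output_target_num=1,
#                          past_seq_len=20,
#                          future_seq_len=5,
#                          optimizer="Adam",
#                          loss="mse",
#                          metric="mse",
#                          lr=hp.choice([0.001, 0.003, 0.01]),
#                          lstm_hidden_dim=hp.grid_search([32, 64, 128]),
#                          dropout=hp.uniform(0.1, 0.3))
#   auto_s2s.fit(data=(x_train, y_train),
#                validation_data=(x_val, y_val),
#                epochs=2)
#   best_model = auto_s2s.get_best_model()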
| intel-analytics/analytics-zoo | pyzoo/zoo/chronos/autots/model/auto_seq2seq.py | Python | apache-2.0 | 4,638 |
# coding=utf8
# Copyright © 2015-2017 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
import ambari_helpers as helpers
from resource_management import *
class Master(Script):
def install(self, env):
print('Install the CDAP Master')
import params
# Add repository file
helpers.add_repo(
params.files_dir + params.repo_file,
params.os_repo_dir
)
# Install any global packages
self.install_packages(env)
# Workaround for CDAP-3961
helpers.package('cdap-hbase-compat-1.1')
# Install package
helpers.package('cdap-master')
self.configure(env)
def start(self, env, upgrade_type=None):
print('Start the CDAP Master')
import params
import status_params
env.set_params(params)
self.configure(env)
helpers.create_hdfs_dir(params.hdfs_namespace, params.cdap_hdfs_user, 775)
# Create user's HDFS home
helpers.create_hdfs_dir('/user/' + params.cdap_user, params.cdap_user, 775)
if params.cdap_hdfs_user != params.cdap_user:
helpers.create_hdfs_dir('/user/' + params.cdap_hdfs_user, params.cdap_hdfs_user, 775)
# Hack to work around CDAP-1967
self.remove_jackson(env)
daemon_cmd = format('/opt/cdap/master/bin/cdap master start')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
user=params.cdap_user,
not_if=no_op_test
)
def stop(self, env, upgrade_type=None):
print('Stop the CDAP Master')
import status_params
daemon_cmd = format('service cdap-master stop')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
only_if=no_op_test
)
def status(self, env):
import status_params
check_process_status(status_params.cdap_master_pid_file)
def configure(self, env):
print('Configure the CDAP Master')
import params
env.set_params(params)
helpers.cdap_config('master')
def upgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP Upgrade Tool',
arguments='upgrade force'
)
def upgrade_hbase(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP HBase Coprocessor Upgrade Tool',
arguments='upgrade_hbase force'
)
def postupgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.flow.FlowQueuePendingCorrector',
label='CDAP Post-Upgrade Tool'
)
def queue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.SimpleHBaseQueueDebugger',
label='CDAP Queue Debugger Tool'
)
def jobqueue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.JobQueueDebugger',
label='CDAP Job Queue Debugger Tool'
)
def run_class(self, env, classname, label=None, arguments=''):
if label is None:
label = classname
print('Running: ' + label)
import params
cmd = format("/opt/cdap/master/bin/cdap run %s %s" % (classname, arguments))
Execute(
cmd,
user=params.cdap_user
)
def remove_jackson(self, env):
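        # Part of the CDAP-1967 workaround (see start()): drop any conflicting
        # org.codehaus.jackson jars shipped in the master lib directory.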
jackson_check = format('ls -1 /opt/cdap/master/lib/org.codehaus.jackson* 2>/dev/null')
Execute(
'rm -f /opt/cdap/master/lib/org.codehaus.jackson.jackson-*',
not_if=jackson_check
)
if __name__ == "__main__":
Master().execute()
| cdapio/cdap-ambari-service | src/main/resources/common-services/CDAP/6.0.0/package/scripts/master.py | Python | apache-2.0 | 4,572 |
from collections import OrderedDict
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from Orange.data import Table
from Orange.classification.svm import SVMLearner, NuSVMLearner
from Orange.widgets import settings, gui
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
class OWBaseSVM(OWBaseLearner):
#: Kernel types
Linear, Poly, RBF, Sigmoid = 0, 1, 2, 3
#: Selected kernel type
kernel_type = settings.Setting(RBF)
#: kernel degree
degree = settings.Setting(3)
#: gamma
gamma = settings.Setting(1.0)
#: coef0 (adative constant)
coef0 = settings.Setting(0.0)
#: numerical tolerance
tol = settings.Setting(0.001)
kernels = (("Linear", "x⋅y"),
("Polynomial", "(g x⋅y + c)<sup>d</sup>"),
("RBF", "exp(-g|x-y|²)"),
("Sigmoid", "tanh(g x⋅y + c)"))
def _add_kernel_box(self):
# Initialize with the widest label to measure max width
self.kernel_eq = self.kernels[-1][1]
self.kernel_box = box = gui.hBox(self.controlArea, "Kernel")
buttonbox = gui.radioButtonsInBox(
box, self, "kernel_type", btnLabels=[k[0] for k in self.kernels],
callback=self._on_kernel_changed, addSpace=20)
buttonbox.layout().setSpacing(10)
gui.rubber(buttonbox)
parambox = gui.vBox(box)
gui.label(parambox, self, "Kernel: %(kernel_eq)s")
common = dict(orientation=Qt.Horizontal, callback=self.settings_changed,
alignment=Qt.AlignRight, controlWidth=80)
spbox = gui.hBox(parambox)
gui.rubber(spbox)
inbox = gui.vBox(spbox)
gamma = gui.doubleSpin(
inbox, self, "gamma", 0.0, 10.0, 0.01, label=" g: ", **common)
coef0 = gui.doubleSpin(
inbox, self, "coef0", 0.0, 10.0, 0.01, label=" c: ", **common)
degree = gui.doubleSpin(
inbox, self, "degree", 0.0, 10.0, 0.5, label=" d: ", **common)
self._kernel_params = [gamma, coef0, degree]
gui.rubber(parambox)
# This is the maximal height (all double spins are visible)
# and the maximal width (the label is initialized to the widest one)
box.layout().activate()
box.setFixedHeight(box.sizeHint().height())
box.setMinimumWidth(box.sizeHint().width())
def _add_optimization_box(self):
self.optimization_box = gui.vBox(
self.controlArea, "Optimization Parameters")
gui.doubleSpin(
self.optimization_box, self, "tol", 1e-6, 1.0, 1e-5,
label="Numerical tolerance:",
decimals=6, alignment=Qt.AlignRight, controlWidth=100,
callback=self.settings_changed
)
def add_main_layout(self):
self._add_type_box()
self._add_kernel_box()
self._add_optimization_box()
def _on_kernel_changed(self):
enabled = [[False, False, False], # linear
[True, True, True], # poly
[True, False, False], # rbf
[True, True, False]] # sigmoid
self.kernel_eq = self.kernels[self.kernel_type][1]
mask = enabled[self.kernel_type]
for spin, enabled in zip(self._kernel_params, mask):
[spin.box.hide, spin.box.show][enabled]()
self.settings_changed()
def _report_kernel_parameters(self, items):
if self.kernel_type == 0:
items["Kernel"] = "Linear"
elif self.kernel_type == 1:
items["Kernel"] = \
"Polynomial, ({g:.4} x⋅y + {c:.4})<sup>{d}</sup>".format(
g=self.gamma, c=self.coef0, d=self.degree)
elif self.kernel_type == 2:
items["Kernel"] = "RBF, exp(-{:.4}|x-y|²)".format(self.gamma)
else:
items["Kernel"] = "Sigmoid, tanh({g:.4} x⋅y + {c:.4})".format(
g=self.gamma, c=self.coef0)
def update_model(self):
super().update_model()
sv = None
if self.valid_data:
sv = self.data[self.model.skl_model.support_]
self.send("Support vectors", sv)
class OWSVMClassification(OWBaseSVM):
name = "SVM"
description = "Support Vector Machines map inputs to higher-dimensional " \
"feature spaces that best separate different classes. "
icon = "icons/SVM.svg"
priority = 50
LEARNER = SVMLearner
outputs = [("Support vectors", Table)]
# 0: c_svc, 1: nu_svc
svmtype = settings.Setting(0)
C = settings.Setting(1.0)
nu = settings.Setting(0.5)
    shrinking = settings.Setting(True)
probability = settings.Setting(False)
max_iter = settings.Setting(100)
limit_iter = settings.Setting(True)
def _add_type_box(self):
form = QtGui.QGridLayout()
self.type_box = box = gui.radioButtonsInBox(
self.controlArea, self, "svmtype", [], box="SVM Type",
orientation=form, callback=self.settings_changed)
form.addWidget(gui.appendRadioButton(box, "C-SVM", addToLayout=False),
0, 0, Qt.AlignLeft)
form.addWidget(QtGui.QLabel("Cost (C):"),
0, 1, Qt.AlignRight)
form.addWidget(gui.doubleSpin(box, self, "C", 1e-3, 1000.0, 0.1,
decimals=3, alignment=Qt.AlignRight,
controlWidth=80, addToLayout=False,
callback=self.settings_changed),
0, 2)
form.addWidget(gui.appendRadioButton(box, "ν-SVM", addToLayout=False),
1, 0, Qt.AlignLeft)
form.addWidget(QtGui.QLabel("Complexity (ν):"),
1, 1, Qt.AlignRight)
form.addWidget(gui.doubleSpin(box, self, "nu", 0.05, 1.0, 0.05,
decimals=2, alignment=Qt.AlignRight,
controlWidth=80, addToLayout=False,
callback=self.settings_changed),
1, 2)
def _add_optimization_box(self):
super()._add_optimization_box()
gui.spin(self.optimization_box, self, "max_iter", 50, 1e6, 50,
label="Iteration limit:", checked="limit_iter",
alignment=Qt.AlignRight, controlWidth=100,
callback=self.settings_changed)
def create_learner(self):
kernel = ["linear", "poly", "rbf", "sigmoid"][self.kernel_type]
common_args = dict(
kernel=kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
tol=self.tol,
max_iter=self.max_iter if self.limit_iter else -1,
probability=True,
preprocessors=self.preprocessors
)
if self.svmtype == 0:
return SVMLearner(C=self.C, **common_args)
else:
return NuSVMLearner(nu=self.nu, **common_args)
def get_learner_parameters(self):
items = OrderedDict()
if self.svmtype == 0:
items["SVM type"] = "C-SVM, C={}".format(self.C)
else:
items["SVM type"] = "ν-SVM, ν={}".format(self.nu)
self._report_kernel_parameters(items)
items["Numerical tolerance"] = "{:.6}".format(self.tol)
items["Iteration limt"] = self.max_iter if self.limit_iter else "unlimited"
return items
if __name__ == "__main__":
app = QtGui.QApplication([])
w = OWSVMClassification()
w.set_data(Table("iris")[:50])
w.show()
app.exec_()
| qPCR4vir/orange3 | Orange/widgets/classify/owsvmclassification.py | Python | bsd-2-clause | 7,592 |
#!/usr/bin/python -Wall
# ================================================================
# Given a list, returns a list of pairs of elements and repetition counts.
# Example (with commas elided for legibility):
#
# Input: [ 1 1 1 2 2 3 3 3 3 5 5 1 1 ]
# Output: [ [1 3] [2 2] [3 4] [5 2] [1 2] ]
#
# I.e. there is a run of 3 1's, then a run of 2 2's, then a run of 4 3's, then
# 2 5's, then 2 1's. This is similar to the output of the Unix "uniq -c" command,
# if the input were one number per line. However, uniq -c puts the columns in
# reverse order from what I do here.
# ================================================================
# John Kerl
# [email protected]
# 2008-01-22
# ================================================================
def uniqc(list):
rv = []
n = len(list)
if (n == 0):
return []
curri = 0
nexti = 1
head = list[curri]
count = 1
while (curri < n):
if (nexti == n): # Last element in the list
if (list[curri] == head):
rv.append([head, count])
else:
rv.append([list[curri], 1])
elif (list[curri] == list[nexti]):
count += 1
else:
rv.append([head, count])
head = list[nexti]
count = 1
curri += 1
nexti += 1
return rv
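# Quick illustration of the return format (mirrors the header example above):
#   uniqc([1, 1, 1, 2, 2, 3, 3, 3, 3, 5, 5, 1, 1])
#   # -> [[1, 3], [2, 2], [3, 4], [5, 2], [1, 2]]   (element first, then count)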
# ----------------------------------------------------------------
# Test cases:
#def test1(list):
# #print list
# #print uniqc(list)
# #print
#
# # Pipe the output to, say, expand -20.
# print list, "\t", uniqc(list)
#
#def test_uniqc():
# test1([])
# test1([8])
# test1([8, 8])
# test1([8, 9])
# test1([9, 8])
# test1([9, 9])
# test1([8, 8, 8])
# test1([8, 8, 9])
# test1([8, 9, 8])
# test1([8, 9, 9])
# test1([9, 8, 8])
# test1([9, 8, 9])
# test1([9, 9, 8])
# test1([9, 9, 9])
#
#test_uniqc()
| johnkerl/sack | uniqc_m.py | Python | bsd-2-clause | 1,891 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.sw
~~~~~~~~~~~~~~~~~~~~~
Lexers for semantic web languages.
:copyright: 2007 by Philip Cooper <[email protected]>.
:license: BSD, see LICENSE for more details.
    Modified and extended by Gerrit Niezen. (The LICENSE file referenced above is missing; it was not distributed with the original file.)
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal
from pygments.util import shebang_matches
__all__ = ['Notation3Lexer','SparqlLexer']
# The N3 lexer should be close to the not really correct grammar at
# http://www.w3.org/2000/10/swap/grammar/n3-ietf.txt
# Comments indicate to which grammar rule the various regular
# expressions correspond.
_explicit_uri = r'<[^>]*>'
_qname = r'((\w[-\w]*)?:)?\w[-\w]*|(\w[-\w]*)?:' #(([:letter:][-\w]*)?:)?[:letter:][.\w]*
_symbol = '(' + _qname + '|' + _explicit_uri +')'
_quickvariable = r'\?\w+'
def expression(symbolAction, nextState):
#expression ::= | pathitem pathtail
#pathitem ::= | "(" pathlist ")"
# | "[" propertylist "]"
# | "{" formulacontent "}"
# | boolean
# | literal
# | numericliteral
# | quickvariable
# | symbol
if not isinstance(nextState,tuple):
nextState = (nextState,)
nextState = nextState + ('pathtail',)
return [
#pathlist
(r'\(', Punctuation, nextState + ('list',)),
#properylist
(r'\[', Punctuation, nextState + ('propertyList',)),
#formulacontent
(r'\{', Punctuation, nextState + ('root',)),
#boolean
(r'@false|@true', Keyword.Constant, nextState),
#literal
(r'("""[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*""")|("[^"\\]*(?:\\.[^"\\]*)*")', String, nextState + ('dtlang',)),
#numericliteral ::= double|integer|rational
(r'[-+]?[0-9]+(\.[0-9]+)?([eE][-+]?[0-9]+)', Number.Float, nextState),
(r'[-+]?[0-9]+', Number.Integer, nextState),
(r'[-+]?[0-9]+/[0-9]+', Number, nextState),
#quickvariable
(_quickvariable, Name.Variable, nextState),
#symbol
(_symbol, symbolAction, nextState),
]
class Notation3Lexer(RegexLexer):
"""
Lexer for the N3 / Turtle / NT
"""
name = 'N3'
aliases = ['n3', 'turtle']
filenames = ['*.n3', '*.ttl', '*.NT']
mimetypes = ['text/rdf+n3','application/x-turtle','application/n3']
tokens = {
'whitespaces': [
(r'(#.*)', Comment),
(r'\s+', Text),
],
'pathtailExpression': expression(Name.Function, '#pop'),
'pathtail': [
# No whitespaces allowed in front!
(r'(^|!|\.)(?!\s)', Operator, 'pathtailExpression'),
(r'', Text, '#pop'),
],
# statement:
'root': [
include('whitespaces'),
# declaration ::= base|prefix|keywords
(r'(@(?:prefix|base)\s*)([^\!\"\#\$\&\'\(\)\*\,\+\/\;\<\=\>\?\@\[\\\]\^\`\{\|\}\~]*:\s+)?(<[^>]*>\s*\.)', bygroups(Keyword,Name.Variable,Name.Namespace)),
(r'(@keywords)(\s*\w+\s*,)*(\s*\w+)', bygroups(Keyword,Text,Text)),
# existential|universal
(r'@forSome|@forAll', Name.Class, 'symbol_csl'),
# Terminating a formula
(r'\}', Punctuation, '#pop'),
] + expression(Name.Class, 'propertyList'),
'propertyList': [
#predicate ::= | "<="
# | "="
# | "=>"
# | "@a"
# | "@has" expression
# | "@is" expression "@of"
# | expression
include('whitespaces'),
(r';', Punctuation),
(r'(<=|=>|=|@?a(?=\s))', Operator, 'objectList'),
(r'\.', Punctuation, '#pop'),
(r'\]', Punctuation, '#pop'),
(r'(?=\})', Text, '#pop'),
] + expression(Name.Function, 'objectList'),
'objectList': [
include('whitespaces'),
(r',', Punctuation),
(r'(?=;)', Text, '#pop'),
(r'(?=\.)', Text, '#pop'),
(r'(?=\])', Text, '#pop'),
(r'(?=\})', Text, '#pop'),
] + expression(Name.Attribute, ()),
'list': [
include('objectList'),
(r'\)', Punctuation, '#pop'),
],
'symbol_csl': [
include('whitespaces'),
(r',', Punctuation),
(_symbol, Name.Variable),
(r'.', Punctuation, '#pop'),
],
'dtlang': [
#dtlang ::= "@" langcode|"^^" symbol|void
(r'@[a-z]+(-[a-z0-9]+)*', Name.Attribute, '#pop'),
(r'\^\^'+_symbol, Name.Attribute, '#pop'),
(r'', Text, '#pop'),
],
}
class SparqlLexer(RegexLexer):
"""
    Lexer for SPARQL (not complete).
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.sparql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'comments': [
(r'(\s*#.*)', Comment)
],
'root': [
include('comments'),
(r'(\s*(?:PREFIX|BASE)\s+)([\w-]*:[\w-]*)?(\s*<[^> ]*>\s*)',bygroups(Keyword,Name.Variable,Name.Namespace)),
(r'(\s*#.*)', Comment),
(r'(\s*)(SELECT\s*(?:DISTINCT|REDUCED)?)(\s*)',bygroups(Text, Keyword,Text), 'selectVars'),
(r'(\s*)((?:ASK|CONSTRUCT|DESCRIBE)\s*(?:DISTINCT|REDUCED)?\s*)((?:\?[a-zA-Z0-9_-]+\s*)+|\*)(\s*)',
bygroups(Text, Keyword,Name.Variable,Text)),
(r'(\s*)((?:LOAD|CLEAR|DROP|CREATE)\s*(?:SILENT)?\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)(;)(\s*)',
bygroups(Text, Keyword, Keyword, Name.Attribute, Text, Text)),
(r'(\s*)((?:ADD|MOVE|COPY)\s*(?:SILENT)?\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)((?:TO)\s*)(\s*(?:GRAPH)?\s*)(\s*<[^> ]*>\s*)?(;)(\s*)',
bygroups(Text, Keyword, Keyword, Name.Attribute, Keyword, Keyword, Name.Attribute, Text, Text)),
(r'(\s*)((?:INSERT|DELETE)\s*(?:DATA)?)\s*',bygroups(Text, Keyword),'quaddata'),
(r'(\s*)(CONSTRUCT)?\s*({)',bygroups(Text, Keyword,Punctuation),'graph'),
(r'(\s*)(FROM\s*(?:NAMED)?)(\s*.*)', bygroups(Text, Keyword,Text)),
(r'(\s*)(WHERE\s?)?\s*({)',bygroups(Text, Keyword, Punctuation),'groupgraph'),
(r'(\s*)(LIMIT|OFFSET)(\s*[+-]?[0-9]+)',bygroups(Text, Keyword,Literal.String)),
(r'(ORDER BY (?:ASC|DESC)\s*)(\()\s*',bygroups(Text, Keyword,Punctuation),'bindgraph'),
(r'(\s*)(})', bygroups(Text, Punctuation)),
],
'selectVars':[
(r'(\s*)(\*)(\s*)',bygroups(Text,Keyword,Text), '#pop'),
(r'(?=\s*(FROM|WHERE|GROUP|HAVING|ORDER|LIMIT|OFFSET))', Text, '#pop'),
(r'(\s*)(\()(\s*)', bygroups(Text, Punctuation, Text), 'bindgraph'),
include('variable'),
(r'\n', Text),
(r'', Text, '#pop'),
],
'quaddata':[
(r'(\s*)({)(\s*)(GRAPH)(\s*<[^> ]*>\s*)', bygroups(Text, Punctuation, Text, Keyword, Name.Attribute), 'quads'),
(r'(\s*)({)(\s*)',bygroups(Text,Punctuation,Text), 'graph'),
(r'', Text, '#pop'),
],
'quads':[
(r'(\s*)({)(\s*)(GRAPH)(\s*<[^> ]*>\s*)', bygroups(Text, Punctuation, Text, Keyword, Name.Attribute), '#push'),
(r'(\s*)({)(\s*)', bygroups(Text,Punctuation,Text), 'graph'),
(r'(\s*)(})(\s*)', bygroups(Text,Punctuation,Text), '#pop'),
],
'groupgraph':[
(r'(\s*)(UNION)(\s*)({)(\s*)', bygroups(Text, Keyword, Text, Punctuation, Text), '#push'),
(r'(\s*)({)(\s*)',bygroups(Text, Punctuation, Text), '#push'),
include('graph'),
include('root'),
(r'', Text, '#pop'),
],
'graph':[
(r'(\s*)(<[^>]*\>)', bygroups(Text, Name.Class), ('triple','predObj')),
(r'(\s*[a-zA-Z_0-9\-]*:[a-zA-Z0-9\-_]*\s)', Name.Class, ('triple','predObj')),
(r'(\s*\?[a-zA-Z0-9_-]*)', Name.Variable, ('triple','predObj')),
(r'\s*\[\]\s*', Name.Class, ('triple','predObj')),
(r'(\s*)(FILTER)(\s*)',bygroups(Text, Keyword,Text),'filterConstraint'),
(r'(\s*)(BIND)(\s*)(\()(\s*)',bygroups(Text, Keyword, Text, Punctuation, Text),'bindgraph'),
(r'(\s*)(OPTIONAL)(\s*)({)',bygroups(Text, Keyword, Text, Punctuation), '#push'),
(r'(\s*)(})(\s*)(\.)(\s*)', bygroups(Text, Punctuation, Text, Punctuation, Text), '#pop'),
(r'(\s*)(})', bygroups(Text, Punctuation), '#pop'),
(r'(\s*)(\.)(\s*)', bygroups(Text, Punctuation, Text), '#pop'),
],
'triple' : [
(r'(?=\s*})', Text, '#pop'),
(r'(\s*)(\.)(\s*)', bygroups(Text, Punctuation, Text), '#pop'),
],
'predObj': [
include('comments'),
(r'(\s*\?[a-zA-Z0-9_-]*\b\s*)', Name.Variable,'object'),
(r'(\s*[a-zA-Z_:][a-zA-Z0-9\-_:]*\b\s*)', Operator, 'object'),
(r'\s*(<[^>]*\>)', Operator, 'object'),
(r'\s*\]\s*', Text, '#pop'),
(r'(?=\s*\.\s*)', Keyword, '#pop'),
],
'objList': [
(r'(\s*)(\))', bygroups(Text, Punctuation), '#pop'),
include('object'),
],
'object': [
include('variable'),
(r'\s*\[', Text, 'predObj'),
(r'\s*<[^> ]*>', Name.Attribute),
(r'(\s*)("""(?:.|\n)*?""")(\@[a-z]{2-4}|\^\^<?[a-zA-Z0-9\-\:_#/\.]*>?)?\s*', bygroups(Text, Literal.String,Text)),
(r'\s*".*?[^\\]"(?:\@[a-z]{2-4}|\^\^<?[a-zA-Z0-9\-\:_#/\.]*>?)?\s*', Literal.String),
(r'(\s*)((?:[+-])?\d+\.?\d*)(\s*)', bygroups(Text, Number, Text)),
(r'\s*[a-zA-Z0-9\-_\:]+\s*', Name.Attribute),
(r'(\s*)(\()', bygroups(Text, Punctuation), 'objList'),
(r',', Punctuation),
(r'(\s*)(;)(\s*)', bygroups(Text, Punctuation, Text), '#pop'),
(r'(?=\])', Text, '#pop'),
(r'\s*(?=\.)', Text, '#pop'),
],
'variable':[
(r'(\?[a-zA-Z0-9\-_]+\s*)', Name.Variable),
],
'filterConstraint':[
include('filterBuiltin'),
(r'(\s*)(\()(\s*)', bygroups(Text, Punctuation, Text), 'filterExp'),
(r'(\s*)(\.)(\s*)', bygroups(Text, Punctuation, Text), '#pop'),
],
#filterBuiltin is intended to be included, not pushed
'filterBuiltin':[
include('aggregate'),
            (r'(str|lang|langmatches|datatype|bound|iri|uri|bnode)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(abs|ceil|floor|round)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(strlen|ucase|lcase|encode_for_uri|contains|strstarts|strends|strbefore|strafter)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(year|month|day|hours|minutes|seconds|timezone|tz)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(md5|sha1|sha256|sha384|sha512)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(if|strlang|strdt)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(sameterm|isIRI|isURI|isBlank|isLiteral|isNumeric)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
(r'(regex)(\s*)(\()', bygroups(Name.Builtin, Text, Punctuation), 'objList'),
],
# aggregate is intended to be included, not pushed
'aggregate':[
(r'(\s*)(COUNT)(\s*)(\()(\s*)(DISTINCT)?(\s*)(\*)(\s*)',
bygroups(Text, Keyword, Text, Punctuation, Text, Keyword, Text, Keyword, Text)),
(r'(\s*)(COUNT|SUM|MIN|MAX|AVG|SAMPLE)(\s*)(\()(\s*)(DISTINCT)?(\s*)',
bygroups(Text, Keyword, Text, Punctuation, Text, Keyword, Text), 'filterExp'),
(r'(\s*)(GROUP_CONCAT)(\s*)(\()(\s*)(DISTINCT)?(\s*)',
bygroups(Text, Keyword, Text, Punctuation, Text, Keyword, Text), 'groupConcatExp'),
],
'groupConcatExp':[
(r'(\s*)(;)(\s*)(SEPARATOR)(\s*)(=)(\s*)',
bygroups(Text, Punctuation, Text, Keyword, Text, Operator, Text), 'string'),
include('filterExp'),
],
'filterExp':[
include('filterBuiltin'),
(r'(\s*)(\()(\s*)', bygroups(Text, Punctuation, Text), '#push'),
include('variable'),
include('object'),
(r'\s*[+*/<>=~!%&|-]+\s*', Operator),
(r'(\s*)(\))', bygroups(Text, Punctuation), '#pop'),
],
'bindgraph':[
(r'(\s*)(\()(\s*)', bygroups(Text, Punctuation, Text), '#push'),
(r'\s*AS\s*', Keyword),
(r'(\s*)(IRI)(\s*)(\()(\s*)',bygroups(Text, Keyword, Text, Punctuation, Text),'iri'),
(r'(\s*)(\))(\s*)', bygroups(Text, Punctuation, Text), '#pop'),
include('filterExp'),
include('variable'),
include('object'),
(r'', Text, '#pop'),
],
'iri':[
include('object'),
(r'(\s*)(\))', bygroups(Text, Punctuation), '#pop'),
],
'string':[
(r'(\s*)("""(?:.|\n)*?""")(\@[a-z]{2-4}|\^\^<?[a-zA-Z0-9\-\:_#/\.]*>?)?\s*', bygroups(Text,Literal.String,Text), '#pop'),
(r'\s*".*?[^\\]"(?:\@[a-z]{2-4}|\^\^<?[a-zA-Z0-9\-\:_#/\.]*>?)?\s*', Literal.String, '#pop'),
],
}
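# Illustrative usage sketch (not part of the original module; assumes pygments is
# installed): both classes are ordinary RegexLexers, so they can be exercised via
# pygments.highlight, e.g.
#   from pygments import highlight
#   from pygments.formatters import NullFormatter
#   print(highlight('@prefix foaf: <http://xmlns.com/foaf/0.1/> .', Notation3Lexer(), NullFormatter()))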
| gniezen/n3pygments | swlexers/__init__.py | Python | bsd-2-clause | 13,819 |
# -*- coding: utf-8 -*-
from flask import render_template
from .. import lastuser_ui
@lastuser_ui.route('/')
def index():
return render_template('index.html.jinja2')
| hasgeek/lastuser | lastuser_ui/views/index.py | Python | bsd-2-clause | 174 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('eventlog', '0002_auto_20170522_1134'),
]
operations = [
migrations.AddField(
model_name='fieldsightlog',
name='source',
field=models.ForeignKey(related_name='log', to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AlterField(
model_name='fieldsightlog',
name='type',
field=models.IntegerField(default=0, choices=[(0, b'USER'), (1, b'FORM'), (2, b'SUBMISSION'), (3, b'Site')]),
),
]
| awemulya/fieldsight-kobocat | onadata/apps/eventlog/migrations/0003_auto_20170522_1154.py | Python | bsd-2-clause | 783 |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_rolling_median():
series = pd.Series([4, 3, 5, 2, 6]) # Series of 4, 3, 5, 2, 6
out_series = series.rolling(3).median()
return out_series # Expect series of NaN, NaN, 4.0, 3.0, 5.0
print(series_rolling_median())
| IntelLabs/hpat | examples/series/rolling/series_rolling_median.py | Python | bsd-2-clause | 1,804 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
import numpy as np
from scipy.interpolate import interp1d
def _avgdiff(x):
dx = np.diff(x)
dx2 = np.zeros_like(x)
dx2[0], dx2[-1] = dx[0], dx[-1]
dx2[1:-1] = 0.5 * (dx[1:] + dx[:-1])
return dx2
def rebalanced_grid(
grid, err, base=0.25, num=None, resolution_factor=10, smooth_fact=1.0
):
if num is None:
num = grid.size
dx = np.diff(grid)
area_err = 0.5 * np.dot(err[1:] + err[:-1], dx) # trapezoidal rule
dx2 = _avgdiff(grid)
def smooth_err(x):
tot = 0
for i, (gx, e) in enumerate(zip(grid, err)):
fwhm = dx2[i] * smooth_fact
tot += e * np.exp(-((x - gx) ** 2) / (2 * (fwhm / 2.35482) ** 2))
return tot
finegrid = np.zeros((grid.size - 1) * resolution_factor + 1)
for i in range(grid.size - 1):
finegrid[i * resolution_factor : (i + 1) * resolution_factor] = np.linspace(
grid[i], grid[i + 1], resolution_factor + 1
)[:-1]
finegrid[-resolution_factor - 1 :] = np.linspace(
grid[-2], grid[-1], resolution_factor + 1
)
smoothed = smooth_err(finegrid) + base * area_err / (grid[-1] - grid[0])
assert np.all(smoothed > 0)
assert np.all(_avgdiff(finegrid) > 0)
interr = np.cumsum(smoothed * _avgdiff(finegrid))
cb = interp1d(interr, finegrid)
return cb(np.linspace(interr[0], interr[-1], num))
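# Minimal usage sketch (illustrative, not from the original library): given a grid
# and a per-point error estimate, the call below returns a grid of the same size
# with points concentrated where the error is largest.
#   grid = np.linspace(0.0, 1.0, 11)
#   err = np.exp(-(grid - 0.5) ** 2 / 0.01)   # error peaked around x = 0.5
#   new_grid = rebalanced_grid(grid, err)     # same size, denser near x = 0.5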
def pre_pruning_mask(grid, rtol=1e-12, atol=0.0):
"""Returns a mask for grid pruning.
Any grid spacing smaller than ``rtol*gridvalue + atol`` will
be pruned. In general the value on the right is removed unless it is
the last point in the grid.
Parameters
----------
grid : array
rtol : float
atol : float
Returns
-------
NumPy array of ``numpy.bool_`` (to be used as mask).
"""
if np.any(np.diff(grid) < 0):
raise ValueError("grid needs to be monotonic")
limit = grid[-1] - (atol + abs(rtol * grid[-1]))
mask = np.empty(grid.size, dtype=np.bool_)
mask[grid.size - 1] = True # rightmost point included
for ridx in range(grid.size - 2, -1, -1):
if grid[ridx] < limit:
mask[ridx] = True
break
else:
mask[ridx] = False
else:
raise ValueError("no grid-points left")
mask[0] = True # leftmost point included
limit = grid[0] + abs(rtol * grid[0]) + atol
for idx in range(1, ridx):
if grid[idx] < limit:
mask[idx] = False
else:
mask[idx] = True
limit = grid[idx] + abs(rtol * grid[idx]) + atol
return mask
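# Illustrative example (assumption, not shipped with the module): points closer than
# rtol*value + atol to an already-kept neighbour are masked out, e.g.
#   g = np.array([0.0, 0.1, 0.1 + 1e-13, 0.5, 1.0])
#   pruned = g[pre_pruning_mask(g, rtol=1e-9)]   # drops the near-duplicate of 0.1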
def combine_grids(grids, **kwargs):
"""Combines multiple grids and prunes them using pre_pruning mask
Parameters
----------
grids : iterable of array_like grids
\\*\\* : dict
Keyword arguments passed on to pre_pruning_mask
Returns
-------
Strictly increasing monotonic array
"""
supergrid = np.sort(np.concatenate(grids))
mask = pre_pruning_mask(supergrid, **kwargs)
return supergrid[mask]
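# For instance (illustrative): combine_grids([np.linspace(1, 2, 5), np.linspace(1, 2, 3)])
# merges the two grids and keeps each coincident point (1.0, 1.5, 2.0) only once,
# returning the five distinct values 1.0, 1.25, 1.5, 1.75, 2.0.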
def grid_pruning_mask(grid, err, ndrop=None, protect_sparse=None, pow_err=2, pow_dx=2):
"""Returns a mask for grid pruning.
Parameters
----------
grid : array
err : array
ndrop : int
If not provided taken as 25% of grid size (rounded upward).
protect_sparse : int
If not provided taken as 25% of grid size (rounded upward).
pow_err : number
Exponent of error in weighting.
pow_dx : number
Exponent of grid spacing in weighting.
"""
if ndrop is None:
ndrop = math.ceil(grid.size * 0.25)
if protect_sparse is None:
protect_sparse = math.ceil(grid.size * 0.25)
dx = _avgdiff(grid)
protected = np.argsort(dx)[-protect_sparse:]
score = err ** pow_err * dx ** pow_dx
importance = np.argsort(score)
drop = []
for considered in importance:
if considered in protected:
continue
if considered - 1 in drop or considered + 1 in drop:
continue
drop.append(considered)
if len(drop) == ndrop:
break
return ~np.in1d(np.arange(grid.size), drop)
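# Rough usage sketch (assumption): err is an error estimate per grid point; up to
# ndrop points with the smallest err**pow_err * dx**pow_dx score are dropped, never
# two adjacent points and never the protected sparsest points, e.g.
#   keep = grid_pruning_mask(grid, err, ndrop=5)
#   grid, err = grid[keep], err[keep]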
| bjodah/finitediff | finitediff/grid/rebalance.py | Python | bsd-2-clause | 4,269 |
from distutils.core import setup
setup(
name='captcha2upload',
packages=['captcha2upload'],
package_dir={'captcha2upload': 'src/captcha2upload'},
version='0.2',
install_requires=['requests'],
    description='Upload your image and solve captcha using the 2Captcha '
'Service',
author='Alessandro Sbarbati',
author_email='[email protected]',
url='https://github.com/Mirio/captcha2upload',
download_url='https://github.com/Mirio/captcha2upload/tarball/0.1',
keywords=['2captcha', 'captcha', 'Image Recognition'],
classifiers=["Topic :: Scientific/Engineering :: Image Recognition"],
)
| Mirio/captcha2upload | setup.py | Python | bsd-2-clause | 647 |
'''
Created on 10 August 2014
@author: vincent
'''
# Loading necessary packages
import numpy as np
import sys
from seizures.data.DataLoader_v2 import DataLoader
from seizures.evaluation.XValidation import XValidation
from seizures.evaluation.performance_measures import accuracy, auc
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.features.MixFeatures import MixFeatures
from seizures.features.SEFeatures import SEFeatures
from seizures.features.StatsFeatures import StatsFeatures
from seizures.features.PLVFeatures import PLVFeatures
from seizures.features.ARFeatures import ARFeatures
from seizures.features.LyapunovFeatures import LyapunovFeatures
from seizures.prediction.ForestPredictor import ForestPredictor
from seizures.prediction.SVMPredictor import SVMPredictor
from seizures.prediction.XtraTreesPredictor import XtraTreesPredictor
from seizures.Global import Global
from sklearn.cross_validation import train_test_split
def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name="Dog_1",preprocess=True):
"""
Single patient cross validation
Returns 2 lists of cross validation performances
:param predictor_cls:
    :param feature_extractor:
:param patient_name:
:return:
"""
    # predictor_cls is a handle to a PredictorBase subclass
# Instantiate the predictor
predictor = predictor_cls()
base_dir = Global.path_map('clips_folder')
base_dir = '/nfs/data3/kaggle_seizure/clips/'
loader = DataLoader(base_dir, feature_extractor)
X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess)
#X_train,y_seizure, y_early = loader.training_data(patient_name)
#y_train = [y_seizure,y_early]
#X_list,y_list = train_test_split(X_train,y_train)
# running cross validation
print patient_name
print "\ncross validation: seizures vs not"
result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_seizure), np.std(result_seizure), result_seizure)
print "\ncross validation: early_vs_not"
result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(result_early), np.std(result_early), result_early)
return result_seizure,result_early
def Xval_on_patients(predictor_cls, feature_extractor, patients_list=['Dog_1'],preprocess=True):
''' Runs cross validation for given predictor class and feature instance on the given list of patients
INPUT:
    - predictor_cls: a Predictor class (implementing PredictorBase)
    - feature_extractor: an instantiation of a Features class
- patients_list: a list of subject strings e.g., ['Dog_1', 'Patient_2']
'''
assert(isinstance(feature_extractor, FeatureExtractBase))
results_seizure = []
results_early = []
for patient_name in patients_list:
result_seizure, result_early = Xval_on_single_patient(predictor_cls, feature_extractor, patient_name, preprocess=preprocess)
results_seizure.append(result_seizure)
results_early.append(result_early)
avg_results_seizure = np.mean(np.array(results_seizure),axis=0)
avg_results_early = np.mean(np.array(results_early),axis=0)
print "\ncross validation: seizures vs not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_seizure), np.std(avg_results_seizure), avg_results_seizure)
print "\ncross validation: early_vs_not (ACROSS ALL SUBJECTS)"
print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
% (np.mean(avg_results_early), np.std(avg_results_early), avg_results_early)
return avg_results_seizure, avg_results_early
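# Example call (illustrative; main() below wires up the same thing with more options):
#   Xval_on_patients(SVMPredictor, ARFeatures(), patients_list=['Dog_1'])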
# generate prediction for test data
def main():
# code run at script launch
#patient_name = sys.argv[1]
# There are Dog_[1-4] and Patient_[1-8]
patients_list = ["Dog_%d" % i for i in range(1, 5)] + ["Patient_%d" % i for i in range(1, 9)]
patients_list = ["Dog_%d" % i for i in [1]] #["Patient_%d" % i for i in range(1, 9)]#++
#feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}}])
#feature_extractor = PLVFeatures()
#feature_extractor = MixFeatures([{'name':"PLVFeatures",'args':{}},{'name':"ARFeatures",'args':{}}])
#feature_extractor = ARFeatures()
feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}},{'name':"PLVFeatures",'args':{}},{'name':'SEFeatures','args':{}}])
#feature_extractor = SEFeatures()
#feature_extractor = LyapunovFeatures()
#feature_extractor = StatsFeatures()
preprocess = True
predictor = SVMPredictor
#predictor = XtraTreesPredictor
if preprocess==True:
print 'Preprocessing ON'
else:
print 'Preprocessing OFF'
print 'predictor: ',predictor
Xval_on_patients(predictor,feature_extractor, patients_list,preprocess=preprocess)
if __name__ == '__main__':
main()
| vincentadam87/gatsby-hackathon-seizure | code/python/seizures/examples/cross_validation_test.py | Python | bsd-2-clause | 5,173 |
"""SCons.Tool.sgiar
Tool-specific initialization for SGI ar (library archive). If CC
exists, static libraries should be built with it, so the prelinker has
a chance to resolve C++ template instantiations.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgiar.py 5023 2010/06/14 22:05:46 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
if env.Detect('CC'):
env['AR'] = 'CC'
env['ARFLAGS'] = SCons.Util.CLVar('-ar')
env['ARCOM'] = '$AR $ARFLAGS -o $TARGET $SOURCES'
else:
env['AR'] = 'ar'
env['ARFLAGS'] = SCons.Util.CLVar('r')
env['ARCOM'] = '$AR $ARFLAGS $TARGET $SOURCES'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
env['SHLINKCOM'] = '$SHLINK $SHLINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
def exists(env):
return env.Detect('CC') or env.Detect('ar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| kerwinxu/barcodeManager | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/sgiar.py | Python | bsd-2-clause | 2,644 |
#!/usr/bin/env python3
# coding:utf-8
import os
import sys
current_path = os.path.dirname(os.path.abspath(__file__))
helper_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir, 'data', 'launcher', 'helper'))
if __name__ == "__main__":
default_path = os.path.abspath(os.path.join(current_path, os.pardir))
noarch_lib = os.path.abspath(os.path.join(default_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
osx_lib = os.path.join(default_path, 'lib', 'darwin')
sys.path.append(osx_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/3.8/Extras/lib/python/PyObjC"
sys.path.append(extra_lib)
from config import config
import module_init
import subprocess
import webbrowser
from xlog import getLogger
xlog = getLogger("launcher")
import AppKit
import SystemConfiguration
from PyObjCTools import AppHelper
class MacTrayObject(AppKit.NSObject):
def __init__(self):
pass
def applicationDidFinishLaunching_(self, notification):
setupHelper()
loadConfig()
self.setupUI()
self.registerObserver()
def setupUI(self):
self.statusbar = AppKit.NSStatusBar.systemStatusBar()
self.statusitem = self.statusbar.statusItemWithLength_(
AppKit.NSSquareStatusItemLength) # NSSquareStatusItemLength #NSVariableStatusItemLength
# Set initial image icon
icon_path = os.path.join(current_path, "web_ui", "favicon-mac.ico")
image = AppKit.NSImage.alloc().initByReferencingFile_(icon_path)
image.setScalesWhenResized_(True)
image.setSize_((20, 20))
self.statusitem.setImage_(image)
# Let it highlight upon clicking
self.statusitem.setHighlightMode_(1)
self.statusitem.setToolTip_("XX-Net")
# Get current selected mode
proxyState = getProxyState(currentService)
# Build a very simple menu
self.menu = AppKit.NSMenu.alloc().initWithTitle_('XX-Net')
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Config', 'config:', '')
self.menu.addItem_(menuitem)
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(getCurrentServiceMenuItemTitle(), None, '')
self.menu.addItem_(menuitem)
self.currentServiceMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Auto GAEProxy',
'enableAutoProxy:', '')
if proxyState == 'pac':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.autoGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global GAEProxy',
'enableGlobalProxy:', '')
if proxyState == 'gae':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global X-Tunnel',
'enableGlobalXTunnel:', '')
if proxyState == 'x_tunnel':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalXTunnelMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global Smart-Router',
'enableGlobalSmartRouter:', '')
if proxyState == 'smart_router':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalSmartRouterMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Disable GAEProxy', 'disableProxy:',
'')
if proxyState == 'disable':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.disableGaeProxyMenuItem = menuitem
# Reset Menu Item
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Reset Each Module',
'restartEachModule:', '')
self.menu.addItem_(menuitem)
# Default event
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'windowWillClose:', '')
self.menu.addItem_(menuitem)
# Bind it to the status item
self.statusitem.setMenu_(self.menu)
# Hide dock icon
AppKit.NSApp.setActivationPolicy_(AppKit.NSApplicationActivationPolicyProhibited)
def updateStatusBarMenu(self):
self.currentServiceMenuItem.setTitle_(getCurrentServiceMenuItemTitle())
# Remove Tick before All Menu Items
self.autoGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalXTunnelMenuItem.setState_(AppKit.NSOffState)
self.globalSmartRouterMenuItem.setState_(AppKit.NSOffState)
self.disableGaeProxyMenuItem.setState_(AppKit.NSOffState)
# Get current selected mode
proxyState = getProxyState(currentService)
# Update Tick before Menu Item
if proxyState == 'pac':
self.autoGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'gae':
self.globalGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'x_tunnel':
self.globalXTunnelMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'smart_router':
self.globalSmartRouterMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'disable':
self.disableGaeProxyMenuItem.setState_(AppKit.NSOnState)
# Trigger autovalidation
self.menu.update()
def validateMenuItem_(self, menuItem):
return currentService or (menuItem != self.autoGaeProxyMenuItem and
menuItem != self.globalGaeProxyMenuItem and
menuItem != self.globalXTunnelMenuItem and
menuItem != self.globalSmartRouterMenuItem and
menuItem != self.disableGaeProxyMenuItem)
def presentAlert_withTitle_(self, msg, title):
self.performSelectorOnMainThread_withObject_waitUntilDone_('presentAlertWithInfo:', [title, msg], True)
return self.alertReturn
def presentAlertWithInfo_(self, info):
alert = AppKit.NSAlert.alloc().init()
alert.setMessageText_(info[0])
alert.setInformativeText_(info[1])
alert.addButtonWithTitle_("OK")
alert.addButtonWithTitle_("Cancel")
self.alertReturn = alert.runModal() == AppKit.NSAlertFirstButtonReturn
def registerObserver(self):
nc = AppKit.NSWorkspace.sharedWorkspace().notificationCenter()
nc.addObserver_selector_name_object_(self, 'windowWillClose:', AppKit.NSWorkspaceWillPowerOffNotification, None)
def windowWillClose_(self, notification):
executeResult = subprocess.check_output(['networksetup', '-listallnetworkservices'])
services = executeResult.split(b'\n')
services = [service for service in services if service and service.find(b'*') == -1 and getProxyState(
service) != 'disable'] # Remove disabled services and empty lines
if len(services) > 0:
try:
list(map(helperDisableAutoProxy, services))
list(map(helperDisableGlobalProxy, services))
except:
disableAutoProxyCommand = ';'.join(map(getDisableAutoProxyCommand, services))
disableGlobalProxyCommand = ';'.join(map(getDisableGlobalProxyCommand, services))
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
module_init.stop_all()
os._exit(0)
AppKit.NSApp.terminate_(self)
def config_(self, notification):
host_port = config.control_port
webbrowser.open_new("http://127.0.0.1:%s/" % host_port)
def restartEachModule_(self, _):
module_init.stop_all()
module_init.start_all_auto()
def enableAutoProxy_(self, _):
try:
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
except:
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
enableAutoProxyCommand = getEnableAutoProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableGlobalProxyCommand, enableAutoProxyCommand)
xlog.info("try enable auto proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "pac"
config.save()
self.updateStatusBarMenu()
def enableGlobalProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableGlobalProxyCommand = getEnableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableGlobalProxyCommand)
xlog.info("try enable global proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "gae"
config.save()
self.updateStatusBarMenu()
def enableGlobalXTunnel_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableXTunnelProxyCommand = getEnableXTunnelProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableXTunnelProxyCommand)
xlog.info("try enable global x-tunnel proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "x_tunnel"
config.save()
self.updateStatusBarMenu()
def enableGlobalSmartRouter_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableSmartRouterCommand = getEnableSmartRouterProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, enableSmartRouterCommand)
xlog.info("try enable global smart-router proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "smart_router"
config.save()
self.updateStatusBarMenu()
def disableProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (
disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.os_proxy_mode = "disable"
config.save()
self.updateStatusBarMenu()
def setupHelper():
try:
with open(os.devnull) as devnull:
subprocess.check_call(helper_path, stderr=devnull)
except:
rmCommand = "rm \\\"%s\\\"" % helper_path
cpCommand = "cp \\\"%s\\\" \\\"%s\\\"" % (os.path.join(current_path, 'mac_helper'), helper_path)
chownCommand = "chown root \\\"%s\\\"" % helper_path
chmodCommand = "chmod 4755 \\\"%s\\\"" % helper_path
executeCommand = 'do shell script "%s;%s;%s;%s" with administrator privileges' % (
rmCommand, cpCommand, chownCommand, chmodCommand)
xlog.info("try setup helper:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
def getCurrentServiceMenuItemTitle():
if currentService:
return 'Connection: %s' % currentService
else:
return 'Connection: None'
def getProxyState(service):
if not service:
return
# Check if auto proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getautoproxyurl', service])
if (executeResult.find(b'http://127.0.0.1:8086/proxy.pac\nEnabled: Yes') != -1):
return "pac"
# Check if global proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getwebproxy', service])
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 8087') != -1):
return "gae"
# Check if global proxy is enabled
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 1080') != -1):
return "x_tunnel"
if (executeResult.find(b'Enabled: Yes\nServer: 127.0.0.1\nPort: 8086') != -1):
return "smart_router"
return "disable"
# Generate commands for Apple Script
def getEnableAutoProxyCommand(service):
return "networksetup -setautoproxyurl \\\"%s\\\" \\\"http://127.0.0.1:8086/proxy.pac\\\"" % service
def getDisableAutoProxyCommand(service):
return "networksetup -setautoproxystate \\\"%s\\\" off" % service
def getEnableGlobalProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8087" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8087" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableXTunnelProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 1080" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 1080" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableSmartRouterProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8086" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8086" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getDisableGlobalProxyCommand(service):
disableHttpProxyCommand = "networksetup -setwebproxystate \\\"%s\\\" off" % service
disableHttpsProxyCommand = "networksetup -setsecurewebproxystate \\\"%s\\\" off" % service
return "%s;%s" % (disableHttpProxyCommand, disableHttpsProxyCommand)
# Call helper
def helperEnableAutoProxy(service):
subprocess.check_call([helper_path, 'enableauto', service, 'http://127.0.0.1:8086/proxy.pac'])
def helperDisableAutoProxy(service):
subprocess.check_call([helper_path, 'disableauto', service])
def helperEnableGlobalProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8087'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8087'])
def helperEnableXTunnelProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '1080'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '1080'])
def helperEnableSmartRouterProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8086'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8086'])
def helperDisableGlobalProxy(service):
subprocess.check_call([helper_path, 'disablehttp', service])
subprocess.check_call([helper_path, 'disablehttps', service])
def loadConfig():
if not currentService:
return
proxy_setting = config.os_proxy_mode
if getProxyState(currentService) == proxy_setting:
return
try:
if proxy_setting == "pac":
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
elif proxy_setting == "gae":
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
elif proxy_setting == "x_tunnel":
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
elif proxy_setting == "smart_router":
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)
elif proxy_setting == "disable":
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
else:
xlog.warn("proxy_setting:%r", proxy_setting)
except:
xlog.warn("helper failed, please manually reset proxy settings after switching connection")
sys_tray = MacTrayObject.alloc().init()
currentService = None
def fetchCurrentService(protocol):
global currentService
status = SystemConfiguration.SCDynamicStoreCopyValue(None, "State:/Network/Global/" + protocol)
if not status:
currentService = None
return
serviceID = status['PrimaryService']
service = SystemConfiguration.SCDynamicStoreCopyValue(None, "Setup:/Network/Service/" + serviceID)
if not service:
currentService = None
return
currentService = service['UserDefinedName']
@AppKit.objc.callbackFor(AppKit.CFNotificationCenterAddObserver)
def networkChanged(center, observer, name, object, userInfo):
fetchCurrentService('IPv4')
loadConfig()
sys_tray.updateStatusBarMenu()
# Note: the following code can't run in class
def serve_forever():
app = AppKit.NSApplication.sharedApplication()
app.setDelegate_(sys_tray)
# Listen for network change
nc = AppKit.CFNotificationCenterGetDarwinNotifyCenter()
AppKit.CFNotificationCenterAddObserver(nc, None, networkChanged, "com.apple.system.config.network_change", None,
AppKit.CFNotificationSuspensionBehaviorDeliverImmediately)
fetchCurrentService('IPv4')
AppHelper.runEventLoop()
def on_quit(widget=None, data=None):
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
def main():
serve_forever()
if __name__ == '__main__':
main()
| xyuanmu/XX-Net | code/default/launcher/mac_tray.py | Python | bsd-2-clause | 19,146 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: db_ext_split.py <src> <dst> <prob>
Options:
-h --help
"""
import os
import cv2
from glob import glob
from docopt import docopt
from mscr.split import Split, RandomSplitPredicate
from mscr.util import Crop
from mscr.data import MyProgressBar
PAD = 8
if __name__ == '__main__':
args = docopt(__doc__)
src = args['<src>']
dst = args['<dst>']
prob = float(args['<prob>'])
split = Split(RandomSplitPredicate(p=prob))
crop = Crop()
count = 0
if os.path.exists(src) and os.path.exists(dst):
filz = glob(os.path.join(src, '*.jpg'))
pbar = MyProgressBar(len(filz), 'extending db:')
for im in filz:
img = cv2.imread(im)
img = crop.run(img)
for bl in split.run(img):
out = os.path.join(dst, str(count).zfill(PAD) + '.jpg')
cv2.imwrite(out, bl.img)
count += 1
pbar.update()
pbar.finish()
else:
        print 'err: db_ext_split.py: path doesn\'t exist'
| fpeder/mscr | bin/db_ext_split.py | Python | bsd-2-clause | 1,070 |
from biokit.rtools import package
import pytest
try:
import create_dummy_package as dun
except:
from . import create_dummy_package as dun
def test_install_packages():
d = dun.CreateDummy()
d()
package.install_package('./dummy/dummytest_1.0.0.tar.gz', verbose=True)
d._clean()
def test_install_cran_package():
package.install_package("truncnorm")
def test_get_r_version():
package.get_R_version()
def test_pm():
pm = package.RPackageManager()
pm.packages
pm.installed
pm.available
pm.get_package_version('base')
try:
pm.get_package_version('whatever_is_not_installed')
assert False
except:
assert True
pm.remove('truncnorm')
pm.biocLite('truncnorm')
pm.biocLite(['truncnorm'])
pm.biocLite(None)
d = dun.CreateDummy()
d()
pm.install('dummy/dummytest_1.0.0.tar.gz')
pm.remove('dummytest')
d._clean()
pm.remove('truncnorm')
pm.install('truncnorm')
pm.install('truncnorm') # trying again with required version
| biokit/biokit | test/rtools/test_package.py | Python | bsd-2-clause | 1,053 |
"""
@package mi.instrument.nortek.aquadopp.ooicore.test.test_driver
@author Rachel Manoni
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
* From pyon
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a UNIT
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a INT
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a QUAL
"""
__author__ = 'Rachel Manoni, Ronald Ronquillo'
__license__ = 'Apache 2.0'
import time
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.instrument.nortek.vector.ooicore.test.test_driver import bad_sample
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey
from mi.instrument.nortek.test.test_driver import DriverTestMixinSub
from mi.core.instrument.instrument_driver import DriverConfigKey, ResourceAgentState
from mi.core.instrument.data_particle import DataParticleKey, DataParticleValue
from mi.core.instrument.chunker import StringChunker
from mi.core.exceptions import SampleException
from mi.instrument.nortek.aquadopp.ooicore.driver import NortekDataParticleType
from mi.instrument.nortek.aquadopp.ooicore.driver import AquadoppDwVelocityDataParticle
from mi.instrument.nortek.aquadopp.ooicore.driver import AquadoppDwVelocityDataParticleKey
from mi.instrument.nortek.test.test_driver import NortekUnitTest, NortekIntTest, NortekQualTest, user_config2
from mi.instrument.nortek.driver import ProtocolState, ProtocolEvent, TIMEOUT, Parameter, NortekEngIdDataParticleKey, \
NortekInstrumentProtocol, NEWLINE, EngineeringParameter
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.nortek.aquadopp.ooicore.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='nortek_aquadopp_dw_ooicore',
instrument_agent_name='nortek_aquadopp_dw_ooicore_agent',
instrument_agent_packet_config=NortekDataParticleType(),
driver_startup_config={
DriverConfigKey.PARAMETERS: {
Parameter.DEPLOYMENT_NAME: 'test',
Parameter.COMMENTS: 'this is a test',
#update the following two parameters to allow for faster collecting of samples during testing
Parameter.AVG_INTERVAL: 1,
Parameter.MEASUREMENT_INTERVAL: 1}}
)
def eng_id_sample():
sample_as_hex = "415144"
return sample_as_hex.decode('hex')
eng_id_particle = [{DataParticleKey.VALUE_ID: NortekEngIdDataParticleKey.ID, DataParticleKey.VALUE: "AQD 8493 "}]
def velocity_sample():
sample_as_hex = "a5011500101926221211000000009300f83b810628017f01002d0000e3094c0122ff9afe1e1416006093"
return sample_as_hex.decode('hex')
velocity_particle = [{'value_id': AquadoppDwVelocityDataParticleKey.TIMESTAMP, 'value': '26/11/2012 22:10:19'},
{'value_id': AquadoppDwVelocityDataParticleKey.ERROR, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.ANALOG1, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.BATTERY_VOLTAGE, 'value': 147},
{'value_id': AquadoppDwVelocityDataParticleKey.SOUND_SPEED_ANALOG2, 'value': 15352},
{'value_id': AquadoppDwVelocityDataParticleKey.HEADING, 'value': 1665},
{'value_id': AquadoppDwVelocityDataParticleKey.PITCH, 'value': 296},
{'value_id': AquadoppDwVelocityDataParticleKey.ROLL, 'value': 383},
{'value_id': AquadoppDwVelocityDataParticleKey.STATUS, 'value': 45},
{'value_id': AquadoppDwVelocityDataParticleKey.PRESSURE, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.TEMPERATURE, 'value': 2531},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM1, 'value': 332},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM2, 'value': -222},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM3, 'value': -358},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM1, 'value': 30},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM2, 'value': 20},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM3, 'value': 22}]
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python, mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class AquadoppDriverTestMixinSub(DriverTestMixinSub):
"""
    Mixin class used for storing data particle constants and common data assertion methods.
"""
#Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
#this particle can be used for both the velocity particle and the diagnostic particle
_sample_velocity_diagnostic = {
AquadoppDwVelocityDataParticleKey.TIMESTAMP: {TYPE: unicode, VALUE: '', REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ERROR: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ANALOG1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.BATTERY_VOLTAGE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.SOUND_SPEED_ANALOG2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.HEADING: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.PITCH: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ROLL: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.PRESSURE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.TEMPERATURE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM3: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM3: {TYPE: int, VALUE: 0, REQUIRED: True}
}
def assert_particle_velocity(self, data_particle, verify_values=False):
"""
Verify velpt_velocity_data
@param data_particle AquadoppDwVelocityDataParticleKey data particle
@param verify_values bool, should we verify parameter values
"""
self.assert_data_particle_keys(AquadoppDwVelocityDataParticleKey, self._sample_velocity_diagnostic)
self.assert_data_particle_header(data_particle, NortekDataParticleType.VELOCITY)
self.assert_data_particle_parameters(data_particle, self._sample_velocity_diagnostic, verify_values)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(NortekUnitTest):
def setUp(self):
NortekUnitTest.setUp(self)
def test_driver_enums(self):
"""
Verify driver specific enums have no duplicates
Base unit test driver will test enums specific for the base class.
"""
self.assert_enum_has_no_duplicates(NortekDataParticleType())
def test_velocity_sample_format(self):
"""
Verify driver can get velocity sample data out in a reasonable format.
Parsed is all we care about...raw is tested in the base DataParticle tests
"""
port_timestamp = 3555423720.711772
driver_timestamp = 3555423722.711772
# construct the expected particle
expected_particle = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.STREAM_NAME: NortekDataParticleType.VELOCITY,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: driver_timestamp,
DataParticleKey.PREFERRED_TIMESTAMP: DataParticleKey.PORT_TIMESTAMP,
DataParticleKey.QUALITY_FLAG: DataParticleValue.OK,
DataParticleKey.VALUES: velocity_particle}
self.compare_parsed_data_particle(AquadoppDwVelocityDataParticle, velocity_sample(), expected_particle)
def test_chunker(self):
"""
Verify the chunker can parse each sample type
1. complete data structure
2. fragmented data structure
3. combined data structure
4. data structure with noise
"""
chunker = StringChunker(NortekInstrumentProtocol.sieve_function)
self.assert_chunker_sample(chunker, velocity_sample())
self.assert_chunker_fragmented_sample(chunker, velocity_sample())
self.assert_chunker_combined_sample(chunker, velocity_sample())
self.assert_chunker_sample_with_noise(chunker, velocity_sample())
def test_corrupt_data_structures(self):
"""
Verify when generating the particle, if the particle is corrupt, an exception is raised
"""
particle = AquadoppDwVelocityDataParticle(bad_sample(), port_timestamp=3558720820.531179)
with self.assertRaises(SampleException):
particle.generate()
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class IntFromIDK(NortekIntTest, AquadoppDriverTestMixinSub):
def setUp(self):
NortekIntTest.setUp(self)
def test_acquire_sample(self):
"""
Verify acquire sample command and events.
1. initialize the instrument to COMMAND state
2. command the driver to ACQUIRE SAMPLE
3. verify the particle coming in
"""
self.assert_initialize_driver(ProtocolState.COMMAND)
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(NortekDataParticleType.VELOCITY, self.assert_particle_velocity, timeout=TIMEOUT)
def test_command_autosample(self):
"""
Verify autosample command and events.
1. initialize the instrument to COMMAND state
2. command the instrument to AUTOSAMPLE state
3. verify the particle coming in and the sampling is continuous (gather several samples)
4. stop AUTOSAMPLE
"""
self.assert_initialize_driver(ProtocolState.COMMAND)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_async_particle_generation(NortekDataParticleType.VELOCITY, self.assert_particle_velocity,
particle_count=4, timeout=TIMEOUT)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
def test_parameters(self):
"""
Verify that we can set the parameters
1. Cannot set read only parameters
2. Can set read/write parameters
"""
self.assert_initialize_driver(ProtocolState.COMMAND)
#test read/write parameter
self.assert_set(Parameter.BLANKING_DISTANCE, 50)
self.assert_set(Parameter.TIMING_CONTROL_REGISTER, 131)
self.assert_set(Parameter.COMPASS_UPDATE_RATE, 2)
self.assert_set(Parameter.COORDINATE_SYSTEM, 1)
self.assert_set(Parameter.VELOCITY_ADJ_TABLE, 'bu0ePTk9Uz1uPYg9oj27PdQ97T0GPh4+Nj5OPmU+fT6TPqo+wD7WPuw+Aj8'
'XPyw/QT9VP2k/fT+RP6Q/uD/KP90/8D8CQBRAJkA3QElAWkBrQHxAjECcQK'
'xAvEDMQNtA6kD5QAhBF0ElQTNBQkFPQV1BakF4QYVBkkGeQatBt0HDQc9B20'
'HnQfJB/UEIQhNCHkIoQjNCPUJHQlFCW0JkQm5Cd0KAQolCkUKaQqJCqkKyQrpC',)
#these need to update simultaneously
#self.assert_set(Parameter.MEASUREMENT_INTERVAL, 61)
#self.assert_set(Parameter.AVG_INTERVAL, 61)
#test read only parameters (includes immutable, when not startup)
self.assert_set_exception(EngineeringParameter.CLOCK_SYNC_INTERVAL, '12:00:00')
self.assert_set_exception(EngineeringParameter.ACQUIRE_STATUS_INTERVAL, '12:00:00')
self.assert_set_exception(Parameter.TRANSMIT_PULSE_LENGTH, 20)
self.assert_set_exception(Parameter.TIME_BETWEEN_PINGS, 45)
self.assert_set_exception(Parameter.NUMBER_PINGS, 1)
self.assert_set_exception(Parameter.RECEIVE_LENGTH, 8)
self.assert_set_exception(Parameter.TIME_BETWEEN_BURST_SEQUENCES, 1)
self.assert_set_exception(Parameter.USER_NUMBER_BEAMS, 4)
self.assert_set_exception(Parameter.POWER_CONTROL_REGISTER, 1)
self.assert_set_exception(Parameter.A1_1_SPARE, 3)
self.assert_set_exception(Parameter.B0_1_SPARE, 1)
self.assert_set_exception(Parameter.B1_1_SPARE, 2)
self.assert_set_exception(Parameter.NUMBER_BINS, 2)
self.assert_set_exception(Parameter.BIN_LENGTH, 8)
self.assert_set_exception(Parameter.ADJUSTMENT_SOUND_SPEED, 16658)
self.assert_set_exception(Parameter.DEPLOYMENT_NAME, 'test')
self.assert_set_exception(Parameter.WRAP_MODE, 0)
self.assert_set_exception(Parameter.CLOCK_DEPLOY, 123)
self.assert_set_exception(Parameter.DIAGNOSTIC_INTERVAL, 10801)
self.assert_set_exception(Parameter.MODE, 49)
self.assert_set_exception(Parameter.NUMBER_SAMPLES_DIAGNOSTIC, 2)
self.assert_set_exception(Parameter.NUMBER_BEAMS_CELL_DIAGNOSTIC, 2)
self.assert_set_exception(Parameter.NUMBER_PINGS_DIAGNOSTIC, 2)
self.assert_set_exception(Parameter.MODE_TEST, 5)
self.assert_set_exception(Parameter.ANALOG_INPUT_ADDR, '123')
self.assert_set_exception(Parameter.SW_VERSION, 'blah')
self.assert_set_exception(Parameter.USER_1_SPARE, 23)
self.assert_set_exception(Parameter.COMMENTS, 'hello there')
self.assert_set_exception(Parameter.WAVE_MEASUREMENT_MODE, 3)
self.assert_set_exception(Parameter.DYN_PERCENTAGE_POSITION, 3)
        self.assert_set_exception(Parameter.WAVE_TRANSMIT_PULSE, 3)
self.assert_set_exception(Parameter.WAVE_BLANKING_DISTANCE, 3)
self.assert_set_exception(Parameter.WAVE_CELL_SIZE, 3)
self.assert_set_exception(Parameter.NUMBER_DIAG_SAMPLES, 1)
self.assert_set_exception(Parameter.A1_2_SPARE, 6)
self.assert_set_exception(Parameter.B0_2_SPARE, 4)
self.assert_set_exception(Parameter.NUMBER_SAMPLES_PER_BURST, 4)
self.assert_set_exception(Parameter.USER_2_SPARE, 1)
self.assert_set_exception(Parameter.ANALOG_OUTPUT_SCALE, 234)
self.assert_set_exception(Parameter.CORRELATION_THRESHOLD, 1234)
self.assert_set_exception(Parameter.USER_3_SPARE, 1)
self.assert_set_exception(Parameter.TRANSMIT_PULSE_LENGTH_SECOND_LAG, 1)
self.assert_set_exception(Parameter.USER_4_SPARE, 1)
self.assert_set_exception(Parameter.QUAL_CONSTANTS, 'consts')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualFromIDK(NortekQualTest, AquadoppDriverTestMixinSub):
def setUp(self):
NortekQualTest.setUp(self)
def test_direct_access_telnet_mode(self):
"""
Verify the Instrument Driver properly supports direct access to the
physical instrument. (telnet mode)
"""
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
self.tcp_client.send_data("K1W%!Q")
result = self.tcp_client.expect("AQUADOPP")
self.assertTrue(result)
log.debug("DA Server Started. Reading battery voltage")
self.tcp_client.send_data("BV")
self.tcp_client.expect("\x06\x06")
self.tcp_client.send_data("CC" + user_config2())
self.tcp_client.expect("\x06\x06")
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 10)
#verify the setting got restored.
self.assert_get_parameter(Parameter.TRANSMIT_PULSE_LENGTH, 125)
self.assert_get_parameter(Parameter.RECEIVE_LENGTH, 32)
self.assert_get_parameter(Parameter.TIME_BETWEEN_BURST_SEQUENCES, 512)
self.assert_get_parameter(Parameter.TIMING_CONTROL_REGISTER, 131)
self.assert_get_parameter(Parameter.BIN_LENGTH, 7)
self.assert_get_parameter(Parameter.ADJUSTMENT_SOUND_SPEED, 1525)
self.assert_get_parameter(Parameter.VELOCITY_ADJ_TABLE, 'Aj0ePTk9Uz1uPYg9oj27PdQ97T0GPh4+Nj5OPmU+fT6TPqo+wD7WPuw+Aj8'
'XPyw/QT9VP2k/fT+RP6Q/uD/KP90/8D8CQBRAJkA3QElAWkBrQHxAjECcQK'
'xAvEDMQNtA6kD5QAhBF0ElQTNBQkFPQV1BakF4QYVBkkGeQatBt0HDQc9B20'
'HnQfJB/UEIQhNCHkIoQjNCPUJHQlFCW0JkQm5Cd0KAQolCkUKaQqJCqkKyQrpC',)
self.assert_get_parameter(EngineeringParameter.CLOCK_SYNC_INTERVAL, '00:00:00')
self.assert_get_parameter(EngineeringParameter.ACQUIRE_STATUS_INTERVAL, '00:00:00')
self.assert_get_parameter(Parameter.BLANKING_DISTANCE, 49)
self.assert_get_parameter(Parameter.TIME_BETWEEN_PINGS, 437)
self.assert_get_parameter(Parameter.NUMBER_PINGS, 1)
self.assert_get_parameter(Parameter.AVG_INTERVAL, 1)
self.assert_get_parameter(Parameter.USER_NUMBER_BEAMS, 3)
self.assert_get_parameter(Parameter.POWER_CONTROL_REGISTER, 0)
self.assert_get_parameter(Parameter.COMPASS_UPDATE_RATE, 1)
self.assert_get_parameter(Parameter.COORDINATE_SYSTEM, 2)
self.assert_get_parameter(Parameter.NUMBER_BINS, 1)
self.assert_get_parameter(Parameter.MEASUREMENT_INTERVAL, 1)
self.assert_get_parameter(Parameter.WRAP_MODE, 0)
self.assert_get_parameter(Parameter.CLOCK_DEPLOY, [0,0,0,0,0,0])
self.assert_get_parameter(Parameter.DIAGNOSTIC_INTERVAL, 11250)
self.assert_get_parameter(Parameter.MODE, 48)
self.assert_get_parameter(Parameter.NUMBER_SAMPLES_DIAGNOSTIC, 20)
self.assert_get_parameter(Parameter.NUMBER_BEAMS_CELL_DIAGNOSTIC, 1)
self.assert_get_parameter(Parameter.NUMBER_PINGS_DIAGNOSTIC, 1)
self.assert_get_parameter(Parameter.MODE_TEST, 4)
self.assert_get_parameter(Parameter.WAVE_MEASUREMENT_MODE, 0)
self.assert_get_parameter(Parameter.DYN_PERCENTAGE_POSITION, 0)
self.assert_get_parameter(Parameter.WAVE_TRANSMIT_PULSE, 0)
self.assert_get_parameter(Parameter.WAVE_BLANKING_DISTANCE, 0)
self.assert_get_parameter(Parameter.WAVE_CELL_SIZE, 0)
self.assert_get_parameter(Parameter.NUMBER_DIAG_SAMPLES, 0)
self.assert_get_parameter(Parameter.NUMBER_SAMPLES_PER_BURST, 0)
self.assert_get_parameter(Parameter.ANALOG_OUTPUT_SCALE, 6711)
self.assert_get_parameter(Parameter.CORRELATION_THRESHOLD, 0)
self.assert_get_parameter(Parameter.TRANSMIT_PULSE_LENGTH_SECOND_LAG, 2)
# Test direct access inactivity timeout
self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=90)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
# Test session timeout without activity
self.assert_direct_access_start_telnet(inactivity_timeout=120, session_timeout=30)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
# Test direct access session timeout with activity
self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=60)
# Send some activity every 30 seconds to keep DA alive.
        for i in range(3):
self.tcp_client.send_data(NEWLINE)
log.debug("Sending a little keep alive communication, sleeping for 15 seconds")
time.sleep(15)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 45)
def test_get_set_parameters(self):
"""
Verify that parameters can be get/set properly
"""
self.assert_enter_command_mode()
#test read/write parameter
self.assert_set_parameter(Parameter.BLANKING_DISTANCE, 50)
self.assert_set_parameter(Parameter.TIMING_CONTROL_REGISTER, 131)
self.assert_set_parameter(Parameter.COMPASS_UPDATE_RATE, 2)
self.assert_set_parameter(Parameter.COORDINATE_SYSTEM, 1)
self.assert_set_parameter(Parameter.VELOCITY_ADJ_TABLE, 'bu0ePTk9Uz1uPYg9oj27PdQ97T0GPh4+Nj5OPmU+fT6TPqo+wD7WPuw+Aj8'
'XPyw/QT9VP2k/fT+RP6Q/uD/KP90/8D8CQBRAJkA3QElAWkBrQHxAjECcQK'
'xAvEDMQNtA6kD5QAhBF0ElQTNBQkFPQV1BakF4QYVBkkGeQatBt0HDQc9B20'
'HnQfJB/UEIQhNCHkIoQjNCPUJHQlFCW0JkQm5Cd0KAQolCkUKaQqJCqkKyQrpC',)
#test read only parameters (includes immutable, when not startup)
self.assert_get_parameter(EngineeringParameter.CLOCK_SYNC_INTERVAL, '00:00:00')
self.assert_get_parameter(EngineeringParameter.ACQUIRE_STATUS_INTERVAL, '00:00:00')
self.assert_get_parameter(Parameter.TRANSMIT_PULSE_LENGTH, 125)
self.assert_get_parameter(Parameter.TIME_BETWEEN_PINGS, 437)
self.assert_get_parameter(Parameter.NUMBER_PINGS, 1)
self.assert_get_parameter(Parameter.RECEIVE_LENGTH, 32)
self.assert_get_parameter(Parameter.TIME_BETWEEN_BURST_SEQUENCES, 512)
self.assert_get_parameter(Parameter.USER_NUMBER_BEAMS, 3)
self.assert_get_parameter(Parameter.POWER_CONTROL_REGISTER, 0)
self.assert_get_parameter(Parameter.NUMBER_BINS, 1)
self.assert_get_parameter(Parameter.BIN_LENGTH, 7)
self.assert_get_parameter(Parameter.ADJUSTMENT_SOUND_SPEED, 1525)
self.assert_get_parameter(Parameter.WRAP_MODE, 0)
self.assert_get_parameter(Parameter.CLOCK_DEPLOY, [0, 0, 0, 0, 0, 0])
self.assert_get_parameter(Parameter.DIAGNOSTIC_INTERVAL, 11250)
self.assert_get_parameter(Parameter.MODE, 48)
self.assert_get_parameter(Parameter.NUMBER_SAMPLES_DIAGNOSTIC, 20)
self.assert_get_parameter(Parameter.NUMBER_BEAMS_CELL_DIAGNOSTIC, 1)
self.assert_get_parameter(Parameter.ANALOG_INPUT_ADDR, 0)
self.assert_get_parameter(Parameter.NUMBER_PINGS_DIAGNOSTIC, 1)
self.assert_get_parameter(Parameter.MODE_TEST, 4)
self.assert_get_parameter(Parameter.SW_VERSION, 13902)
self.assert_get_parameter(Parameter.WAVE_MEASUREMENT_MODE, 0)
self.assert_get_parameter(Parameter.DYN_PERCENTAGE_POSITION, 0)
self.assert_get_parameter(Parameter.WAVE_TRANSMIT_PULSE, 0)
self.assert_get_parameter(Parameter.WAVE_BLANKING_DISTANCE, 0)
self.assert_get_parameter(Parameter.WAVE_CELL_SIZE, 0)
self.assert_get_parameter(Parameter.NUMBER_DIAG_SAMPLES, 0)
self.assert_get_parameter(Parameter.NUMBER_SAMPLES_PER_BURST, 0)
self.assert_get_parameter(Parameter.ANALOG_OUTPUT_SCALE, 6711)
self.assert_get_parameter(Parameter.CORRELATION_THRESHOLD, 0)
self.assert_get_parameter(Parameter.TRANSMIT_PULSE_LENGTH_SECOND_LAG, 2)
#NOTE: the following cannot be tested because there are no default values
# 'spare' parameters are not used by the driver, only place holders for the config file sent to set params
# other parameter values are dependent on the instrument being tested
# self.assert_get_parameter(Parameter.A1_1_SPARE, 3)
# self.assert_get_parameter(Parameter.B0_1_SPARE, 1)
# self.assert_get_parameter(Parameter.B1_1_SPARE, 2)
# self.assert_get_parameter(Parameter.DEPLOYMENT_NAME, 'test')
# self.assert_get_parameter(Parameter.USER_1_SPARE, 23)
# self.assert_get_parameter(Parameter.COMMENTS, 'hello there')
# self.assert_get_parameter(Parameter.A1_2_SPARE, 6)
# self.assert_get_parameter(Parameter.B0_2_SPARE, 4)
# self.assert_get_parameter(Parameter.USER_2_SPARE, 1)
# self.assert_get_parameter(Parameter.USER_3_SPARE, 1)
# self.assert_get_parameter(Parameter.USER_4_SPARE, 1)
# self.assert_get_parameter(Parameter.QUAL_CONSTANTS, 'consts')
def test_poll(self):
"""
Verify the driver can poll the instrument for a single sample
"""
self.assert_sample_polled(self.assert_particle_velocity, NortekDataParticleType.VELOCITY, timeout=10)
def test_autosample(self):
"""
Verify the driver can enter and exit autosample mode, while in autosample the driver will collect multiple
samples.
"""
self.assert_sample_autosample(self.assert_particle_velocity, NortekDataParticleType.VELOCITY, timeout=20)
| rmanoni/mi-instrument | mi/instrument/nortek/aquadopp/ooicore/test/test_driver.py | Python | bsd-2-clause | 28,532 |
order = ['','K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
class Sizes(object):
_BASE = 1000.
def toSize(self, value, input='', output='K'):
"""
        Convert value from one unit prefix in 'order' to another
"""
input = order.index(input)
output = order.index(output)
factor = input - output
return value * (self._BASE ** factor)
    def converToBestUnit(self, value, input=''):
        divider = len(str(int(self._BASE))) - 1
        output = (len(str(value)) - 2) / divider
        output += order.index(input)
        if output >= len(order):
            output = len(order) - 1
        elif output < 0:
            output = 0
        output = order[output]
        return self.toSize(value, input, output), output
class Bytes(Sizes):
_BASE = 1024.
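# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shows how the helpers above behave; the numbers are arbitrary examples.
# toSize() rescales a value between two prefixes from 'order', while
# converToBestUnit() guesses a prefix that keeps the value readable.
if __name__ == '__main__':
    decimal = Sizes()   # base 1000
    binary = Bytes()    # base 1024
    print(decimal.toSize(2048, input='K', output='M'))   # -> 2.048
    print(binary.toSize(2048, input='K', output='M'))    # -> 2.0
    print(decimal.converToBestUnit(123456, input=''))    # -> (123.456, 'K')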
| Jumpscale/jumpscale6_core | lib/JumpScale/baselib/units/units.py | Python | bsd-2-clause | 787 |
import sdl2 as sdl
class Context(object):
def __init__(self, major, minor, msaa=2):
self.major = major
self.minor = minor
self.msaa = msaa
self.context = None
self._window = None
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_DOUBLEBUFFER, 1)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_MAJOR_VERSION, major)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_MINOR_VERSION, minor)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_PROFILE_MASK, sdl.SDL_GL_CONTEXT_PROFILE_CORE)
        if msaa > 0:
            sdl.SDL_GL_SetAttribute(sdl.SDL_GL_MULTISAMPLEBUFFERS, 1)
            sdl.SDL_GL_SetAttribute(sdl.SDL_GL_MULTISAMPLESAMPLES, msaa)
def destroy(self):
sdl.SDL_GL_DeleteContext(self.context)
@property
def window(self):
return self._window
@window.setter
def window(self, win):
self._window = win
        if self.context is None:
# Create context if not already created
self.context = sdl.SDL_GL_CreateContext(self._window.window)
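# --- Usage sketch (illustrative addition, not part of the original module) ---
# The surrounding project supplies its own window wrapper; the setter above
# only needs an object that exposes the raw SDL window handle as '.window',
# so 'HandleHolder' below is a hypothetical stand-in for demonstration only.
if __name__ == '__main__':
    class HandleHolder(object):
        def __init__(self, handle):
            self.window = handle
    if sdl.SDL_Init(sdl.SDL_INIT_VIDEO) == 0:
        # GL attributes are set in Context.__init__, so build it before the window.
        ctx = Context(3, 3, msaa=4)
        handle = sdl.SDL_CreateWindow(b"context demo",
                                      sdl.SDL_WINDOWPOS_UNDEFINED,
                                      sdl.SDL_WINDOWPOS_UNDEFINED,
                                      640, 480, sdl.SDL_WINDOW_OPENGL)
        ctx.window = HandleHolder(handle)   # triggers GL context creation
        sdl.SDL_GL_SwapWindow(handle)       # a real render loop would go here
        ctx.destroy()
        sdl.SDL_DestroyWindow(handle)
        sdl.SDL_Quit()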
| mdsitton/pract2d | pract2d/core/context.py | Python | bsd-2-clause | 1,065 |
'''
TODO (29.05.2012):
1) show 1x, 2x, 3x threshold (as line)
2) auto scale in y axis? (calc and save min & max values of buffer)
3) draw y axis?
4) 'max_nbr_buffers_transmitted' must be 1 and 'framesize' must be 512 otherwise we get in trouble in RT mode.
5) set 'SHIFT_VIEW' in update() and dequeue in 'do_draw'? does this get rid of the shift / lag? --> IT DOES NOT!
6) how do I connect points across VBOs? currently only points inside a VBO are connected.
7) make code modular so that I don't have to keep two versions up-to-date.
0) WINDOWS only:
A) if you are planning to run this program on Windows (32bit and 64 bit), make
sure to install python 32bit - 64bit python on Windows won't work with pyglet!
B) install 32bit installer of 'setuptools' http://pypi.python.org/pypi/setuptools
C) $ cd c:\python27\Scripts
$ easy_install numpy
D) set the nvidia driver 3D settings to 'performance' if you want highest FPS
1) you need to install a recent version of pyglet to run this program:
$ hg clone https://pyglet.googlecode.com/hg/ pyglet
$ sudo python setup.py install
# on windows do:
# d:
# cd d:\code\pyglet
# c:\Python27\python.exe setup.py install
2) you also need numpy to be installed; on ubuntu do:
$ sudo apt-get install python-numpy
3) Ubuntu / Linux only: in case this application freezes make sure the following
points are met:
- Nvidia driver 280.13; I had lots of problems with version 290 & 295
- latest pyglet dev version is installed (see point 1). I tried both pyglet-1.1.2 and
pyglet-1.1.4 that come with ubuntu but I get very poor performance.
4) check remaining 'TODO' sections
Profiling)
A) per function
$ python -m cProfile pyglet_vbo_test7.py
B) per line
$ sudo /usr/bin/easy_install line_profiler
# add decorator '@profile' in front of each function
$ kernprof.py -l pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/line_profiler-1.0b3-py2.7-linux-x86_64.egg/line_profiler.py pyglet_vbo_test7.py.lprof > prof.txt
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py prof.txt
C) with runsnakerun GUI - not compatible with method B)
$ sudo /usr/bin/easy_install RunSnakeRun
$ python -m cProfile -o pyglet_vbo_test7.profile pyglet_vbo_test7.py
$ python /usr/local/lib/python2.7/dist-packages/RunSnakeRun-2.0.2a1-py2.7.egg/runsnakerun/runsnake.py pyglet_vbo_test7.profile
'''
''' turn on debugger if necessary
import pdb
pdb.set_trace()
'''
import pyglet
from pyglet.gl import *
from ctypes import pointer, sizeof
import numpy as np
import random
from time import time
from math import ceil, floor
''' mmap stuff '''
import os, sys
import mmap
from datetime import datetime
from struct import unpack, pack
# switch between drawing modes. all modes render ~ the same amount of data points.
# mode = 0; few segments -> high FPS since not many gl* calls
# mode = 1; many segments -> low FPS since gl* calls are executed many more times.
MODE = 1
# default window dimensions
WIN_HEIGHT_DEFAULT = 800
WIN_WIDTH_DEFAULT = 800
# 512 is neuralynx specific.
NBR_DATA_POINTS_PER_BUFFER = 1.0
NBR_DATA_POINTS_PER_BUFFER_INT = int(NBR_DATA_POINTS_PER_BUFFER)
SCANRATE = 1
SECONDS_TO_VISUALIZE_PER_PANEL = 1.0
# approximate number of data point per VBO. will change and be adjusted so that
# this number is a multiple of NBR_DATA_POINTS_PER_BUFFER
NBR_DATA_POINTS_PER_VBO = 200
# how many times per second should we call the update function?
#CALL_UPDATE_X_TIMES_PER_SECOND = 67.0
# TODO: check what a reasonable value for 'CALL_UPDATE_X_TIMES_PER_SECOND' is.
# going from 67.0 to 60.0 gives me a huge performance improvement.
CALL_UPDATE_X_TIMES_PER_SECOND = 60.0
# into how many data panels should we split up the window?
NBR_PANELS = 1
# use same color for all segments?
USE_UNIFORM_COLOR = True
# default color to be used by 'USE_UNIFORM_COLOR'
DEFAULT_COLOR = [1, 0, 0]
# y scaling factors for spike and noise values.
SPIKE_SIZE = 200
NOISE_SIZE = 100
# numpy's randint is exclusive, therefore we need to add one.
NOISE_SIZE_NP = NOISE_SIZE + 1
# generate spike every N points
if MODE == 0:
GENERATE_SPIKE_EVERY_N_POINTS = 10000
elif MODE == 1:
GENERATE_SPIKE_EVERY_N_POINTS = 128
# where to put the 0/0 point of the data points.
X_OFFSET_PANEL = 20
Y_OFFSET_PANEL = 200
# update counter used to determine when to generate a new segment of data.
update_counter = 1
SHIFT_VIEW = False
# enable debug 'print' statements?
DEBUG = 0
# number of independent data streams?
# e.g., 'StimOMatic' feeds in one spike and one LFP channel
NBR_INDEPENDENT_CHANNELS = 2
# should we use multiprocessing if possible? this might speed things up.
USE_MULTIPROCESSING = False
MULTIPROCESSING_NBR_PROCESSES = 12
DO_PROFILE = False
PLUGIN_NAME = 'pCtrlLFP'
# where's your temporary directory? mmap will write into it.
TMP_DIR = '/tmp'
if os.name == 'nt': # windows systems
# make sure you use double '\\' to separate directories
TMP_DIR = 'c:\\temp'
else: # unix systems
TMP_DIR = '/tmp'
TMP_DIR = TMP_DIR + os.sep + PLUGIN_NAME
# should we use mmap to receive data from matlab?
USE_MMAP = 1
MMAP_BYTES_PER_FLOAT = 8
MMAP_stats_file = TMP_DIR + os.sep + 'bla_stats'
# location of shared file(s)
MMAP_FILENAME = []
for j in range(NBR_INDEPENDENT_CHANNELS):
MMAP_FILENAME.append(TMP_DIR + os.sep + 'bla' + str(j+1))
# number of elements to store in memory
MMAP_STORE_LENGTH = MMAP_BYTES_PER_FLOAT * int(NBR_DATA_POINTS_PER_BUFFER)
# null string used to initialize memory
MMAP_NULL_HEX = '\x00'
################## function needed to calculate dependent parameters
def calc_VOB_numbers(NBR_DATA_POINTS_PER_VBO, NBR_DATA_POINTS_PER_BUFFER, SECONDS_TO_VISUALIZE_PER_PANEL):
NBR_DATA_POINTS_PER_VBO = ceil(NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER) * NBR_DATA_POINTS_PER_BUFFER
    # calculate the number of VBOs that are needed to display all data
NBR_VBOS_PER_PANEL = ceil(SECONDS_TO_VISUALIZE_PER_PANEL * SCANRATE / NBR_DATA_POINTS_PER_VBO)
# how many buffers of size 'NBR_DATA_POINTS_PER_BUFFER' does each panel hold?
# NBR_BUFFERS_PER_PANEL = NBR_VBOS_PER_PANEL * NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER
# update 'SECONDS_TO_VISUALIZE_PER_PANEL' to its true value
SECONDS_TO_VISUALIZE_PER_PANEL = NBR_VBOS_PER_PANEL * NBR_DATA_POINTS_PER_VBO / SCANRATE
# add one VBO to each panel since we want to smoothly add new data points.
NBR_VBOS_PER_PANEL += 1
return int(NBR_DATA_POINTS_PER_VBO), int(NBR_VBOS_PER_PANEL), SECONDS_TO_VISUALIZE_PER_PANEL
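# Worked example (illustrative): with the defaults defined above
# (NBR_DATA_POINTS_PER_VBO=200, NBR_DATA_POINTS_PER_BUFFER=1.0,
#  SECONDS_TO_VISUALIZE_PER_PANEL=1.0, SCANRATE=1) the call below returns:
#   points per VBO    : ceil(200 / 1.0) * 1.0 = 200
#   VBOs per panel    : ceil(1.0 * 1 / 200)   = 1, plus one spare VBO -> 2
#   seconds per panel : 1 * 200 / 1           = 200.0 (computed before the spare is added)
# so at SCANRATE=1 each panel actually spans 200 "seconds" of data.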
################## dependent parameters / settings
output = calc_VOB_numbers(NBR_DATA_POINTS_PER_VBO, NBR_DATA_POINTS_PER_BUFFER, SECONDS_TO_VISUALIZE_PER_PANEL)
NBR_DATA_POINTS_PER_VBO, NBR_VBOS_PER_PANEL, SECONDS_TO_VISUALIZE_PER_PANEL = output
# default X values
X_MIN = 0
X_MAX = float(WIN_WIDTH_DEFAULT) - X_OFFSET_PANEL
# shift each VBO by how much in X & Y direction, relative to the previous VBO?
SHIFT_Y_BY = 0
SHIFT_X_BY = abs(X_MIN) + abs(X_MAX)
# while generating the fake data, what is the stepsize between individual x data
# points?
STEPSIZE_X = float(SHIFT_X_BY) / NBR_DATA_POINTS_PER_VBO
# how much distance do 'NBR_DATA_POINTS_PER_BUFFER' points cover in x direction?
SHIFT_X_SINGLE_BUFFER = STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
# Definitions for 'glColorPointer' and 'glVertexPointer'
n_COORDINATES_PER_VERTEX = 2
BYTES_PER_POINT = 8
# indicator values used to confirm that data is received.
DATA_RECEIVED_ACK_NUM = 3.14159265
DATA_RECEIVED_ACK_STR = pack('d', DATA_RECEIVED_ACK_NUM)
NBR_BUFFERS_ZERO_STR = pack('d', 0)
##################
# default window dimensions
WIN_HEIGHT_current = WIN_HEIGHT_DEFAULT
WIN_WIDTH_current = WIN_WIDTH_DEFAULT
''' decorator to quickly switch between profiling and no profiling '''
def do_profile(cond):
def resdec(f):
if not cond:
return f
return profile(f)
return resdec
@do_profile(DO_PROFILE)
def generate_line_segment_zeros(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
''' same as 'generate_line_segment' but will generate zero y-values '''
zeros = True
x, y = generate_points(min_x, max_x, x_shift, step_size, zeros)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_line_segment(x_shift=SHIFT_X_BY, min_x=X_MIN, max_x=X_MAX, step_size=STEPSIZE_X):
# ~ 1ms
x, y = generate_points(min_x, max_x, x_shift, step_size)
return create_2dim_list_from_arrays(x, y)
@do_profile(DO_PROFILE)
def generate_numbers_for_x_vector(x, zeros = False):
nbr_elements = len(x)
if zeros: # generate zeros
# TODO: check whether we need to add offset (Y_OFFSET_PANEL + 1)
y = np.zeros(nbr_elements)# + Y_OFFSET_PANEL + 1
else: # generate random values.
# generate a vector of random numbers in range [0, 1]
# y = [random.random() for i in range(nbr_elements)]
y = np.random.random(nbr_elements)
# generate a scaling vector of random numbers in range [1, NOISE_SIZE]
# this vector will scale each data point
# y_scale = [random.randint(1, NOISE_SIZE) for i in range(nbr_elements)]
y_scale = np.random.randint(1, NOISE_SIZE_NP, nbr_elements)
# generate a spike every 'GENERATE_SPIKE_EVERY_N_POINTS' data points
# generate an intial offset so that spikes don't occur at same position.
y_scale_offset = np.random.randint(1, GENERATE_SPIKE_EVERY_N_POINTS)
y_scale[GENERATE_SPIKE_EVERY_N_POINTS - 1 + y_scale_offset::GENERATE_SPIKE_EVERY_N_POINTS] = SPIKE_SIZE
# rescale each data point accordingly
y = (y * y_scale) + SHIFT_Y_BY + Y_OFFSET_PANEL
return y
@do_profile(DO_PROFILE)
def generate_points(min_x=X_MIN, max_x=X_MAX, x_shift=SHIFT_X_BY, step_size = STEPSIZE_X, zeros = False):
# < 0.1ms
# 'range' can only generate integer arrays
# x = np.array(range(min_x, max_x), int)
# use 'arrange' from numpy to generate a float array
x = np.arange(min_x, max_x, step_size)
x = x + x_shift
y = generate_numbers_for_x_vector(x, zeros)
return x, y
@do_profile(DO_PROFILE)
def create_2dim_list_from_arrays(x, y):
data = []
for i, j in zip(x, y):
data.extend([i, j])
return data
@do_profile(DO_PROFILE)
def transform_line_points_to_data_format_for_GPU(line_points):
# ~ 0.2ms
#print "nbr data points generated: " + str(len(line_points) / 2)
return (GLfloat*len(line_points))(*line_points)
@do_profile(DO_PROFILE)
def generate_color_for_segment():
# < 0.1ms
# generate well visible (not too dark) colors
if not USE_UNIFORM_COLOR:
while True:
color = [random.random() for j in xrange(0, 3)]
if sum(color) > 0.5:
break
else:
color = [1, 0, 0]
return color
@do_profile(DO_PROFILE)
def create_VBO():
# < 0.1ms
vbo_id = GLuint()
# generates 1 buffer object names, which are stored in pointer(vbo_id)
glGenBuffers(1, pointer(vbo_id))
return vbo_id
@do_profile(DO_PROFILE)
def create_VBO_send_data_to_VBO(data):
# < 0.1ms
vbo_id = create_VBO()
send_data_to_VBO(vbo_id, data)
return vbo_id
@do_profile(DO_PROFILE)
def send_data_to_VBO(vbo_id, data):
# < 0.1ms
# binds the named buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
# creates and initializes a buffer object's data store -> transfers data
# from the CPU to the GPU.
# TODO: check whether GL_DYNAMIC_DRAW or GL_STREAM_DRAW is faster.
# GL_STREAM_DRAW should be faster when updating the buffer @ every frame?
# see redbook page 95 & 96.
glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_DYNAMIC_DRAW)
@do_profile(DO_PROFILE)
def overwrite_line_segment_on_GPU(x_shift=SHIFT_X_BY, line_points=False, vbo_to_update=False):
# ~ 0.3ms
if not vbo_to_update:
print "!! no vbo pointer found - aborting !!"
print "update_counter: %d " % update_counter
return
if not line_points:
if DEBUG:
print "overwrite_line_segment_on_GPU: need to generate points"
line_points = generate_line_segment(x_shift)
data = transform_line_points_to_data_format_for_GPU(line_points)
color = generate_color_for_segment()
nbr_points = len(line_points)/2
# update data on VBO
send_data_to_VBO(vbo_to_update, data)
return nbr_points, color
@do_profile(DO_PROFILE)
def create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL):
vbos = [ [None] * int(NBR_VBOS_PER_PANEL) for i in xrange(NBR_PANELS) ]
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
vbos[panel][vbo] = create_VBO()
return vbos
@do_profile(DO_PROFILE)
def create_initial_data(nPanels, nVbosPerPanel, nDataPointsPerVbo):
data = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
curr_x_offset = (vbo * SHIFT_X_BY) + X_OFFSET_PANEL
#print "vbo %d, offset %d " % (vbo, curr_x_offset)
if (vbo + 1) == nVbosPerPanel:
tmp = generate_line_segment_zeros(x_shift=curr_x_offset)
else:
tmp = generate_line_segment(x_shift=curr_x_offset)
data[panel][vbo] = transform_line_points_to_data_format_for_GPU(tmp)
return data, curr_x_offset
@do_profile(DO_PROFILE)
def create_initial_colors(nPanels, nVbosPerPanel):
colors = [ [None] * int(nVbosPerPanel) for i in xrange(nPanels) ]
for panel in range(nPanels):
for vbo in range(nVbosPerPanel):
colors[panel][vbo] = generate_color_for_segment()
return colors
@do_profile(DO_PROFILE)
def initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data):
for panel in range(NBR_PANELS):
for vbo in range(NBR_VBOS_PER_PANEL):
send_data_to_VBO(vbos[panel][vbo], data[panel][vbo])
@do_profile(DO_PROFILE)
def setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO):
t0 = time()
vbos = create_vbos(NBR_PANELS, NBR_VBOS_PER_PANEL)
data, curr_x_offset = create_initial_data(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
initialize_vbos_with_start_data(NBR_PANELS, NBR_VBOS_PER_PANEL, vbos, data)
colors = create_initial_colors(NBR_PANELS, NBR_VBOS_PER_PANEL)
print 'initial setup time was %f seconds.' %(time() - t0)
return vbos, colors, curr_x_offset
def setup_plotting_queue():
# setup plotting queue
import collections
max_nbr_buffers = 20000
plot_queue = collections.deque([], max_nbr_buffers)
return plot_queue
@do_profile(DO_PROFILE)
def update_line_segment_on_GPU(vbo_id, pointer_offset, data):
# bind buffer and overwrite position with offset 'pos_to_overwrite*BYTES_PER_POINT'
#try:
glBindBuffer(GL_ARRAY_BUFFER, vbo_id)
glBufferSubData(GL_ARRAY_BUFFER, pointer_offset, sizeof(data), data)
#except:
#print "pointer_offset: ", pointer_offset
#print "sizeof(data): ", sizeof(data)
#pass
@do_profile(DO_PROFILE)
def calc_x_values_single_buffer():
x_values = np.arange(0, SHIFT_X_SINGLE_BUFFER, STEPSIZE_X)
return x_values
@do_profile(DO_PROFILE)
def append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file):
# reformat data so that the buffers from 'j' mmap files
# are paired together.
for j in range(int(min(nbr_buffers_per_mmap_file))):
data_to_add = []
for k in range(len(new_data)):
data_to_add.append(new_data[k][j])
# append 'data_to_add' to end (right side) of queue
plot_queue.append(data_to_add)
@do_profile(DO_PROFILE)
def get_data_from_plot_queue():
# remove & return left most element from queue
data = []
if len(plot_queue) > 0:
data = plot_queue.popleft()
return data
@do_profile(DO_PROFILE)
def request_new_data():
''' generates new raw data or grabs new data from MMAP '''
if USE_MMAP == 1:
new_data = get_data_from_mmap()
#update_data_stream_status(new_data)
#print new_data
else:
new_data = []
# get the x-spacing right
x_values = calc_x_values_single_buffer()
for j in xrange(NBR_INDEPENDENT_CHANNELS):
# put data into zero-th buffer
new_data.append([generate_numbers_for_x_vector(x_values)])
nbr_mmap_files = len(new_data)
nbr_buffers_per_mmap_file = np.zeros(nbr_mmap_files)
empty_data = np.zeros(nbr_mmap_files)
for j in range(nbr_mmap_files):
# update number of buffers in this 'file'. Will fail
# if len(new_data) != NBR_INDEPENDENT_CHANNELS
try:
nbr_buffers_per_mmap_file[j] = len(new_data[j])
except:
continue
# check whether the first buffer of the current mmap file is empty
sum_data = sum(new_data[j][0])
if sum_data == 0 or sum_data == DATA_RECEIVED_ACK_NUM:
empty_data[j] = 1
# print empty_data
return new_data, empty_data, nbr_buffers_per_mmap_file
def transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current):
# calc correct x_value
x_values = calc_x_values_single_buffer() + x_shift_single_buffer_current
nbr_mmap_files = len(raw_data)
data = []
for j in range(nbr_mmap_files):
line_points = create_2dim_list_from_arrays(x_values, raw_data[j])
data.append(transform_line_points_to_data_format_for_GPU(line_points))
return data
def mmap_stats_go_to_nbr_received_buffers_pos():
# go to 2nd position relative to 0.
mmap_stats.seek(MMAP_BYTES_PER_FLOAT * 2, 0)
@do_profile(DO_PROFILE)
def get_nbr_received_buffers_from_mmap():
# go to position where 'number of new buffers' is stored
mmap_stats_go_to_nbr_received_buffers_pos()
# read-in the string value
nbr_buffers_received = mmap_stats.read(MMAP_BYTES_PER_FLOAT)
# convert into decimal value
nbr_buffers_received = unpack('d', nbr_buffers_received)[0]
# debugging:
#print str(nbr_buffers_received) + ' number buffers received'
return nbr_buffers_received
def create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers = 1):
# pre-allocate each buffer
buffers = []
for buffer_index in xrange(nbr_buffers):
# create deep copy of zeros, otherwise we create multiple references to
# the same object.
        zeros_copy = zeros.copy()
        buffers.append(zeros_copy)
data = []
for mmap_file_index in xrange(nbr_mmap_files):
# put data into zero-th buffer
data.append(buffers)
return data
@do_profile(DO_PROFILE)
def splitIterator(text, size):
# assert size > 0, "size should be > 0"
for start in range(0, len(text), size):
yield text[start:start + size]
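# Example (illustrative): splitIterator chops a string into fixed-size chunks,
# e.g. list(splitIterator('abcdef', 2)) -> ['ab', 'cd', 'ef']. It is only used
# by the slower per-element unpack variant kept in comments further below.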
prev_sum = 0
MMAP_NO_DATA_INDICATE_ZERO = False
MMAP_NO_DATA_INDICATE_NON_ZERO = True
@do_profile(DO_PROFILE)
def get_data_from_mmap():
#
#t0 = time()
nbr_buffers_received = get_nbr_received_buffers_from_mmap()
nbr_mmap_files = len(mmap_data)
zeros = np.zeros(NBR_DATA_POINTS_PER_BUFFER_INT)
''' no new buffers - generate one empty dummy buffer and return '''
if nbr_buffers_received == 0 or nbr_buffers_received == -1:
return create_empty_data_buffer(nbr_mmap_files, zeros)
nbr_buffers_received = int(nbr_buffers_received)
nbr_elements = nbr_buffers_received * NBR_DATA_POINTS_PER_BUFFER_INT
range_nbr_mmap_files = range(nbr_mmap_files)
# check if there's any data that's ready for pickup.
new_data_found = np.zeros(nbr_mmap_files)
for mmap_file_index in range_nbr_mmap_files:
# go to beginning of memory mapped area
mmap_data[mmap_file_index].seek(0)
# quit right away if no new data has been written yet.
this_element = mmap_data[mmap_file_index].read(MMAP_BYTES_PER_FLOAT)
this_element = unpack('d', this_element)[0]
if round(this_element, 8) != DATA_RECEIVED_ACK_NUM:
new_data_found[mmap_file_index] = 1
# none of the files contain new data
if sum(new_data_found) == 0:
return create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers_received)
''' read out transferred data '''
data = []
# this is ~ 10ms slower.
#data = np.zeros((nbr_mmap_files, nbr_buffers_received, NBR_DATA_POINTS_PER_BUFFER_INT))
# at least one new buffer has arrived.
for mmap_file_index in range_nbr_mmap_files:
#'''
# pre-allocate each buffer
buffers = []
for buffer_index in xrange(nbr_buffers_received):
# DONE: find out what the problem here is:
# there seems to be a bug in python on windows, or I don't understand the way things work:
# if I create 'zeros' outside this loop, the second time that 'zeros' gets called,
# it will contain all values found in data[mmap_file_index][buffer][j]. Therefore I have to re-generate
# the 'zeros' for each mmap_file_index'th loop.
# SOLUTION:
# We need to make a 'deep-copy' of zeros, otherwise we are just
# passing a reference to the same object (which is a np.array object).
zero_copy = zeros.copy()
buffers.append(zero_copy)
# add all buffers to mmap_file_index'th data stream.
data.append(buffers)
#'''
# go to beginning of memory mapped area & read out all elements
mmap_data[mmap_file_index].seek(0)
all_values_string = mmap_data[mmap_file_index].read(nbr_elements * MMAP_BYTES_PER_FLOAT)
# 0.1632 per call in debugger
# grab sub-list so we avoid having to call this list by its index.
this_data = data[mmap_file_index]
# unpack all values at once
unpacked_values = unpack("d" * nbr_elements, all_values_string)
# using list comprehension is better than a regular loop with random array access
this_data = [unpacked_values[i:i+NBR_DATA_POINTS_PER_BUFFER_INT] for i in xrange(0, nbr_elements, NBR_DATA_POINTS_PER_BUFFER_INT)]
# slower version of above line.
#for abs_idx in range(nbr_elements):
# this_data[abs_idx / NBR_DATA_POINTS_PER_BUFFER_INT][abs_idx % NBR_DATA_POINTS_PER_BUFFER_INT] = unpacked_values[abs_idx]
# write-back sub-list
data[mmap_file_index] = this_data
''' original version.
# these next few lines are responsible for 90% of the time spent in this function.
# 0.4974s per call in debugger
element_values_list = list(splitIterator(all_values_string, MMAP_BYTES_PER_FLOAT))
for abs_element_index in range(nbr_elements):
this_element = element_values_list[abs_element_index]
this_element = unpack('d', this_element)[0]
buffer_nbr = abs_element_index / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer = abs_element_index % NBR_DATA_POINTS_PER_BUFFER_INT
data[mmap_file_index][buffer_nbr][index_in_buffer] = this_element
'''
''' useless alternatives
# even worse: -> ~ 0.0063 secs per call
unpacked_values = [unpack('d', element_values_list[j])[0] for j in range(nbr_elements)]
# worst: ~0.0160 secs per call
buffer_ids = np.arange(nbr_elements) / NBR_DATA_POINTS_PER_BUFFER_INT
index_in_buffer_id = np.arange(nbr_elements) % NBR_DATA_POINTS_PER_BUFFER_INT
for abs_element_index in range(nbr_elements):
data[mmap_file_index][buffer_ids[abs_element_index]][index_in_buffer_id[abs_element_index]] = unpacked_values[abs_element_index]
'''
#t1 = time()
#print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
# go to beginning of memory mapped area and overwrite first value with
# ACK string so that the sender knows that it is safe to overwrite the
# previous data (== send new data).
for mmap_file_index in range_nbr_mmap_files:
mmap_data[mmap_file_index].seek(0)
mmap_data[mmap_file_index].write(DATA_RECEIVED_ACK_STR)
# overwrite the 'number of buffers received' field with zero, so that we don't
# keep reading in this very same data.
mmap_stats_go_to_nbr_received_buffers_pos()
mmap_stats.write(NBR_BUFFERS_ZERO_STR)
return data
@do_profile(DO_PROFILE)
def update_vbo_with_data_from_plot_queue():
global x_shift_current, x_shift_single_buffer_current
global pointer_shift
global vbos, colors
global c_vbo # counter needed for VBO positioning
global pointer_offset, nbr_points_rendered_in_last_vbo
for j in xrange(NBR_BUFFERS_TO_UPDATE):
# grab 'raw_data' from beginning of plot queue.
raw_data = get_data_from_plot_queue()
data = transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current)
### VBO POSITIONING
pos_to_overwrite = c_vbo % (NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER)
nbr_points_rendered_in_last_vbo = int(NBR_DATA_POINTS_PER_BUFFER * pos_to_overwrite)
# at which location in the memory (in bytes) of the VBO should we replace the data?
# also needed for plotting.
pointer_offset = nbr_points_rendered_in_last_vbo * BYTES_PER_POINT
nbr_data_streams = len(data)
for panel in range(NBR_PANELS):
update_line_segment_on_GPU(vbos[panel][-1], pointer_offset, data[panel % nbr_data_streams])
c_vbo += 1
x_shift_single_buffer_current += SHIFT_X_SINGLE_BUFFER
pointer_shift += NBR_DATA_POINTS_PER_BUFFER
# check whether we reached the end of the VBO and thus need to rotate it.
if pointer_shift == NBR_DATA_POINTS_PER_VBO:
pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo = rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo)
@do_profile(DO_PROFILE)
def rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo):
# reset pointer offsets / shifts
# TODO: clean up and clarify 'pointer_shift' vs 'pointer_offset'!
pointer_shift = 0
pointer_offset = 0
c_vbo = 0
x_shift_current += SHIFT_X_BY
''' this is not fast enough and will lead to jitter effects
# generate new data set for each panel
tmp_points = [ [None] for j in range(NBR_PANELS)]
for panel in range(NBR_PANELS):
tmp_points_panel = generate_line_segment_zeros(x_shift=x_shift_current)
tmp_points[panel] = transform_line_points_to_data_format_for_GPU(tmp_points_panel)
'''
for panel in range(NBR_PANELS):
this_vbo = vbos[panel][0]
this_color = colors[panel][0]
# Delete current vbo and replace with new one.
# We could just re-use the current vbo, however this might lead to 'blinking' artifacts
# with the first VBO (probably because of incorrect referencing).
# By deleting the VBO, we make sure that this VBO is not being used for plotting.
glDeleteBuffers(1, pointer(this_vbo))
this_vbo = create_VBO()
# bind VBO and allocate memory.
glBindBuffer(GL_ARRAY_BUFFER, this_vbo)
glBufferData(GL_ARRAY_BUFFER, n_COORDINATES_PER_VERTEX * NBR_DATA_POINTS_PER_VBO * BYTES_PER_POINT, None, GL_DYNAMIC_DRAW)
# vbo pointer & color from arrays
vbos[panel] = vbos[panel][1:]
colors[panel] = colors[panel][1:]
# add color and pointer to VBO
vbos[panel].append(this_vbo)
colors[panel].append(this_color)
return pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo
@do_profile(DO_PROFILE)
def update_data_stream_status(data):
global prev_sum, MMAP_NO_DATA_INDICATE_ZERO, MMAP_NO_DATA_INDICATE_NON_ZERO
# check if new data has arrived and tell user
# we only check for the first data stream - I'm assuming here that either
# all channels or no channels with fail.
nbr_mmap_files = 0
buffer_to_check = 0
current_sum = sum(data[nbr_mmap_files][buffer_to_check])
if current_sum == prev_sum:
if prev_sum == 0:
# indicate zero state only once
if not MMAP_NO_DATA_INDICATE_ZERO:
print datetime.now(), ' - No new data received (sum(data) == zero)'
MMAP_NO_DATA_INDICATE_ZERO = True
else:
if not MMAP_NO_DATA_INDICATE_NON_ZERO:
print datetime.now(), ' - No new data received (sum(data) != zero)'
MMAP_NO_DATA_INDICATE_NON_ZERO = True
else:
if MMAP_NO_DATA_INDICATE_ZERO:
MMAP_NO_DATA_INDICATE_ZERO = False
print datetime.now(), ' - New data received!'
if MMAP_NO_DATA_INDICATE_NON_ZERO:
MMAP_NO_DATA_INDICATE_NON_ZERO = False
print datetime.now(), ' - New data received!'
prev_sum = current_sum
# t1 = time()
# print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
@do_profile(DO_PROFILE)
def create_mmap_file_on_disk(fname):
# (over-) write file
fd = os.open(fname, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
assert os.write(fd, MMAP_NULL_HEX * MMAP_STORE_LENGTH)
os.close(fd)
@do_profile(DO_PROFILE)
def setup_mmap(filenames):
# matlab:
# m = memmapfile('/tmp/bla', 'Format', 'double', 'Writable', true)
# m.Data = sin(linspace(200, 203, 512))*100
# m.Data = linspace(200, 300, 512);
# t = timer('TimerFcn', 'm.Data=sin(linspace(200, 203, 512)) * rand(1)*512;', 'Period', 0.015, 'ExecutionMode', 'fixedRate');
# start(t)
mmap_false = False
mmap_data = []
for i in range(len(filenames)):
fname = filenames[i]
# check if file exists
if not os.path.isfile(fname):
# check if directory exists
path_to_file = os.path.dirname(fname)
if not os.path.isdir(path_to_file):
print "Directory '" + path_to_file + "' not found - creating it."
os.makedirs(path_to_file)
create_mmap_file_on_disk(fname)
# initialize the memory map
f = open(fname, "r+b")
mmap_data.append(mmap.mmap(f.fileno(), 0))
# initialize memory with default value
        for j in range(len(mmap_data[i])):
            mmap_data[i][j] = MMAP_NULL_HEX
return mmap_data
##################### MAIN #####################################################
# animation is enabled by default. you can pause / resume it by pressing 'a'
DO_ANIMATE = True
DO_NEXT_STEP = False
''' BEGIN setup part 1 '''
if USE_MMAP:
# initialize MMAP
mmap_data = setup_mmap(MMAP_FILENAME)
if not mmap_data:
print "Could not read mmap-file. Aborting."
sys.exit(1)
if not os.path.isfile(MMAP_stats_file):
create_mmap_file_on_disk(MMAP_stats_file)
f = open(MMAP_stats_file, "r+b")
mmap_stats = mmap.mmap(f.fileno(), 0)
vbos, colors, x_shift_current = setup_vbo_stuff(NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO)
# TODO: clarify difference between 'x_shift_single_buffer_current' and 'x_shift_current'
x_shift_single_buffer_current = x_shift_current
plot_queue = setup_plotting_queue()
info_str = "%d panels; %d segments per panel; %d number of points per segment." % ( NBR_PANELS, NBR_VBOS_PER_PANEL, NBR_DATA_POINTS_PER_VBO )
print info_str
# setup window
window = pyglet.window.Window(width=WIN_WIDTH_DEFAULT, height=WIN_HEIGHT_DEFAULT, resizable=True)
window.set_caption(info_str)
# initialize FPS display
fps_display = pyglet.clock.ClockDisplay(interval=0.125, format='FPS %(fps).2f')
''' END setup part 1 '''
''' BEGIN periodic event function - check whether we need to replace a VBO '''
# variables needed while updating the VBOs
pointer_shift = 0
pointer_offset = 0
nbr_points_rendered_in_last_vbo = 0
c_vbo = 0
# definitions needed for dequeueing of plot buffers.
NBR_BUFFERS_TO_UPDATE = 1
MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE = NBR_BUFFERS_TO_UPDATE
@do_profile(DO_PROFILE)
def update(dt):
# ~ 24 ms, generating new data set for each panel
# ~ 6 ms, generating only one new data set and re-using it.
# ~ 0.4 ms, without 'generate_line_segment' and 'overwrite_line_segment_on_GPU'
if not DO_ANIMATE:
# quit right away if animation is disabled. Ideally we would want to still
# compute at least the next set of 'tmp_points', however we need to make sure that
# 'x_shift_current' doesn't get updated more than once (or 'SHIFT_X_BY' is updated
# accordingly).
return
if DO_NEXT_STEP:
raw_input('please press key to continue ')
if DEBUG:
print "update_counter in 'update()' %d " % update_counter
t0 = time()
''' START 'DATA MANAGEMENT' '''
# pick up new data from mmap or other system (i.e. generated)
new_data, new_data_is_empty, nbr_buffers_per_mmap_file = request_new_data()
# don't add empty data to the queue
# don't use 'NBR_INDEPENDENT_CHANNELS' here, because we might be skipping this channel
if sum(new_data_is_empty) != len(new_data):
append_data_to_plot_queue(new_data, nbr_buffers_per_mmap_file)
''' END 'DATA MANAGEMENT' '''
''' START 'dequeue enough buffers and prepare them for plotting' '''
# don't purge entire queue - keep at least one element in queue.
if len(plot_queue) < MIN_NBR_BUFFERS_NECESSARY_FOR_UPDATE:
return
# dequeue buffers and update VBOs
update_vbo_with_data_from_plot_queue()
''' END 'dequeue enough buffers and prepare them for plotting' '''
# indicate that view needs to be shifted
global SHIFT_VIEW
SHIFT_VIEW = True
if DEBUG:
t1 = time()
print 'update() takes %f seconds' %(t1-t0)
pyglet.clock.schedule_interval(update, 1.0/CALL_UPDATE_X_TIMES_PER_SECOND)
''' END periodic event function '''
from pyglet.window import key
KEYPRESS_STEPSIZE = 10
zoom = 0
currentScale = 1
@window.event
@do_profile(DO_PROFILE)
def on_key_press(symbol, modifiers):
global DO_ANIMATE, DO_NEXT_STEP, KEYPRESS_STEPSIZE, zoom, currentScale
    global x_shift_single_buffer_current, x_shift_current
global plot_queue
# turn animation on / off.
if symbol == key.A:
DO_ANIMATE = not DO_ANIMATE
if DO_ANIMATE:
print 'animation on'
else:
print 'animation off'
elif symbol == key.C:
plot_queue = setup_plotting_queue()
print "Cleared Plot-Queue"
elif symbol == key.Q:
print "Plot-Queue size: %d" % (len(plot_queue))
# zero the plot along the x axis. in case of drifting, this should get the
# back onto the screen.
elif symbol == key.Z:
glTranslatef(+x_shift_single_buffer_current, 0.0, 0.0)
fps_display.label.x = fps_display.label.x - x_shift_single_buffer_current
x_shift_single_buffer_current = 0
x_shift_current = 0
elif symbol == key.S:
DO_NEXT_STEP = not DO_NEXT_STEP
elif symbol == key.LEFT:
glTranslatef(-KEYPRESS_STEPSIZE, 0.0, 0.0)
elif symbol == key.RIGHT:
glTranslatef(KEYPRESS_STEPSIZE, 0.0, 0.0)
elif (symbol == key.PLUS or symbol == key.NUM_ADD):
KEYPRESS_STEPSIZE += 10
print 'step size is now %d ' % KEYPRESS_STEPSIZE
elif (symbol == key.MINUS or symbol == key.NUM_SUBTRACT):
KEYPRESS_STEPSIZE -= 10
KEYPRESS_STEPSIZE = max(10, KEYPRESS_STEPSIZE)
print 'step size is now %d ' % KEYPRESS_STEPSIZE
else:
print '%s key, %s modifier was pressed' % (symbol, modifiers)
''' zooming
elif symbol == key.Z:
if modifiers == key.MOD_ALT + 16:
#zoom -= 0.5;
#glOrtho(+1.5 + zoom, 1.0 + zoom, +2.0 + zoom, 0.5 + zoom, +1.0, -3.5)
#currentScale -= 0.1
#glScaled(currentScale, currentScale, 1);
elif modifiers == key.MOD_SHIFT + 16:
#zoom += 0.5;
#glOrtho(-1.5 + zoom, 1.0 - zoom, -2.0 + zoom, 0.5 - zoom, -1.0, 3.5)
#currentScale += 0.1
#glScaled(currentScale, currentScale, 1);
'''
''' rotations
elif symbol == key.PAGEDOWN:
# we need to move objects into center, before rotating
#glRotatef(0.5, 1, 0, 0)
# need to move object back to original position
elif symbol == key.PAGEUP:
# we need to move objects into center, before rotating
#glRotatef(-0.5, 1, 0, 0)
# need to move object back to original position
'''
'''
BEGIN 'on_resize' function - can only be defined once 'window' exists
'''
@window.event
@do_profile(DO_PROFILE)
def on_resize(width, height):
global WIN_HEIGHT_current, WIN_WIDTH_current
WIN_HEIGHT_current = height
WIN_WIDTH_current = width
# TODO: currently we only rescale the Y dimension. Add X-Scaling!
if DEBUG:
print "new height %d " %(height)
print "new width %d " %(width)
''' END 'on_resize' function - can only be defined once 'window' exists '''
'''
BEGIN 'draw' function - can only be defined once 'window' exists
The EventLoop will dispatch this event when the window should be redrawn.
This will happen during idle time after any window events and after any
scheduled functions were called.
'''
@window.event
@do_profile(DO_PROFILE)
def on_draw():
# ~ 21ms (test6 was ~260ms)
global SHIFT_VIEW
# clear buffers to preset values
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# TODO:
# maybe we should move back to the origin and translate from there?
# glLoadIdentity()
# glTranslatef(-x_shift_single_buffer_current/2, 0.0, 0.0)
if SHIFT_VIEW:
#local_shift = (SHIFT_X_BY/CALL_UPDATE_X_TIMES_PER_SECOND)
# TODO: fix 'local_shift', right now we override it to '1'
# 'SHIFT_X_BY' needs to be an integral number, otherwise we get
# artifacts of single points moving up and down between shifts.
local_shift = NBR_BUFFERS_TO_UPDATE * STEPSIZE_X * NBR_DATA_POINTS_PER_BUFFER
#local_shift = 1
glTranslatef(-local_shift, 0.0, 0.0)
# shift location of FPS display by same amount - but in opposite direction
# TODO: this must be because of a different reference point?
fps_display.label.x = fps_display.label.x + local_shift
SHIFT_VIEW = False
if USE_UNIFORM_COLOR:
glColor3f(DEFAULT_COLOR[0], DEFAULT_COLOR[1], DEFAULT_COLOR[2])
height_per_panel = (WIN_HEIGHT_current / NBR_PANELS)
for panel in range(NBR_PANELS):
#glViewport(x, y, w, h)
glViewport(0, panel * height_per_panel, WIN_WIDTH_current, height_per_panel)
# plot each VBO
for segment in range(NBR_VBOS_PER_PANEL):
if not USE_UNIFORM_COLOR:
this_color = colors[panel][segment]
glColor3f(this_color[0], this_color[1], this_color[2])
# bind the named buffer object so we can work with it.
glBindBuffer(GL_ARRAY_BUFFER, vbos[panel][segment])
## TODO!
''' hide individual buffers in first VBO so that points disappear
smoothly in the first buffer '''
this_pointer_offset = 0
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO
if segment == 0:
this_pointer_offset = pointer_offset
nbr_points_to_draw = NBR_DATA_POINTS_PER_VBO - (pointer_offset / BYTES_PER_POINT)
elif segment == NBR_VBOS_PER_PANEL - 1:
# TODO: is 'nbr_points_rendered_in_last_vbo' correct? or are we plotting too few points?
this_pointer_offset = 0
nbr_points_to_draw = nbr_points_rendered_in_last_vbo
# specifies the location and data format of an array of vertex coordinates to use when rendering
glVertexPointer(n_COORDINATES_PER_VERTEX, GL_FLOAT, 0, this_pointer_offset)
# render primitives from array data
glDrawArrays(GL_LINE_STRIP, 0, nbr_points_to_draw)
# update the FPS display.
glViewport(0, 0, WIN_WIDTH_current, WIN_HEIGHT_current)
fps_display.draw()
''' END 'draw' function - can only be defined once 'window' exists '''
''' BEGIN setup part 2 '''
glClearColor(0, 0, 0, 1.0)
# enable VERTEX_ARRAY mode.
glEnableClientState(GL_VERTEX_ARRAY)
# try to render a smooth line
glEnable(GL_LINE_SMOOTH)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
# start application event loop
pyglet.app.run()
'''
print "quit counter " + str(on_draw_quit_counter)
print "re-draw counter " + str(on_draw_redraw_counter)
print "update counter " + str(update_counter)
'''
''' END setup part 2 '''
| StimOMatic/StimOMatic | python/OpenGLPlotting/pomp/apps/deprecated/01.06.2012/pCtrlLFP_old.py | Python | bsd-2-clause | 41,129 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Student.student_id'
db.alter_column('publications_student', 'student_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=12))
def backwards(self, orm):
# Changing field 'Student.student_id'
db.alter_column('publications_student', 'student_id', self.gf('django.db.models.fields.CharField')(max_length=8, unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 3, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'page_flags': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'on_delete': 'models.PROTECT', 'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entity'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['cms.Page']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person_user'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['domain']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'publications.authored': {
'Meta': {'object_name': 'Authored'},
'bibliographic_record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authored'", 'to': "orm['publications.BibliographicRecord']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_a_favourite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authored'", 'to': "orm['publications.Publication']"}),
'researcher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authored'", 'to': "orm['publications.Researcher']"}),
'reverse_sort_cue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'publications.bibliographicrecord': {
'Meta': {'ordering': "['-publication_date']", 'object_name': 'BibliographicRecord'},
'abstract': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'associated_authors': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'authors': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'awarded_date': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'begin_page': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'book_author_type': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'commissioning_body': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'confidential': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'doi': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'edition': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'editors': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'end_page': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'filed_date': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'finish_date': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'first_author': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_at_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'isbn_10': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'isbn_13': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'issn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'issue': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'journal': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'journal_article_type': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'language': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'last_author': ('django.db.models.fields.TextField', [], {}),
'location': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'medium': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name_of_conference': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'number': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'number_of_authors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'number_of_pages': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'number_of_pieces': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'parent_title': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'patent_number': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'patent_status': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'pii': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'place_of_publication': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bibliographic_records'", 'to': "orm['publications.Publication']"}),
'publication_date': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'publication_status': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'publisher': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reference_count': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'series': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'start_date': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'times_cited': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'verification_status': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'version': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'volume': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'publications.bibliourl': {
'Meta': {'object_name': 'BiblioURL'},
'bibliographic_record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['publications.BibliographicRecord']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'publications.publication': {
'Meta': {'object_name': 'Publication'},
'created_when': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified_when': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'needs_refetch': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'public_dspace_handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'publications.publicationsplugin': {
'Meta': {'object_name': 'PublicationsPlugin', 'db_table': "'cmsplugin_publicationsplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'publicationsplugin_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'favourites_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "'long'", 'max_length': '25'}),
'group_dates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'heading_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
'limit_to': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5', 'null': 'True', 'blank': 'True'}),
'publications_heading_text': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'})
},
'publications.researcher': {
'Meta': {'object_name': 'Researcher'},
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'research_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'person': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.Person']", 'unique': 'True', 'primary_key': 'True'}),
'publishes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symplectic_access': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symplectic_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'symplectic_int_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'synopsis': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'research_synopsis'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'publications.student': {
'Meta': {'object_name': 'Student'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'programme': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'researcher': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['publications.Researcher']", 'unique': 'True', 'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'student_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'supervisors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publications.Supervisor']", 'through': "orm['publications.Supervision']", 'symmetrical': 'False'}),
'thesis': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'publications.supervision': {
'Meta': {'object_name': 'Supervision'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publications.Student']"}),
'supervisor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publications.Supervisor']"})
},
'publications.supervisor': {
'Meta': {'object_name': 'Supervisor'},
'researcher': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['publications.Researcher']", 'unique': 'True', 'primary_key': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['publications'] | evildmp/arkestra-publications | publications/migrations/0003_auto__chg_field_student_student_id.py | Python | bsd-2-clause | 40,862 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from account.views import ChangePasswordView, SignupView, LoginView
from django.conf.urls import include, url
from django.contrib import admin
from example_thirdparty.forms import SignupFormWithCaptcha
urlpatterns = [
url(r'^admin/', include(admin.site.urls) if django.VERSION < (1, 10) else admin.site.urls),
# aliases to match original django-registration urls
url(r"^accounts/password/$", ChangePasswordView.as_view(),
name="auth_password_change"),
url(r"^accounts/signup/$",
SignupView.as_view(form_class=SignupFormWithCaptcha),
name="registration_register"),
url(r"^accounts/login/$", LoginView.as_view(), name="auth_login"),
url(r'^accounts/', include('account.urls')),
url(r'^captcha/', include('captcha.urls')),
url(r'^', include('pybb.urls', namespace='pybb')),
]
| hovel/pybbm | test/example_thirdparty/example_thirdparty/urls.py | Python | bsd-2-clause | 911 |
import urllib2
import urllib
import os, sys
from bs4 import *
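# usage (inferred from the argument handling below): python VAB_massdownload.py <start_post_id> <post_count>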
argv = sys.argv[1:]
begin = int(argv[0])
count = int(argv[1])
for i in range(begin, begin+count):
try:
url = 'http://danbooru.donmai.us/posts/' + str(i)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
html = response.read()
soup = BeautifulSoup(html)
relURL = soup.select('#image')[0]['src'].split('/data/')[1]
if 'sample' in relURL:
# Image was too big and thus was resized.
relURL = relURL.split('sample-')[1]
newPath = 'http://danbooru.donmai.us/data/' + relURL
newFile = 'C:\\programming\\vacbooru-master\\dbu\\' + relURL
if not os.path.exists(newFile):
r = urllib.urlopen(newPath).read()
if len(r) > 400:
f = open(newFile,'wb')
f.write(r)
f.close()
print str(i) + " downloaded"
else:
print str(i) + " is a 0 size image"
else:
print str(i) + " already exists"
except Exception as e:
print str(i) + " download failed: " + str(e)
if 'list index out of range' in str(e):
print "\t This is likley a image that needs dbu gold" | EdibleEd/vacbooru | VAB_massdownload.py | Python | bsd-2-clause | 1,305 |
"""
Clone server Model Four
Author: Min RK <[email protected]
"""
import zmq
from kvsimple import KVMsg
# simple struct for routing information for a key-value snapshot
class Route:
def __init__(self, socket, identity, subtree):
self.socket = socket # ROUTER socket to send to
self.identity = identity # Identity of peer who requested state
self.subtree = subtree # Client subtree specification
def send_single(key, kvmsg, route):
"""Send one state snapshot key-value pair to a socket"""
# check front of key against subscription subtree:
if kvmsg.key.startswith(route.subtree):
# Send identity of recipient first
route.socket.send(route.identity, zmq.SNDMORE)
kvmsg.send(route.socket)
def main():
# context and sockets
ctx = zmq.Context()
snapshot = ctx.socket(zmq.ROUTER)
snapshot.bind("tcp://*:5556")
publisher = ctx.socket(zmq.PUB)
publisher.bind("tcp://*:5557")
collector = ctx.socket(zmq.PULL)
collector.bind("tcp://*:5558")
sequence = 0
kvmap = {}
poller = zmq.Poller()
poller.register(collector, zmq.POLLIN)
poller.register(snapshot, zmq.POLLIN)
while True:
try:
items = dict(poller.poll(1000))
except:
break # Interrupted
# Apply state update sent from client
if collector in items:
kvmsg = KVMsg.recv(collector)
sequence += 1
kvmsg.sequence = sequence
kvmsg.send(publisher)
kvmsg.store(kvmap)
print "I: publishing update %5d" % sequence
# Execute state snapshot request
if snapshot in items:
msg = snapshot.recv_multipart()
identity, request, subtree = msg
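            # clients ask for a full state snapshot with an "ICANHAZ?" request,
            # passing the key subtree they want as the final frame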
if request == "ICANHAZ?":
pass
else:
print "E: bad request, aborting\n",
break
# Send state snapshot to client
route = Route(snapshot, identity, subtree)
# For each entry in kvmap, send kvmsg to client
for k,v in kvmap.items():
send_single(k,v,route)
# Now send END message with sequence number
print "Sending state shapshot=%d\n" % sequence,
snapshot.send(identity, zmq.SNDMORE)
kvmsg = KVMsg(sequence)
kvmsg.key = "KTHXBAI"
kvmsg.body = subtree
kvmsg.send(snapshot)
print " Interrupted\n%d messages handled" % sequence
if __name__ == '__main__':
main() | krattai/noo-ebs | docs/zeroMQ-guide2/examples/Python/clonesrv4.py | Python | bsd-2-clause | 2,618 |
'''abduction.py
Base functionality for logical abduction using a knowledge base of definite clauses
Andrew S. Gordon
'''
import itertools
from . import parse
from . import unify
def abduction(obs, kb, maxdepth, skolemize = True):
'''Logical abduction: returns a list of all sets of assumptions that entail the observations given the kb'''
indexed_kb = index_by_consequent_predicate(kb)
res = []
listoflists = [and_or_leaflists([ob], indexed_kb, maxdepth) for ob in obs]
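    # each observation yields its own list of candidate leaf-sets; the cartesian
    # product below picks one candidate per observation, and crunch() then
    # enumerates every way the pooled literals can be unified and deduplicated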
for u in itertools.product(*listoflists):
u = list(itertools.chain.from_iterable(u))
res.extend(crunch(u))
if skolemize:
return [unify.skolemize(r) for r in res]
else:
return res
def index_by_consequent_predicate(kb):
res = {}
for dc in kb:
predicate = parse.consequent(dc)[0]
if predicate in res:
res[predicate].append(dc)
else:
res[predicate] = [dc]
return res
def and_or_leaflists(remaining, indexed_kb, depth, antecedents = [], assumptions = []):
    '''Returns list of all entailing sets of leaves in the and-or backchaining tree'''
if depth == 0 and antecedents: # fail
return [] # (empty) list of lists
elif not remaining: # done with this level
if not antecedents: # found one
return [assumptions] # list of lists
else:
return and_or_leaflists(antecedents, indexed_kb, depth - 1, [], assumptions)
else: # more to go on this level
literal = remaining[0] # first of remaining
predicate = literal[0]
if predicate not in indexed_kb:
return and_or_leaflists(remaining[1:], indexed_kb, depth, antecedents, [literal] + assumptions) # shift literal to assumptions
else:
revisions = []
for rule in indexed_kb[predicate]: # indexed by predicate of literal
theta = unify.unify(literal, parse.consequent(rule))
if theta != None:
if depth == 0: # no depth for revision
return [] # (empty) list of lists
revisions.append([unify.subst(theta, remaining[1:]), # new remaining with substitutions
indexed_kb,
depth,
unify.standardize(unify.subst(theta, parse.antecedent(rule))) +
unify.subst(theta, antecedents), # new antecedents with substitutions
unify.subst(theta, assumptions)]) # new assumptions with substitutions
return itertools.chain(*[and_or_leaflists(*rev) for rev in revisions]) # list of lists (if any)
def crunch(conjunction):
'''Returns all possible ways that literals in a conjunction could be unified'''
return [k for k,v in itertools.groupby(sorted(cruncher(conjunction, 0)))] # dedupe solutions
def cruncher(conjunction, idx = 0):
if idx >= len(conjunction) - 1: # last one
return [[k for k,v in itertools.groupby(sorted(conjunction))]] # dedupe literals in solution
else:
res = []
for subsequent in range(idx + 1,len(conjunction)):
theta = unify.unify(conjunction[idx], conjunction[subsequent])
if theta:
new_conjunction = unify.subst(theta,
conjunction[0:subsequent] +
conjunction[(subsequent + 1):len(conjunction)])
res.extend(cruncher(new_conjunction, idx))
res.extend(cruncher(conjunction, idx + 1))
return res
| asgordon/EtcAbductionPy | etcabductionpy/abduction.py | Python | bsd-2-clause | 3,656 |
import redis_url
import unittest
class RedisUrlTestSuite(unittest.TestCase):
def test_redis_parse_localhost(self):
self.assertEqual(
redis_url.parse('redis://localhost:6379/0?cluster=false'),
{
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None
}
)
def test_redis_parse_remote(self):
self.assertEqual(
redis_url.parse('redis://:[email protected]:30001?cluster=false'),
{
'host': 'ec2-192-168-1-1.compute-1.amazon.aws.com',
'port': 30001,
'db': 0,
'password': '138913'
}
)
def test_redis_parse_cluster_localhost(self):
self.assertEqual(
redis_url.parse('redis://localhost:6379?cluster=true'),
{
'host': 'localhost',
'port': 6379,
'password': None
}
)
def test_redis_parse_cluster_skip_full_coverage_check(self):
self.assertEqual(
redis_url.parse('redis://localhost:6379?cluster=true&skip_full_coverage_check=true'),
{
'host': 'localhost',
'port': 6379,
'password': None,
'skip_full_coverage_check': True,
}
)
if __name__ == '__main__':
unittest.main()
| Xopherus/redis-url-py | test_redis_url.py | Python | bsd-2-clause | 1,467 |
# mesa - toolkit for building dynamic python apps with zero downtime
# basis: package is inspected for all instances of specified abc and each added to internal mesa list
# Casa is a mesa obj instantiated as holder of dynamic obj list, one for each abc type in specified package
# m = mesa.Casa(hideExceptions=False) parameter instructs whether to generate exception on existence of methods to run against abc method list
# Mesa.run('method name') = for methods executes named method against each concrete class in the package, does a check to ensure method name exists in abc
# Mesa.generate('method name') = a generator for functions that emits the results from calls to the specified function name in each concrete class. also checks
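#
# illustrative usage sketch only - the names and signatures (mesa.Casa, run, generate)
# are taken from the notes above and may not match the real implementation:
#
#   import mesa
#   m = mesa.Casa(hideExceptions=False)    # inspect the package, collect concrete classes per abc
#   m.run('refresh')                       # call refresh() on every concrete class found
#   for result in m.generate('status'):    # yield each concrete class's status() result
#       print(result)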
#
# house recipes
# event driven message-passing based app framework - each casa contains a specific route or flow
# wsgi based simple mvc web framework using 2bit as templating language. single casa for all pages
# DOTO: decide best way to test, some scenarios require file io but no clicks required - simple unit tests
# DOTO: generate is a generator yielding a dictionary of results
# DOTO: check flickoutr and how to dynamically create classes with parameters
# DOTO: auth - way to supply callback for required input fields collection from ui
# DOTO: base.Casa appears to pass its own instance as self to called module. Unsure what side effects are?
# DOTO: utility interface to implement by client app to take care of input for each specific data type
# DOTO: accompanying Method utility that once required args are declared once, elegant handling
# ie no passing from interface to host back to interface like it is in unit test right now
# TODO: meta methods that build on the basic iterating methods to abstract away iteration from caller
# TODO: check for abc type conformance
# TODO: at minute convention is that dynamic module contains one class of same name. Change to support all/others
# TODO: mesa test suite scenarios:
# build a casa, add class, rebuild casa
# build casa, call method not in abc
# build casa with concrete class not implementing an abc method
| rutherford/mesa | TODO.py | Python | bsd-2-clause | 2,149 |
#!/usr/bin/env python3
from linker import Linker
import htmlPage
import content.index,content.db,content.fincom
# TODO put into config
spbBudgetXlsPath='../spb-budget-xls'
if __name__=='__main__':
linker=Linker('filelists',{
'csv':['csv'],
'xls':['xls'],
'db':['zip','sql','xlsx'],
})
htmlPage.HtmlPage('index.html','Данные бюджета Санкт-Петербурга',content.index.content,linker).write('output/index.html')
htmlPage.HtmlPage('xls.html','Ведомственная структура расходов бюджета Санкт-Петербурга в csv и xls',htmlPage.importContent(spbBudgetXlsPath+'/index.html'),linker).write('output/xls.html')
htmlPage.HtmlPage('db.html','БД и таблицы расходов бюджета Санкт-Петербурга из разных источников',content.db.content,linker).write('output/db.html')
htmlPage.HtmlPage('fincom.html','Что можно найти на сайте Комитета финансов',content.fincom.content,linker).write('output/fincom.html')
| AntonKhorev/BudgetSpb | main.py | Python | bsd-2-clause | 1,072 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import zmq
import signal
interrupted = False
def signal_handler(signum, frame):
global interrupted
interrupted = True
if __name__ == '__main__':
ctx = zmq.Context()
rep = ctx.socket(zmq.REP)
rep.bind('tcp://*:5555')
  print 'reply init success ...'
try:
rep.recv()
except KeyboardInterrupt:
    print 'W: interrupt received, proceeding ...'
count = 0
signal.signal(signal.SIGINT, signal_handler)
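  # from here on SIGINT only sets the interrupted flag; the loop below polls with
  # zmq.DONTWAIT so it can notice the flag and shut the server down cleanly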
while True:
try:
msg = rep.recv(zmq.DONTWAIT)
except zmq.ZMQError:
pass
count += 1
if interrupted:
      print 'W: interrupt received, killing server ...'
break
rep.close()
| ASMlover/study | zeroMQ/python/interrupt/rep.py | Python | bsd-2-clause | 2,030 |
import datetime
import logging
JRD_TYPES = ('application/json', 'application/xrd+json', 'text/json')
XRD_TYPES = ('application/xrd+xml', 'text/xml')
logger = logging.getLogger("rd")
def _is_str(s):
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def loads(content, content_type):
from rd import jrd, xrd
content_type = content_type.split(";")[0]
if content_type in JRD_TYPES:
logger.debug("loads() loading JRD")
return jrd.loads(content)
elif content_type in XRD_TYPES:
logger.debug("loads() loading XRD")
return xrd.loads(content)
#
# special XRD types
#
class Attribute(object):
def __init__(self, name, value):
self.name = name
self.value = value
def __cmp__(self, other):
return cmp(str(self), str(other))
def __eq__(self, other):
return str(self) == other
def __str__(self):
return "%s=%s" % (self.name, self.value)
class Element(object):
def __init__(self, name, value, attrs=None):
self.name = name
self.value = value
self.attrs = attrs or {}
class Title(object):
def __init__(self, value, lang=None):
self.value = value
self.lang = lang
def __cmp__(self, other):
return cmp(str(self), str(other))
def __eq__(self, other):
return str(self) == str(other)
def __str__(self):
if self.lang:
return "%s:%s" % (self.lang, self.value)
return self.value
class Property(object):
def __init__(self, type_, value=None):
self.type = type_
self.value = value
def __cmp__(self, other):
return cmp(str(self), str(other))
def __eq__(self, other):
return str(self) == other
def __str__(self):
if self.value:
return "%s:%s" % (self.type, self.value)
return self.type
#
# special list types
#
class ListLikeObject(list):
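    # list subclass that validates/coerces every element through item(), so
    # __setitem__, append and extend all reject or convert values of the wrong type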
def __setitem__(self, key, value):
value = self.item(value)
super(ListLikeObject, self).__setitem__(key, value)
def append(self, value):
value = self.item(value)
super(ListLikeObject, self).append(value)
def extend(self, values):
values = (self.item(value) for value in values)
super(ListLikeObject, self).extend(values)
class AttributeList(ListLikeObject):
def __call__(self, name):
for attr in self:
if attr.name == name:
yield attr
def item(self, value):
if isinstance(value, (list, tuple)):
return Attribute(*value)
elif not isinstance(value, Attribute):
raise ValueError('value must be an instance of Attribute')
return value
class ElementList(ListLikeObject):
def item(self, value):
if not isinstance(value, Element):
raise ValueError('value must be an instance of Type')
return value
class TitleList(ListLikeObject):
def item(self, value):
if _is_str(value):
return Title(value)
elif isinstance(value, (list, tuple)):
return Title(*value)
elif not isinstance(value, Title):
raise ValueError('value must be an instance of Title')
return value
class LinkList(ListLikeObject):
def __call__(self, rel):
for link in self:
if link.rel == rel:
yield link
def item(self, value):
if not isinstance(value, Link):
raise ValueError('value must be an instance of Link')
return value
class PropertyList(ListLikeObject):
def __call__(self, type_):
for prop in self:
if prop.type == type_:
yield prop
def item(self, value):
if _is_str(value):
return Property(value)
elif isinstance(value, (tuple, list)):
return Property(*value)
elif not isinstance(value, Property):
raise ValueError('value must be an instance of Property')
return value
#
# Link object
#
class Link(object):
def __init__(self, rel=None, type=None, href=None, template=None):
self.rel = rel
self.type = type
self.href = href
self.template = template
self._titles = TitleList()
self._properties = PropertyList()
def get_titles(self):
return self._titles
titles = property(get_titles)
def get_properties(self):
return self._properties
properties = property(get_properties)
#
# main RD class
#
class RD(object):
def __init__(self, xml_id=None, subject=None):
self.xml_id = xml_id
self.subject = subject
self._expires = None
self._aliases = []
self._properties = PropertyList()
self._links = LinkList()
self._signatures = []
self._attributes = AttributeList()
self._elements = ElementList()
# ser/deser methods
def to_json(self):
from rd import jrd
return jrd.dumps(self)
def to_xml(self):
from rd import xrd
return xrd.dumps(self)
# helper methods
def find_link(self, rels, attr=None):
if not isinstance(rels, (list, tuple)):
rels = (rels,)
for link in self.links:
if link.rel in rels:
if attr:
return getattr(link, attr, None)
return link
# custom elements and attributes
def get_elements(self):
return self._elements
elements = property(get_elements)
@property
def attributes(self):
return self._attributes
# defined elements and attributes
def get_expires(self):
return self._expires
def set_expires(self, expires):
if not isinstance(expires, datetime.datetime):
raise ValueError('expires must be a datetime object')
self._expires = expires
expires = property(get_expires, set_expires)
def get_aliases(self):
return self._aliases
aliases = property(get_aliases)
def get_properties(self):
return self._properties
properties = property(get_properties)
def get_links(self):
return self._links
links = property(get_links)
def get_signatures(self):
return self._signatures
    signatures = property(get_signatures)
| jcarbaugh/python-rd | rd/core.py | Python | bsd-3-clause | 6,389 |
"""
keybump
~~~~~~~
manage your versioning like a boss .
:copyright: (c) 2015 by gregorynicholas.
:license: MIT, see LICENSE for more details.
"""
from __future__ import unicode_literals
__version__ = '3.0.1'
| gregorynicholas/keybump | keybump/__init__.py | Python | bsd-3-clause | 223 |
#!/usr/bin/env python3
#
# Copyright 2021 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper script to run arbitrary bash files from GN.
This script should be used only when absolutely necessary and never in a
cross-platform way (that is, it should only be used for an action on a
particular platform, not a platform-independent target).
"""
import logging
import subprocess
import sys
if __name__ == '__main__':
logging_format = '[%(levelname)s:%(filename)s:%(lineno)s] %(message)s'
logging.basicConfig(
level=logging.INFO, format=logging_format, datefmt='%H:%M:%S')
logging.warning('Calling a bash process during GN build. '
'Avoid doing this whenever possible.')
sys.exit(subprocess.call(sys.argv[1:]))
| youtube/cobalt | starboard/build/run_bash.py | Python | bsd-3-clause | 1,290 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Face.district_id'
db.add_column(u'faces_face', 'district_id',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['faces.District'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Face.district_id'
db.delete_column(u'faces_face', 'district_id_id')
models = {
'album.imagecollection': {
'Meta': {'object_name': 'ImageCollection'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'})
},
'album.imagecollectionimage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'ImageCollectionImage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '200'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'faces.district': {
'Meta': {'object_name': 'District'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'district_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'faces.face': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Face'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['faces.District']", 'null': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pinned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'})
},
'faces.faceimage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'FaceImage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'face': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['faces.Face']"}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'face_image'", 'to': "orm['album.ImageCollectionImage']"}),
'image_file': ('mezzanine.core.fields.FileField', [], {'max_length': '200', 'null': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pinned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['faces'] | RuralIndia/pari | pari/faces/migrations/0006_auto__add_field_face_district_id.py | Python | bsd-3-clause | 11,120 |
"""
A module of restricted Boltzmann machine (RBM) modified
from the Deep Learning Tutorials (www.deeplearning.net/tutorial/).
Copyright (c) 2008-2013, Theano Development Team All rights reserved.
Modified by Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: [email protected]
"""
from __future__ import division
import time
import math
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import classification as cl
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
def __init__(self, input=None, n_visible=784, n_hidden=500, \
W=None, hbias=None, vbias=None, numpy_rng=None,
theano_rng=None):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
:param vbias: None for standalone RBMs or a symbolic variable
pointing to a shared visible units bias
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
            # W is initialized with `initial_W`, which is uniformly
            # sampled from -4*sqrt(6./(n_visible+n_hidden)) to
            # 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
            # converted using asarray to dtype theano.config.floatX so
            # that the code is runnable on GPU
initial_W = numpy.asarray(numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)),
dtype=theano.config.floatX)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(value=numpy.zeros(n_hidden,
dtype=theano.config.floatX),
name='hbias', borrow=True)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(value=numpy.zeros(n_visible,
dtype=theano.config.floatX),
name='vbias', borrow=True)
# initialize input layer for standalone RBM or layer0 of DBN
self.input = input
if not input:
self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Function to compute the free energy '''
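        # For a binary RBM this evaluates, row-wise for each sample in
        # v_sample:
        #   F(v) = -v . vbias - sum_j log(1 + exp((v . W)_j + hbias_j))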
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param persistent: None for CD. For PCD, shared variable
containing old state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param k: number of Gibbs steps to do in CD-k/PCD-k
Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
# compute positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
        # for CD, we use the newly generated hidden sample
# for PCD, we initialize from the old state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
        # update is a dictionary type, updates of values of shared variables
# including model parameters and persistent chain
[pre_sigmoid_nvs, nv_means, nv_samples,
pre_sigmoid_nhs, nh_means, nh_samples], updates = \
theano.scan(self.gibbs_hvh,
# the None are place holders, saying that
# chain_start is the initial state corresponding to the
# 6th output
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k)
# determine gradients on RBM parameters
        # note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
cost = T.mean(self.free_energy(self.input)) - T.mean(
self.free_energy(chain_end))
# We must not compute the gradient through the gibbs sampling
gparams = T.grad(cost, self.params, consider_constant=[chain_end])
# constructs the update dictionary
for gparam, param in zip(gparams, self.params):
# make sure that the learning rate is of the right dtype
# update is a dictionary, add the parameter update dictionary items
updates[param] = param - gparam * T.cast(lr,
dtype=theano.config.floatX)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_samples[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(updates,
pre_sigmoid_nvs[-1])
return monitoring_cost, updates
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = T.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
fe_xi)))
# increment bit_i_idx % number as part of updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
optimized for speed and stability. This is done by changing
several parts of the subgraphs with others. One such
optimization expresses terms of the form log(sigmoid(x)) in
terms of softplus. We need this optimization for the
        cross-entropy since sigmoid of numbers larger than 30. (or
        even less than that) turn to 1. and numbers smaller than
        -30. turn to 0, which in turn will force theano to compute
log(0) and therefore we will get either -inf or NaN as
cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
of log(sigmoid(..)) and will not apply the wanted
optimization. We can not go and replace the sigmoid in scan
with something else also, because this only needs to be done
        on the last step. Therefore the easiest and most efficient way
is to get also the pre-sigmoid activation as an output of
scan, and apply both the log and sigmoid outside scan such
that Theano can catch and optimize the expression.
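        In short, the optimization rewrites log(sigmoid(x)) as
        -softplus(-x) = -log(1 + exp(-x)), which remains finite even when
        sigmoid(x) saturates to 0 or 1 for large |x|.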
"""
cross_entropy = T.mean(
T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
(1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
axis=1))
return cross_entropy
def train_model(rng=numpy.random.RandomState(100), train_set_x_org=None, n_hidden=100,
learning_rate=0.1, training_epochs=100, batch_size=100, persistent_chain_k=15):
"""
    Train an RBM model given training data.
    INPUTS:
    rng: numpy random number state.
    train_set_x_org: numpy 2d array, each row is a training sample.
    n_hidden: int, number of hidden units.
    learning_rate: float scalar, the initial learning rate.
    training_epochs: int scalar, the maximal number of epochs.
    batch_size: int scalar, minibatch size.
    persistent_chain_k: int, number of Gibbs steps between parameter updates (the k in PCD-k).
OUTPUTS:
rbm: object of RBM. The model learned.
mean_hidden: numpy 2d array, each row is a reduced training sample.
    training_time: float, training time in seconds.
"""
train_set_x = theano.shared(numpy.asarray(train_set_x_org,dtype=theano.config.floatX),borrow=True)
n_train_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size))
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
# shared variable to reduce the learning rate
learning_rate_shared=theano.shared(learning_rate,name='learn_rate_shared')
# learning_rate_init=T.scalar(name='learning_rate_init',dtype=theano.config.floatX)
# epoch_variable=T.iscalar(name='epoch_variable')
decay_rate=T.scalar(name='decay_rate',dtype=theano.config.floatX)
# compute_learn_rate=theano.function([learning_rate_init,epoch_variable,decay_rate],learning_rate_shared, \
# updates=[(learning_rate_shared,learning_rate_init*decay_rate**(epoch_variable//100))]) # thenao does not support math.pow, instead use T.pow() or a**b
reduce_learning_rate=theano.function([decay_rate],learning_rate_shared,updates=[(learning_rate_shared,learning_rate_shared*decay_rate)])
n_visible=train_set_x_org.shape[1] # number of input features
theano_rng = RandomStreams(rng.randint(2 ** 30))
# initialize storage for the persistent chain (state = hidden
# layer of chain)
persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
dtype=theano.config.floatX),
borrow=True)
# construct the RBM class
rbm = RBM(input=x, n_visible=n_visible,
n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
# get the cost and the gradient corresponding to one step of CD-15
cost, updates = rbm.get_cost_updates(lr=learning_rate,persistent=persistent_chain,k=persistent_chain_k)
# it is ok for a theano function to have no output
# the purpose of train_rbm is solely to update the RBM parameters
train_rbm_one_iteration = theano.function([index], cost, updates=updates,
givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]},
name='train_rbm')
# optimization, gradient descent
max_num_epoch_change_learning_rate=100
max_num_epoch_not_improve=2*max_num_epoch_change_learning_rate
max_num_epoch_change_rate=0.8
epoch_change_count=0
best_cost=numpy.inf
# train the model using training set
start_time=time.clock()
for epoch in xrange(training_epochs):
c=[] # costs of all minibatches of this epoch
epoch_change_count=epoch_change_count+1
if epoch_change_count % max_num_epoch_change_learning_rate ==0:
reduce_learning_rate(0.5)
max_num_epoch_change_learning_rate= \
cl.change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate)
max_num_epoch_not_improve=2*max_num_epoch_change_learning_rate
epoch_change_count=0
for batch_index in xrange(n_train_batches):
c_batch=train_rbm_one_iteration(batch_index)
c.append(c_batch)
this_cost=numpy.mean(c)
        print 'Training epoch: %d, cost: %f' % (epoch,this_cost)
if this_cost<best_cost:
best_cost=this_cost
num_epoch_not_improve=0
        else:
num_epoch_not_improve=num_epoch_not_improve+1
if num_epoch_not_improve>=max_num_epoch_not_improve:
break
end_time=time.clock()
training_time=end_time-start_time
    print 'Training time: %f minutes' %(training_time/60)
# return the trained model and the reduced training set
extracted=rbm.propup(train_set_x)
get_extracted=theano.function([],extracted)
pre_activation,mean_hidden=get_extracted()
return rbm, mean_hidden, training_time
def test_model(model_trained,test_set_x_org=None):
"""
Get the reduced data using the model learned.
INPUTS:
model_trained: object of RBM, RBM model learned.
test_set_x_org: numpy 2d array, each row is a sample.
OUTPUTS:
mean_hidden: numpy 2d array, the reduced data.
"""
test_set_x=theano.shared(numpy.asarray(test_set_x_org,dtype=theano.config.floatX),borrow=True)
extracted=model_trained.propup(test_set_x)
get_extracted=theano.function([],extracted)
pre_activation,mean_hidden=get_extracted()
return mean_hidden
def sample_model(rng,model_trained,test_set_x_org=None,n_chains=20,n_samples=10,sample_gap=1000):
"""
Sample from the trained RBM given some actual examples to initialize the algorithm.
INPUTS:
rng: numpy random number state.
model_trained: object of RBM, RBM model learned.
    test_set_x_org: numpy 2d array, each row is an actual example.
    n_chains: number of Gibbs chains to be sampled independently.
    n_samples: int, number of samples to be taken in each chain.
A sample is taken every "sample_gap" steps.
sample_gap: int, steps of Gibbs sampling before taking samples.
OUTPUTS:
samples_vis: numpy array of n_samples X n_chains X num_visible_units,
sampled samples.
samples_vis_mf: numpy array of n_samples X n_chains X num_visible_units,
mean fields of sampled samples.
"""
test_set_x=theano.shared(numpy.asarray(test_set_x_org,dtype=theano.config.floatX),borrow=True)
number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]
# pick random test examples, with which to initialize the persistent chain
test_idx = rng.randint(number_of_test_samples - n_chains)
persistent_vis_chain = theano.shared(numpy.asarray(
test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
dtype=theano.config.floatX))
# sampling
[presig_hids, hid_mfs, hid_samples, presig_vis,
vis_mfs, vis_samples], updates = \
theano.scan(model_trained.gibbs_vhv,
outputs_info=[None, None, None, None,
None, persistent_vis_chain],
n_steps=sample_gap)
    # add to updates the shared variable that takes care of our persistent
    # chain.
updates.update({persistent_vis_chain: vis_samples[-1]})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = theano.function([], [vis_mfs[-1], vis_samples[-1]],
updates=updates,
name='sample_fn')
# sample n_samples here
samples_vis=numpy.zeros((n_samples,n_chains,model_trained.n_visible),dtype=test_set_x_org.dtype)
    # use a separate array for the mean-field values; aliasing samples_vis
    # here would make the two returned arrays identical
    samples_vis_mf=samples_vis.copy()
for idx in xrange(n_samples):
vis_mf, vis_sample = sample_fn()
samples_vis[idx,:,:]=vis_sample
samples_vis_mf[idx,:,:]=vis_mf
return samples_vis, samples_vis_mf
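# Usage sketch: train an RBM on small synthetic binary data, extract the
# hidden representations and draw a few samples. All variable names and
# sizes below are illustrative only.
if __name__ == '__main__':
    demo_rng = numpy.random.RandomState(100)
    demo_data = numpy.asarray(demo_rng.rand(200, 64) > 0.5,
                              dtype=theano.config.floatX)
    demo_rbm, demo_codes, demo_time = train_model(
        rng=demo_rng, train_set_x_org=demo_data, n_hidden=16,
        learning_rate=0.1, training_epochs=5, batch_size=20,
        persistent_chain_k=1)
    demo_reduced = test_model(demo_rbm, demo_data)
    demo_samples, demo_sample_mfs = sample_model(
        demo_rng, demo_rbm, demo_data, n_chains=5, n_samples=2,
        sample_gap=10)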
| yifeng-li/DECRES | rbm.py | Python | bsd-3-clause | 22,163 |
from __future__ import absolute_import
from collections import namedtuple
from django.conf import settings
from sentry.utils.dates import to_datetime
from sentry.utils.services import LazyServiceWrapper
from .backends.base import Backend # NOQA
from .backends.dummy import DummyBackend # NOQA
backend = LazyServiceWrapper(Backend, settings.SENTRY_DIGESTS,
settings.SENTRY_DIGESTS_OPTIONS,
(DummyBackend,))
backend.expose(locals())
class Record(namedtuple('Record', 'key value timestamp')):
@property
def datetime(self):
return to_datetime(self.timestamp)
ScheduleEntry = namedtuple('ScheduleEntry', 'key timestamp')
OPTIONS = frozenset((
'increment_delay',
'maximum_delay',
'minimum_delay',
))
def get_option_key(plugin, option):
assert option in OPTIONS
return 'digests:{}:{}'.format(plugin, option)
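# Example (illustrative; "mail" is a hypothetical plugin name):
#
#   >>> get_option_key('mail', 'increment_delay')
#   'digests:mail:increment_delay'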
| JackDanger/sentry | src/sentry/digests/__init__.py | Python | bsd-3-clause | 910 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: William Baker
#
# This test is used to ensure planning with a MoveGroupInterface is
# possible if the robot's move_group node is in a different namespace
import unittest
import numpy as np
import rospy
import rostest
import os
from moveit_ros_planning_interface._moveit_move_group_interface import MoveGroupInterface
class PythonMoveGroupNsTest(unittest.TestCase):
PLANNING_GROUP = "manipulator"
PLANNING_NS = "test_ns/"
@classmethod
def setUpClass(self):
self.group = MoveGroupInterface(self.PLANNING_GROUP, "%srobot_description"%self.PLANNING_NS, self.PLANNING_NS)
@classmethod
def tearDown(self):
pass
def check_target_setting(self, expect, *args):
if len(args) == 0:
args = [expect]
self.group.set_joint_value_target(*args)
res = self.group.get_joint_value_target()
self.assertTrue(np.all(np.asarray(res) == np.asarray(expect)),
"Setting failed for %s, values: %s" % (type(args[0]), res))
def test_target_setting(self):
n = self.group.get_variable_count()
self.check_target_setting([0.1] * n)
self.check_target_setting((0.2,) * n)
self.check_target_setting(np.zeros(n))
self.check_target_setting([0.3] * n, {name: 0.3 for name in self.group.get_active_joints()})
self.check_target_setting([0.5] + [0.3]*(n-1), "joint_1", 0.5)
def plan(self, target):
self.group.set_joint_value_target(target)
return self.group.compute_plan()
def test_validation(self):
current = np.asarray(self.group.get_current_joint_values())
plan1 = self.plan(current + 0.2)
plan2 = self.plan(current + 0.2)
# first plan should execute
self.assertTrue(self.group.execute(plan1))
# second plan should be invalid now (due to modified start point) and rejected
self.assertFalse(self.group.execute(plan2))
# newly planned trajectory should execute again
plan3 = self.plan(current)
self.assertTrue(self.group.execute(plan3))
if __name__ == '__main__':
PKGNAME = 'moveit_ros_planning_interface'
NODENAME = 'moveit_test_python_move_group'
rospy.init_node(NODENAME)
rostest.rosrun(PKGNAME, NODENAME, PythonMoveGroupNsTest)
| davetcoleman/moveit | moveit_ros/planning_interface/test/python_move_group_ns.py | Python | bsd-3-clause | 3,928 |
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import datetime
import sys
import operator
import warnings
import pytest
import shutil
import tempfile
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
try:
import pytz
_has_pytz = True
except ImportError:
_has_pytz = False
class _DeprecationTestCase(object):
# Just as warning: warnings uses re.match, so the start of this message
# must match.
message = ''
warning_cls = DeprecationWarning
def setup(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
# https://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
# (While checking them set to "error" those are ignored anyway)
# We still have them show up, because otherwise they would be raised
warnings.filterwarnings("always", category=self.warning_cls)
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
def teardown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=np._NoValue,
args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This first checks if the function when called gives `num`
DeprecationWarnings, after that it tries to raise these
DeprecationWarnings and compares them with `exceptions`.
The exceptions can be different for cases where this code path
is simply not anticipated and the exception is replaced.
Parameters
----------
function : callable
The function to test
num : int
Number of DeprecationWarnings to expect. This should normally be 1.
ignore_others : bool
Whether warnings of the wrong type should be ignored (note that
the message is not checked)
function_fails : bool
If the function would normally fail, setting this will check for
warnings inside a try/except block.
exceptions : Exception or tuple of Exceptions
Exception to expect when turning the warnings into an error.
The default checks for DeprecationWarnings. If exceptions is
empty the function is expected to run successfully.
args : tuple
Arguments for `function`
kwargs : dict
Keyword arguments for `function`
"""
# reset the log
self.log[:] = []
if exceptions is np._NoValue:
exceptions = (self.warning_cls,)
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
# just in case, clear the registry
num_found = 0
for warning in self.log:
if warning.category is self.warning_cls:
num_found += 1
elif not ignore_others:
raise AssertionError(
"expected %s but got: %s" %
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=self.warning_cls)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError(
"No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError(
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
"""Test that warnings are not raised.
This is just a shorthand for:
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
"""
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
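# Sketch of the intended usage of the helper above (names are hypothetical;
# this is not an actual test in the suite):
#
#     class TestSomeDeprecatedFeature(_DeprecationTestCase):
#         message = "some_old_function is deprecated"
#
#         def test_basic(self):
#             self.assert_deprecated(lambda: some_old_function(1))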
class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
class TestNonTupleNDIndexDeprecation(object):
def test_basic(self):
a = np.zeros((5, 5))
with warnings.catch_warnings():
warnings.filterwarnings('always')
assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_warns(FutureWarning, a.__getitem__, [slice(None)])
warnings.filterwarnings('error')
assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_raises(FutureWarning, a.__getitem__, [slice(None)])
            # a[[0, 1]] always was advanced indexing, so no error/warning
a[[0, 1]]
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
Also test FutureWarning for the None comparison.
"""
message = "elementwise.* comparison failed; .*"
def test_normal_types(self):
for op in (operator.eq, operator.ne):
# Broadcasting errors:
self.assert_deprecated(op, args=(np.zeros(3), []))
a = np.zeros(3, dtype='i,i')
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
# Element comparison error (numpy array can't be compared).
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
def test_string(self):
# For two string arrays, strings always raised the broadcasting error:
a = np.array(['a', 'b'])
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
# The empty list is not cast to string, and this used to pass due
# to dtype mismatch; now (2018-06-21) it correctly leads to a
# FutureWarning.
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray(object):
def __array__(self):
raise TypeError
# Needed so Python 3 does not raise DeprecationWarning twice.
def __ne__(self, other):
return NotImplemented
self.assert_deprecated(lambda: np.arange(2) == NotArray())
self.assert_deprecated(lambda: np.arange(2) != NotArray())
struct1 = np.zeros(2, dtype="i4,i4")
struct2 = np.zeros(2, dtype="i4,i4,i4")
assert_warns(FutureWarning, lambda: struct1 == 1)
assert_warns(FutureWarning, lambda: struct1 == struct2)
assert_warns(FutureWarning, lambda: struct1 != 1)
assert_warns(FutureWarning, lambda: struct1 != struct2)
def test_array_richcompare_legacy_weirdness(self):
# It doesn't really work to use assert_deprecated here, b/c part of
# the point of assert_deprecated is to check that when warnings are
# set to "error" mode then the error is propagated -- which is good!
# But here we are testing a bunch of code that is deprecated *because*
# it has the habit of swallowing up errors and converting them into
# different warnings. So assert_warns will have to be sufficient.
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
# No warning for scalar comparisons
with warnings.catch_warnings():
warnings.filterwarnings("error")
assert_(not (np.array(0) == "a"))
assert_(np.array(0) != "a")
assert_(not (np.int16(0) == "a"))
assert_(np.int16(0) != "a")
for arg1 in [np.asarray(0), np.int16(0)]:
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
if sys.version_info[0] >= 3:
# py3
with warnings.catch_warnings() as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
assert_(not l)
else:
# py2
assert_warns(DeprecationWarning, f, arg1, arg2)
class TestDatetime64Timezone(_DeprecationTestCase):
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
datetime64 is now timezone naive rather than UTC only.
It will be quite a while before we can remove this, because, at the very
least, a lot of existing code uses the 'Z' modifier to avoid conversion
from local time to UTC, even if otherwise it handles time in a timezone
naive fashion.
"""
def test_string(self):
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
@pytest.mark.skipif(not _has_pytz,
reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
self.assert_deprecated(np.datetime64, args=(dt,))
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
"""View of non-C-contiguous arrays deprecated in 1.11.0.
The deprecation will not be raised for arrays that are both C and F
contiguous, as C contiguous is dominant. There are more such arrays
with relaxed stride checking than without so the deprecation is not
as visible with relaxed stride checking in force.
"""
def test_fortran_contiguous(self):
self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
"""Invalid arguments to the ORDER parameter in array.flatten() should not be
allowed and should raise an error. However, in the interests of not breaking
code that may inadvertently pass invalid arguments to this parameter, a
DeprecationWarning will be issued instead for the time being to give developers
time to refactor relevant code.
"""
def test_flatten_array_non_string_arg(self):
x = np.zeros((3, 5))
self.message = ("Non-string object detected for "
"the array ordering. Please pass "
"in 'C', 'F', 'A', or 'K' instead")
self.assert_deprecated(x.flatten, args=(np.pi,))
def test_flatten_array_invalid_string_arg(self):
# Tests that a DeprecationWarning is raised
# when a string of length greater than one
# starting with "C", "F", "A", or "K" (case-
# and unicode-insensitive) is passed in for
# the ORDER parameter. Otherwise, a TypeError
# will be raised!
x = np.zeros((3, 5))
self.message = ("Non length-one string passed "
"in for the array ordering. Please "
"pass in 'C', 'F', 'A', or 'K' instead")
self.assert_deprecated(x.flatten, args=("FACK",))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
in the interests of maintaining backwards compatibility, only a Deprecation-
Warning will be raised instead for the time being to give developers time to
refactor relevant code.
"""
def test_data_attr_assignment(self):
a = np.arange(10)
b = np.linspace(0, 1, 10)
self.message = ("Assigning the 'data' attribute is an "
"inherently unsafe operation and will "
"be removed in the future.")
self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
"""Argument to the num parameter in linspace that cannot be
safely interpreted as an integer is deprecated in 1.12.0.
Argument to the num parameter in linspace that cannot be
safely interpreted as an integer should not be allowed.
In the interest of not breaking code that passes
an argument that could still be interpreted as an integer, a
DeprecationWarning will be issued for the time being to give
developers time to refactor relevant code.
"""
def test_float_arg(self):
# 2016-02-25, PR#7328
self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
represent the number in base 2 (positive) or 2's complement (negative) form,
the function used to silently ignore the parameter and return a representation
using the minimal number of bits needed for the form in question. Such behavior
is now considered unsafe from a user perspective and will raise an error in the future.
"""
def test_insufficient_width_positive(self):
args = (10,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
def test_insufficient_width_negative(self):
args = (-5,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
class TestNumericStyleTypecodes(_DeprecationTestCase):
"""
Deprecate the old numeric-style dtypes, which are especially
confusing for complex types, e.g. Complex32 -> complex64. When the
deprecation cycle is complete, the check for the strings should be
removed from PyArray_DescrConverter in descriptor.c, and the
deprecated keys should not be added as capitalized aliases in
_add_aliases in numerictypes.py.
"""
def test_all_dtypes(self):
deprecated_types = [
'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
]
if sys.version_info[0] < 3:
deprecated_types.extend(['Unicode0', 'String0'])
for dt in deprecated_types:
self.assert_deprecated(np.dtype, exceptions=(TypeError,),
args=(dt,))
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
"""
See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
if used for division
List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
def test_int_dtypes(self):
#scramble types and do some mix and match testing
deprecated_types = [
'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
'intp', 'int64', 'uint32', 'int16'
]
if sys.version_info[0] < 3 and sys.py3kwarning:
import operator as op
dt2 = 'bool_'
for dt1 in deprecated_types:
a = np.array([1,2,3], dtype=dt1)
b = np.array([1,2,3], dtype=dt2)
self.assert_deprecated(op.div, args=(a,b))
dt2 = dt1
class TestNonNumericConjugate(_DeprecationTestCase):
"""
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
which conflicts with the error behavior of np.conjugate.
"""
def test_conjugate(self):
for a in np.array(5), np.array(5j):
self.assert_not_deprecated(a.conjugate)
for a in (np.array('s'), np.array('2016', 'M'),
np.array((1, 2), [('a', int), ('b', int)])):
self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
# 2017-05-03, 1.13.0
def test_npy_char_deprecation(self):
from numpy.core._multiarray_tests import npy_char_deprecation
self.assert_deprecated(npy_char_deprecation)
assert_(npy_char_deprecation() == 'S1')
class TestPyArray_AS1D(_DeprecationTestCase):
def test_npy_pyarrayas1d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
class TestPyArray_AS2D(_DeprecationTestCase):
def test_npy_pyarrayas2d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class Test_UPDATEIFCOPY(_DeprecationTestCase):
"""
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
WRITEBACKIFCOPY instead
"""
def test_npy_updateifcopy_deprecation(self):
from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
arr = np.arange(9).reshape(3, 3)
v = arr.T
self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
class TestDatetimeEvent(_DeprecationTestCase):
# 2017-08-11, 1.14.0
def test_3_tuple(self):
for cls in (np.datetime64, np.timedelta64):
# two valid uses - (unit, num) and (unit, num, den, None)
self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
# trying to use the event argument, removed in 1.7.0, is deprecated
# it used to be a uint8
self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
# 2017-09-25, 1.14.0
message = '.*truth value of an empty array is ambiguous.*'
def test_1d(self):
self.assert_deprecated(bool, args=(np.array([]),))
def test_2d(self):
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
class TestBincount(_DeprecationTestCase):
# 2017-06-01, 1.14.0
def test_bincount_minlength(self):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestAlen(_DeprecationTestCase):
# 2019-08-02, 1.18.0
def test_alen(self):
self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestSctypeNA(_VisibleDeprecationTestCase):
# 2018-06-24, 1.16
def test_sctypeNA(self):
self.assert_deprecated(lambda: np.sctypeNA['?'])
self.assert_deprecated(lambda: np.typeNA['?'])
self.assert_deprecated(lambda: np.typeNA.get('?'))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
# 2019-06-08, 1.17.0
# Tests should be moved to real tests when deprecation is done.
message = "string or file could not be read to its end"
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_data_file(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with tempfile.TemporaryFile(mode="w") as f:
x.tofile(f, sep=',', format='%.2f')
f.write(invalid_str)
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
# Should not raise:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
f.seek(0)
res = np.fromfile(f, sep=",", count=4)
assert_array_equal(res, x)
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_string(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
x_str = "1.51,2,3.51,4{}".format(invalid_str)
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
# The C-level API can use not fixed size, but 0 terminated strings,
# so test that as well:
bytestr = x_str.encode("ascii")
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
with assert_warns(DeprecationWarning):
# this is slightly strange, in that fromstring leaves data
# potentially uninitialized (would be good to error when all is
            # read, but count is larger than the actual data, maybe).
res = np.fromstring(x_str, sep=",", count=5)
assert_array_equal(res[:-1], x)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# Should not raise:
res = np.fromstring(x_str, sep=",", count=4)
assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
# empty kwargs prevents any state actually changing which would break
# other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
warning_cls = FutureWarning
# 2019-05-20, 1.17.0
def test_shape_1_fields(self):
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
# 2019-05-26, 1.17.0
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
| MSeifert04/numpy | numpy/core/tests/test_deprecations.py | Python | bsd-3-clause | 24,541 |
__author__ = 'Bohdan Mushkevych'
from bson import ObjectId
from threading import RLock
from db.model.raw_data import *
from db.model.site_statistics import SiteStatistics
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
class SiteDao(object):
""" Thread-safe Data Access Object for site_XXX table/collection """
def __init__(self, logger):
super(SiteDao, self).__init__()
self.logger = logger
self.lock = RLock()
self.ds = ds_manager.ds_factory(logger)
@thread_safe
def get_one(self, collection_name, domain_name, timeperiod):
collection = self.ds.connection(collection_name)
query = {DOMAIN_NAME: domain_name,
TIMEPERIOD: timeperiod}
document = collection.find_one(query)
if document is None:
raise LookupError('MongoDB has no site record in %s for (%s, %s)'
% (collection_name, domain_name, timeperiod))
return SiteStatistics.from_json(document)
@thread_safe
def update(self, collection_name, instance, is_safe):
""" method finds Site Statistics record and update it DB representation """
assert isinstance(instance, SiteStatistics)
collection = self.ds.connection(collection_name)
document = instance.document
if instance.db_id:
document['_id'] = ObjectId(instance.db_id)
instance.db_id = collection.save(document, safe=is_safe)
return instance.db_id
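# Illustrative usage (the collection name, domain and timeperiod values below
# are hypothetical):
#
#     site_dao = SiteDao(logger)
#     stats = site_dao.get_one('site_hourly', 'example.com', '2015010100')
#     site_dao.update('site_hourly', stats, is_safe=True)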
| eggsandbeer/scheduler | db/dao/site_dao.py | Python | bsd-3-clause | 1,526 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pyLightcurve',
version='0.1.0',
description='A python package for handling (potentially multispectral) time series observation data in astronomy.',
long_description=readme + '\n\n' + history,
author='Daniel Williams',
author_email='[email protected]',
url='https://github.com/transientlunatic/lightcurve',
packages=[
'pylightcurve',
],
package_dir={'pylightcurve':
'pylightcurve'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='pylightcurve',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
) | transientlunatic/pylightcurve | setup.py | Python | bsd-3-clause | 1,562 |
import sys
import time
import logging
import threading
import GPy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from GPhelpers import *
from IPython.display import display
from poap.strategy import FixedSampleStrategy
from poap.strategy import InputStrategy
from poap.tcpserve import ThreadedTCPServer
from poap.tcpserve import SimpleSocketWorker
from scipy.stats import norm
class GPsim:
def __init__(self, batchsize=100, prunerate=.2, timebound=10, money=1000, fevalcost=1):
self.batchsize = batchsize
self.prunerate = prunerate
self.timebound = timebound
self.money = money
self.fevalcost = fevalcost
def run(self, f, bounds):
breakcond = 1e-5
# run initial batch, deduct money
self.money = self.money - self.batchsize*self.fevalcost
eval_logX = np.random.uniform(bounds[0], bounds[1], self.batchsize)
eval_logY = f(eval_logX)
ybest = np.amin(eval_logY)
while(self.money > 0):
# calc Gaussian Process
m = calcGP(eval_logX, eval_logY)
# calc batchsize, break if necessary
self.batchsize = np.floor(self.batchsize*(1-self.prunerate))
if(self.batchsize < 2):
print "Batch Size reached Minimum"
break
# Deduct Money, evaluate new batch
self.money = self.money - self.batchsize*self.fevalcost
X = batchNewEvals_EI(m, bounds=1, batchsize=self.batchsize, fidelity=1000)
Y = f(X)
eval_logY = np.concatenate([eval_logY, Y])
eval_logX = np.concatenate([eval_logX, X])
ynew = np.amin(eval_logY)
if(np.absolute(ynew - ybest) < breakcond):
print "Break Condition Reached, Improvement Halted"
print "Num evals:", eval_logY.size
break
plotGP(m)
print | ericlee0803/surrogate-GCP | gp/GPsim.py | Python | bsd-3-clause | 1,664 |
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
from . RunnerBase import RunnerBaseClass
from .. Analysers.GPUVerify import GPUVerifyAnalyser
import logging
import os
import psutil
import re
import sys
import yaml
_logger = logging.getLogger(__name__)
class GPUVerifyRunnerException(Exception):
def __init__(self, msg):
self.msg = msg
class GPUVerifyRunner(RunnerBaseClass):
softTimeoutDiff = 5
def __init__(self, boogieProgram, workingDirectory, rc):
_logger.debug('Initialising {}'.format(boogieProgram))
super(GPUVerifyRunner, self).__init__(boogieProgram, workingDirectory, rc)
# Sanity checks
# TODO
self.softTimeout = self.maxTimeInSeconds
if self.maxTimeInSeconds > 0:
      # We use GPUVerify's own timeout mechanism to enforce the requested
      # (soft) timeout, and enforce a hard timeout slightly later ourselves
self.maxTimeInSeconds = self.maxTimeInSeconds + self.softTimeoutDiff
if not self.toolPath.endswith('.py'):
raise GPUVerifyRunnerException(
'toolPath needs to be the GPUVerify python script')
@property
def name(self):
return "gpuverify"
def _buildResultDict(self):
results = super(GPUVerifyRunner, self)._buildResultDict()
# TODO: Remove this. It's now redundant
results['hit_hard_timeout'] = results['backend_timeout']
return results
def GetNewAnalyser(self, resultDict):
return GPUVerifyAnalyser(resultDict)
def run(self):
# Run using python interpreter
cmdLine = [ sys.executable, self.toolPath ]
cmdLine.append('--timeout={}'.format(self.softTimeout))
# Note we ignore self.entryPoint
_logger.info('Ignoring entry point {}'.format(self.entryPoint))
# GPUVerify needs PATH environment variable set
env = {}
path = os.getenv('PATH')
if path == None:
path = ""
env['PATH'] = path
cmdLine.extend(self.additionalArgs)
# Add the boogie source file as last arg
cmdLine.append(self.programPathArgument)
backendResult = self.runTool(cmdLine,
isDotNet=False,
envExtra=env)
if backendResult.outOfTime:
_logger.warning('GPUVerify hit hard timeout')
def get():
return GPUVerifyRunner
| symbooglix/boogie-runner | BoogieRunner/Runners/GPUVerify.py | Python | bsd-3-clause | 2,173 |
import re
from collections import namedtuple
from typing import Optional
from esteid import settings
from esteid.constants import Languages
from esteid.exceptions import InvalidIdCode, InvalidParameter
from esteid.signing.types import InterimSessionData
from esteid.types import PredictableDict
from esteid.validators import id_code_ee_is_valid
PHONE_NUMBER_REGEXP = settings.MOBILE_ID_PHONE_NUMBER_REGEXP
AuthenticateResult = namedtuple(
"AuthenticateResult",
[
"session_id",
"hash_type",
"hash_value",
"verification_code",
"hash_value_b64",
],
)
AuthenticateStatusResult = namedtuple(
"AuthenticateStatusResult",
[
"certificate", # DER-encoded certificate
"certificate_b64", # Base64-encoded DER-encoded certificate
],
)
SignResult = namedtuple(
"SignResult",
[
"session_id",
"digest",
"verification_code",
],
)
# Note: MobileID doesn't return a certificate for SignStatus. It is set from a previous call to `/certificate`
SignStatusResult = namedtuple(
"SignStatusResult",
[
"signature",
"signature_algorithm",
"certificate",
],
)
class UserInput(PredictableDict):
phone_number: str
id_code: str
language: Optional[str]
def is_valid(self, raise_exception=True):
result = super().is_valid(raise_exception=raise_exception)
if result:
if not self.phone_number or PHONE_NUMBER_REGEXP and not re.match(PHONE_NUMBER_REGEXP, self.phone_number):
if not raise_exception:
return False
raise InvalidParameter(param="phone_number")
if not id_code_ee_is_valid(self.id_code):
if not raise_exception:
return False
raise InvalidIdCode
if not (self.get("language") and self.language in Languages.ALL):
self.language = settings.MOBILE_ID_DEFAULT_LANGUAGE
return result
class MobileIdSessionData(InterimSessionData):
session_id: str
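# Illustrative only: the phone number and id code below are commonly used
# Mobile-ID demo test values; real input must satisfy
# MOBILE_ID_PHONE_NUMBER_REGEXP and Estonian id code validation.
#
#     data = UserInput(phone_number="+37200000766", id_code="60001019906")
#     data.is_valid()   # raises InvalidParameter / InvalidIdCode on bad input
#     data.language     # falls back to MOBILE_ID_DEFAULT_LANGUAGE when unset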
| thorgate/django-esteid | esteid/mobileid/types.py | Python | bsd-3-clause | 2,083 |
from PyInstaller.utils.hooks import collect_submodules, collect_data_files
datas = collect_data_files('vispy')
| informatics-isi-edu/synspy | hook-vispy.py | Python | bsd-3-clause | 111 |
from typing import Callable, Iterable, List, Optional
import os
import numpy as np
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def image_reader(prefix="",
pad_w: Optional[int] = None,
pad_h: Optional[int] = None,
rescale_w: bool = False,
rescale_h: bool = False,
keep_aspect_ratio: bool = False,
mode: str = 'RGB') -> Callable:
"""Get a reader of images loading them from a list of pahts.
Args:
        prefix: Prefix of the paths that are listed in the image list files.
pad_w: Width to which the images will be padded/cropped/resized.
        pad_h: Height to which the images will be padded/cropped/resized.
rescale_w: If true, image is rescaled to have given width. It is
cropped/padded otherwise.
rescale_h: If true, image is rescaled to have given height. It is
cropped/padded otherwise.
        keep_aspect_ratio: Flag whether the aspect ratio should be kept during
rescaling. Can only be used if both width and height are rescaled.
        mode: PIL image mode used when loading the images (e.g. 'RGB' or 'L');
            see the Pillow documentation for details.
Returns:
        The reader function that takes a list of image-list files (each line an
        image path relative to the provided prefix) and yields the images as
        numpy arrays of shape pad_h x pad_w x number of channels.
"""
if not rescale_w and not rescale_h and keep_aspect_ratio:
raise ValueError(
"It does not make sense to keep the aspect ratio while not "
"rescaling the image.")
if rescale_w != rescale_h and not keep_aspect_ratio:
raise ValueError(
"While rescaling only one side, aspect ratio must be kept, "
"was set to false.")
def load(list_files: List[str]) -> Iterable[np.ndarray]:
for list_file in list_files:
with open(list_file) as f_list:
for i, image_file in enumerate(f_list):
path = os.path.join(prefix, image_file.rstrip())
if not os.path.exists(path):
raise Exception(
("Image file '{}' no."
"{} does not exist.").format(path, i + 1))
try:
image = Image.open(path).convert(mode)
except IOError:
image = Image.new(mode, (pad_w, pad_h))
image = _rescale_or_crop(image, pad_w, pad_h,
rescale_w, rescale_h,
keep_aspect_ratio)
image_np = np.array(image)
if len(image_np.shape) == 2:
channels = 1
image_np = np.expand_dims(image_np, 2)
elif len(image_np.shape) == 3:
channels = image_np.shape[2]
else:
raise ValueError(
("Image should have either 2 (black and white) "
"or three dimensions (color channels), has {} "
"dimension.").format(len(image_np.shape)))
yield _pad(image_np, pad_w, pad_h, channels)
return load
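# Illustrative usage sketch (not part of the original module; the prefix, file
# name and dimensions below are hypothetical):
#
#     reader = image_reader(prefix="data/images", pad_w=256, pad_h=256,
#                           rescale_w=True, rescale_h=True,
#                           keep_aspect_ratio=True)
#     for image_array in reader(["train_images.txt"]):
#         ...  # each array has shape (256, 256, channels)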
def imagenet_reader(prefix: str,
target_width: int = 227,
target_height: int = 227) -> Callable:
"""Load and prepare image the same way as Caffe scripts."""
def load(list_files: List[str]) -> Iterable[np.ndarray]:
for list_file in list_files:
with open(list_file) as f_list:
for i, image_file in enumerate(f_list):
path = os.path.join(prefix, image_file.rstrip())
if not os.path.exists(path):
raise Exception(
"Image file '{}' no. {} does not exist."
.format(path, i + 1))
image = Image.open(path).convert('RGB')
width, height = image.size
if width == height:
_rescale_or_crop(image, target_width, target_height,
True, True, False)
elif height < width:
_rescale_or_crop(
image,
int(width * float(target_height) / height),
target_height, True, True, False)
else:
_rescale_or_crop(
image, target_width,
int(height * float(target_width) / width),
True, True, False)
cropped_image = _crop(image, target_width, target_height)
res = _pad(np.array(cropped_image),
target_width, target_height, 3)
                    # _pad returns an array of shape (height, width, channels).
                    assert res.shape == (target_height, target_width, 3)
yield res
return load
def _rescale_or_crop(image: Image.Image, pad_w: int, pad_h: int,
rescale_w: bool, rescale_h: bool,
keep_aspect_ratio: bool) -> Image.Image:
"""Rescale and/or crop the image based on the rescale configuration."""
orig_w, orig_h = image.size
if orig_w == pad_w and orig_h == pad_h:
return image
if rescale_w and rescale_h and not keep_aspect_ratio:
image.thumbnail((pad_w, pad_h))
elif rescale_w and rescale_h and keep_aspect_ratio:
        # Fit inside (pad_w, pad_h): scale by the limiting dimension.
        ratio = min(pad_h / orig_h, pad_w / orig_w)
image.thumbnail((int(orig_w * ratio), int(orig_h * ratio)))
elif rescale_w and not rescale_h:
orig_w, orig_h = image.size
if orig_w != pad_w:
ratio = pad_w / orig_w
image.thumbnail((pad_w, int(orig_h * ratio)))
elif rescale_h and not rescale_w:
orig_w, orig_h = image.size
if orig_h != pad_h:
ratio = pad_h / orig_h
image.thumbnail((int(orig_w * ratio), pad_h))
return _crop(image, pad_w, pad_h)
def _crop(image: Image.Image, pad_w: int, pad_h: int) -> Image.Image:
orig_w, orig_h = image.size
w_shift = max(orig_w - pad_w, 0) // 2
h_shift = max(orig_h - pad_h, 0) // 2
even_w = max(orig_w - pad_w, 0) % 2
even_h = max(orig_h - pad_h, 0) % 2
return image.crop(
(w_shift, h_shift, orig_w - w_shift - even_w,
orig_h - h_shift - even_h))
def _pad(image: np.ndarray, pad_w: int, pad_h: int,
channels: int) -> np.ndarray:
img_h, img_w = image.shape[:2]
image_padded = np.zeros((pad_h, pad_w, channels))
image_padded[:img_h, :img_w, :] = image
return image_padded
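# Illustrative sketch of the padding helper (not part of the original module):
# a 100x120 RGB image padded to 256x256 keeps its pixels in the top-left corner
# and is zero-filled elsewhere:
#
#     _pad(np.zeros((100, 120, 3)), pad_w=256, pad_h=256, channels=3).shape
#     # -> (256, 256, 3)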
| bastings/neuralmonkey | neuralmonkey/readers/image_reader.py | Python | bsd-3-clause | 6,848 |
import SCPI
import time
import numpy
totalSamples = 10
sampleFreq = 100
#freq= SCPI.SCPI("172.17.5.121")
dmm = SCPI.SCPI("172.17.5.131")
#setup freq gen
#freq.setSquare()
#freq.setVoltage(0,3)
#freq.setFrequency(sampleFreq)
#setup voltage meter
#dmm.setVoltageDC("10V", "MAX")
# set external trigger
#dmm.setTriggerSource("INT")
#dmm.setTriggerCount(str(totalSamples))
# wait for trigger
dmm.setInitiate()
dmm.setCurrentDC("500mA", "MAX")
dmm.setTriggerSource("INT")
dmm.setTriggerCount(str(totalSamples))
dmm.setInitiate()
time.sleep(1)
#freq.setOutput(1)
currentMeasurements = []
#voltageMeasurements = []
while 1:
if len(currentMeasurements) < totalSamples:
currentMeasurements += dmm.getMeasurements()
if (len(currentMeasurements) >= totalSamples):
break
time.sleep(0.1)
#freq.setOutput(0)
s = 0
for i in range(0, totalSamples):
print float(currentMeasurements[i])
#print "Average Power Consumption: ", s/float(totalSamples), "W avg volt: ", numpy.mean(voltageMeasurements), "V avg current: ", numpy.mean(currentMeasurements), "A"
| nesl/SCPI-Scripts | kei/ag-lib/power_measurement_suite_new.py | Python | bsd-3-clause | 1,078 |
from fanstatic import Library, Resource
import js.jquery
library = Library('jquery.socialshareprivacy', 'resources')
css = Resource(library, 'socialshareprivacy/socialshareprivacy.css')
socialshareprivacy = Resource(library, 'jquery.socialshareprivacy.js',
minified='jquery.socialshareprivacy.min.js',
depends=[js.jquery.jquery, css])
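# Illustrative usage sketch (not part of the original module): within a request
# handled by a fanstatic-wrapped WSGI application, needing the resource injects
# the widget's JS/CSS (and its jQuery dependency) into the rendered page:
#
#     from js.socialshareprivacy import socialshareprivacy
#     socialshareprivacy.need()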
| zerobuzz/js.socialshareprivacy | js/socialshareprivacy/__init__.py | Python | bsd-3-clause | 398 |
#!/usr/bin/python2.6
import sys, os, cinesync
if len(sys.argv) == 1:
print >>sys.stderr, 'Usage: %s <file.mov> ...' % sys.argv[0]
sys.exit(1)
# Create the session and add media from command-line arguments
session = cinesync.Session()
session.media = [cinesync.MediaFile(path) for path in sys.argv[1:]]
# Ask cineSync to add the session to its current state
cinesync.commands.open_session(session)
| jmah/cinesync_python | examples/Start Session.py | Python | bsd-3-clause | 409 |