Dataset columns (field: type, min to max):

body: stringlengths, 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: stringlengths, 1 to 16.8k
path: stringlengths, 5 to 230
name: stringlengths, 1 to 96
repository_name: stringlengths, 7 to 89
lang: stringclasses, 1 value
body_without_docstring: stringlengths, 20 to 98.2k
def init_host(self):
    """Perform any required initialization."""
    ctxt = context.get_admin_context()

    LOG.info(_LI('Starting volume driver %(driver_name)s (%(version)s)'),
             {'driver_name': self.driver.__class__.__name__,
              'version': self.driver.get_version()})
    try:
        self.driver.do_setup(ctxt)
        self.driver.check_for_setup_error()
    except Exception:
        LOG.exception(_LE('Failed to initialize driver.'),
                      resource={'type': 'driver',
                                'id': self.__class__.__name__})
        # Don't continue: the driver could not be initialized correctly.
        return

    self.driver.init_capabilities()

    volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
    snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
    self._sync_provider_info(ctxt, volumes, snapshots)

    try:
        self.stats['pools'] = {}
        self.stats.update({'allocated_capacity_gb': 0})
        for volume in volumes:
            # Available volumes are also counted into allocated capacity.
            if volume['status'] in ['in-use', 'available']:
                self._count_allocated_capacity(ctxt, volume)
                try:
                    if volume['status'] in ['in-use']:
                        self.driver.ensure_export(ctxt, volume)
                except Exception:
                    LOG.exception(_LE('Failed to re-export volume, '
                                      'setting to ERROR.'),
                                  resource=volume)
                    volume.status = 'error'
                    volume.save()
            elif volume['status'] in ('downloading', 'creating'):
                LOG.warning(_LW('Detected volume stuck in %(curr_status)s '
                                'status, setting to ERROR.'),
                            {'curr_status': volume['status']},
                            resource=volume)
                if volume['status'] == 'downloading':
                    self.driver.clear_download(ctxt, volume)
                volume.status = 'error'
                volume.save()
            elif volume.status == 'uploading':
                # Set volume status to available or in-use.
                self.db.volume_update_status_based_on_attachment(
                    ctxt, volume.id)
            else:
                pass
        snapshots = objects.SnapshotList.get_by_host(
            ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
        for snapshot in snapshots:
            LOG.warning(_LW('Detected snapshot stuck in creating status, '
                            'setting to ERROR.'), resource=snapshot)
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()
    except Exception:
        LOG.exception(_LE('Error during re-export on driver init.'),
                      resource=volume)
        return

    self.driver.set_throttle()

    # At this point the driver is considered initialized.
    self.driver.set_initialized()

    for volume in volumes:
        if volume['status'] == 'deleting':
            if CONF.volume_service_inithost_offload:
                # Offload pending volume deletes to the threadpool so
                # they do not block the service from starting.
                self._add_to_threadpool(self.delete_volume, ctxt,
                                        volume['id'], volume=volume)
            else:
                # By default, delete volumes sequentially.
                self.delete_volume(ctxt, volume['id'], volume=volume)
            LOG.info(_LI('Resume volume delete completed successfully.'),
                     resource=volume)

    # Collect and publish service capabilities.
    self.publish_service_capabilities(ctxt)

    LOG.info(_LI('Driver initialization completed successfully.'),
             resource={'type': 'driver',
                       'id': self.driver.__class__.__name__})
-1,150,854,024,366,898,800
Perform any required initialization.
cinder/volume/manager.py
init_host
ISCAS-VDI/cinder-base
python
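init_host applies a small per-status recovery policy to volumes left over from before the service restart: count capacity for usable volumes, re-export attached ones, error out volumes stuck mid-transfer, and resume pending deletes. The sketch below restates that policy as a self-contained function; recover_action is a hypothetical name used for illustration only, not part of the manager.

# Hypothetical restatement of the per-status recovery policy applied by
# init_host above; the status strings match the ones the real code checks.
def recover_action(status):
    if status in ('in-use', 'available'):
        return 'count allocated capacity; re-export if in-use'
    if status in ('downloading', 'creating'):
        return 'clear download if downloading; set status to error'
    if status == 'uploading':
        return 'reset status based on current attachment'
    if status == 'deleting':
        return 'resume delete (optionally offloaded to a threadpool)'
    return 'leave unchanged'

for s in ('available', 'creating', 'uploading', 'deleting', 'error'):
    print('%-10s -> %s' % (s, recover_action(s)))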
def is_working(self):
    """Return if Manager is ready to accept requests.

    This is to inform Service class that in case of volume driver
    initialization failure the manager is actually down and not ready to
    accept any requests.
    """
    return self.driver.initialized
-7,833,758,696,326,255,000
Return if Manager is ready to accept requests. This is to inform Service class that in case of volume driver initialization failure the manager is actually down and not ready to accept any requests.
cinder/volume/manager.py
is_working
ISCAS-VDI/cinder-base
python
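is_working is a readiness probe: it reflects the driver's initialized flag, which init_host only sets after setup succeeds. A minimal sketch of that contract, with hypothetical stand-ins for the driver and manager:

# Stand-in classes illustrating the readiness contract; neither is the
# real cinder implementation.
class StubDriver(object):
    def __init__(self):
        self.initialized = False

class StubManager(object):
    def __init__(self, driver):
        self.driver = driver

    def is_working(self):
        return self.driver.initialized

driver = StubDriver()
manager = StubManager(driver)
print(manager.is_working())  # False: init_host has not completed yet
driver.initialized = True    # what driver.set_initialized() would do
print(manager.is_working())  # True: ready to accept requests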
def create_volume(self, context, volume_id, request_spec=None,
                  filter_properties=None, allow_reschedule=True,
                  volume=None):
    """Creates the volume."""
    if volume is None:
        volume = objects.Volume.get_by_id(context, volume_id)

    context_elevated = context.elevated()
    if filter_properties is None:
        filter_properties = {}
    if request_spec is None:
        request_spec = {}

    try:
        flow_engine = create_volume.get_flow(
            context_elevated,
            self,
            self.db,
            self.driver,
            self.scheduler_rpcapi,
            self.host,
            volume.id,
            allow_reschedule,
            context,
            request_spec,
            filter_properties,
            image_volume_cache=self.image_volume_cache)
    except Exception:
        msg = _('Create manager volume flow failed.')
        LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
        raise exception.CinderException(msg)

    snapshot_id = request_spec.get('snapshot_id')
    source_volid = request_spec.get('source_volid')
    source_replicaid = request_spec.get('source_replicaid')

    # Make sure the source (snapshot, volume or replica) cannot be
    # deleted while we are creating from it.
    if snapshot_id is not None:
        locked_action = '%s-%s' % (snapshot_id, 'delete_snapshot')
    elif source_volid is not None:
        locked_action = '%s-%s' % (source_volid, 'delete_volume')
    elif source_replicaid is not None:
        locked_action = '%s-%s' % (source_replicaid, 'delete_volume')
    else:
        locked_action = None

    def _run_flow():
        # Executes the create volume flow. If something goes wrong the
        # flow reverts the work done and reraises.
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()

    @utils.synchronized(locked_action, external=True)
    def _run_flow_locked():
        _run_flow()

    rescheduled = False
    vol_ref = None

    try:
        if locked_action is None:
            _run_flow()
        else:
            _run_flow_locked()
    finally:
        try:
            vol_ref = flow_engine.storage.fetch('volume_ref')
        except tfe.NotFound:
            # No vol_ref means the flow was reverted; check whether it
            # was rescheduled instead.
            try:
                rescheduled = flow_engine.storage.get_revert_result(
                    create_volume.OnFailureRescheduleTask.make_name(
                        [create_volume.ACTION]))
            except tfe.NotFound:
                pass

        if not rescheduled:
            if not vol_ref:
                vol_ref = objects.Volume.get_by_id(context, volume.id)
            self._update_allocated_capacity(vol_ref)

    LOG.info(_LI('Created volume successfully.'), resource=vol_ref)
    return vol_ref.id
1,890,941,168,616,453,400
Creates the volume.
cinder/volume/manager.py
create_volume
ISCAS-VDI/cinder-base
python
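create_volume derives an external lock name from the request spec so the source snapshot, volume, or replica cannot be deleted while the new volume is built from it. A stand-alone restatement of that derivation (locked_action_for is a hypothetical helper name):

# The lock name pairs the source resource id with the delete operation
# it must exclude, exactly as in the code above.
def locked_action_for(request_spec):
    snapshot_id = request_spec.get('snapshot_id')
    source_volid = request_spec.get('source_volid')
    source_replicaid = request_spec.get('source_replicaid')
    if snapshot_id is not None:
        return '%s-%s' % (snapshot_id, 'delete_snapshot')
    elif source_volid is not None:
        return '%s-%s' % (source_volid, 'delete_volume')
    elif source_replicaid is not None:
        return '%s-%s' % (source_replicaid, 'delete_volume')
    return None

print(locked_action_for({'snapshot_id': 'snap-1'}))  # snap-1-delete_snapshot
print(locked_action_for({}))                         # None -> run unlocked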
@locked_volume_operation
def delete_volume(self, context, volume_id,
                  unmanage_only=False, volume=None, cascade=False):
    """Deletes and unexports volume.

    1. Delete a volume (normal case)
       Delete a volume and update quotas.

    2. Delete a migration volume
       If deleting the volume in a migration, we want to skip
       quotas but we need database updates for the volume.
    """
    context = context.elevated()

    try:
        if volume is None:
            volume = objects.Volume.get_by_id(context, volume_id)
        else:
            volume.refresh()
    except exception.VolumeNotFound:
        # The volume is already gone; treat the delete as a no-op.
        LOG.debug('Attempted delete of non-existent volume: %s', volume_id)
        return

    if context.project_id != volume.project_id:
        project_id = volume.project_id
    else:
        project_id = context.project_id

    if volume['attach_status'] == 'attached':
        # Volume is still attached, need to detach first.
        raise exception.VolumeAttached(volume_id=volume_id)
    if vol_utils.extract_host(volume.host) != self.host:
        raise exception.InvalidVolume(
            reason=_('volume is not local to this node'))

    if unmanage_only and cascade:
        raise exception.Invalid(
            reason=_('Unmanage and cascade delete options '
                     'are mutually exclusive.'))

    is_migrating = volume.migration_status not in (None, 'error', 'success')
    is_migrating_dest = (is_migrating and
                         volume.migration_status.startswith('target:'))
    self._notify_about_volume_usage(context, volume, 'delete.start')
    try:
        utils.require_driver_initialized(self.driver)
        self.driver.remove_export(context, volume)
        if unmanage_only:
            self.driver.unmanage(volume)
        elif cascade:
            LOG.debug('Performing cascade delete.')
            snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                volume.id)
            for s in snapshots:
                if s.status != 'deleting':
                    self._clear_db(context, is_migrating_dest, volume,
                                   'error_deleting')
                    msg = (_("Snapshot %(id)s was found in state "
                             "%(state)s rather than 'deleting' during "
                             "cascade delete.") % {'id': s.id,
                                                   'state': s.status})
                    raise exception.InvalidSnapshot(reason=msg)
                self.delete_snapshot(context, s)
            LOG.debug('Snapshots deleted, issuing volume delete')
            self.driver.delete_volume(volume)
        else:
            self.driver.delete_volume(volume)
    except exception.VolumeIsBusy:
        LOG.error(_LE('Unable to delete busy volume.'), resource=volume)
        self._clear_db(context, is_migrating_dest, volume, 'available')
        return
    except Exception:
        with excutils.save_and_reraise_exception():
            self._clear_db(context, is_migrating_dest, volume,
                           'error_deleting')

    # If deleting a source/destination volume in a migration, skip quotas.
    if not is_migrating:
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        volume.volume_type_id)
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE('Failed to update usages deleting volume.'),
                          resource=volume)

    self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
    volume.destroy()

    if not is_migrating:
        self._notify_about_volume_usage(context, volume, 'delete.end')

        # Commit the reservations.
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        pool = vol_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume: fall back to the default pool name.
            pool = (self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume.host, 'pool', True))
        size = volume.size

        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] -= size
        except KeyError:
            self.stats['pools'][pool] = dict(allocated_capacity_gb=-size)

        self.publish_service_capabilities(context)

    LOG.info(_LI('Deleted volume successfully.'), resource=volume)
8,906,263,107,510,917,000
Deletes and unexports volume. 1. Delete a volume (normal case): delete a volume and update quotas. 2. Delete a migration volume: if deleting the volume in a migration, we want to skip quotas but we need database updates for the volume.
cinder/volume/manager.py
delete_volume
ISCAS-VDI/cinder-base
python
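Outside of migrations, delete_volume hands quota back by reserving negative deltas, then adjusts the per-pool allocated-capacity stats, creating the pool entry on first touch. A self-contained sketch of both bookkeeping steps (QUOTAS itself is not modeled; the pool name is hypothetical):

# Quota deltas: one volume fewer, its size subtracted from gigabytes.
def delete_reserve_opts(volume_size_gb):
    return {'volumes': -1, 'gigabytes': -volume_size_gb}

print(delete_reserve_opts(20))  # {'volumes': -1, 'gigabytes': -20}

# Pool accounting with the same KeyError fallback as the code above.
stats = {'pools': {}}
pool, size = 'backend1#pool0', 20
try:
    stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
    stats['pools'][pool] = dict(allocated_capacity_gb=-size)
print(stats['pools'][pool])  # {'allocated_capacity_gb': -20}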
def create_snapshot(self, context, volume_id, snapshot):
    """Creates and exports the snapshot."""
    context = context.elevated()

    self._notify_about_snapshot_usage(context, snapshot, 'create.start')
    try:
        utils.require_driver_initialized(self.driver)
        snapshot.context = context
        model_update = self.driver.create_snapshot(snapshot)
        if model_update:
            snapshot.update(model_update)
            snapshot.save()
    except Exception:
        with excutils.save_and_reraise_exception():
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()

    vol_ref = self.db.volume_get(context, volume_id)
    if vol_ref.bootable:
        try:
            self.db.volume_glance_metadata_copy_to_snapshot(
                context, snapshot.id, volume_id)
        except exception.GlanceMetadataNotFound:
            # If the volume was not created from an image, there is no
            # glance metadata to copy.
            pass
        except exception.CinderException as ex:
            LOG.exception(_LE("Failed updating snapshot metadata using "
                              "the provided volume's %(volume_id)s "
                              "metadata"),
                          {'volume_id': volume_id}, resource=snapshot)
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()
            raise exception.MetadataCopyFailure(reason=six.text_type(ex))

    snapshot.status = fields.SnapshotStatus.AVAILABLE
    snapshot.progress = '100%'
    snapshot.save()

    self._notify_about_snapshot_usage(context, snapshot, 'create.end')
    LOG.info(_LI('Create snapshot completed successfully'),
             resource=snapshot)
    return snapshot.id
-3,463,493,398,277,073,400
Creates and exports the snapshot.
cinder/volume/manager.py
create_snapshot
ISCAS-VDI/cinder-base
python
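The driver may hand back a model_update dict that create_snapshot merges into the snapshot before marking it available. A sketch of that merge with a plain dict standing in for the snapshot object (all values hypothetical):

snapshot = {'id': 'snap-1', 'status': 'creating', 'progress': '0%'}
model_update = {'provider_location': 'backend-specific-ref'}  # from driver

if model_update:
    snapshot.update(model_update)
# ...driver work succeeded...
snapshot['status'] = 'available'
snapshot['progress'] = '100%'
print(snapshot)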
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
    """Deletes and unexports snapshot."""
    context = context.elevated()
    snapshot._context = context
    project_id = snapshot.project_id

    self._notify_about_snapshot_usage(context, snapshot, 'delete.start')
    try:
        utils.require_driver_initialized(self.driver)
        snapshot.context = context
        snapshot.save()
        if unmanage_only:
            self.driver.unmanage_snapshot(snapshot)
        else:
            self.driver.delete_snapshot(snapshot)
    except exception.SnapshotIsBusy:
        LOG.error(_LE('Delete snapshot failed, due to snapshot busy.'),
                  resource=snapshot)
        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.save()
        return
    except Exception:
        with excutils.save_and_reraise_exception():
            snapshot.status = fields.SnapshotStatus.ERROR_DELETING
            snapshot.save()

    try:
        if CONF.no_snapshot_gb_quota:
            reserve_opts = {'snapshots': -1}
        else:
            reserve_opts = {
                'snapshots': -1,
                'gigabytes': -snapshot.volume_size,
            }
        volume_ref = self.db.volume_get(context, snapshot.volume_id)
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume_ref.get('volume_type_id'))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      **reserve_opts)
    except Exception:
        reservations = None
        LOG.exception(_LE('Update snapshot usages failed.'),
                      resource=snapshot)
    self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
    snapshot.destroy()
    self._notify_about_snapshot_usage(context, snapshot, 'delete.end')

    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)
    LOG.info(_LI('Delete snapshot completed successfully'),
             resource=snapshot)
-1,303,269,633,822,586,000
Deletes and unexports snapshot.
cinder/volume/manager.py
delete_snapshot
ISCAS-VDI/cinder-base
python
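delete_snapshot only returns gigabytes to quota when snapshot sizes count against it; CONF.no_snapshot_gb_quota disables that. A sketch of the branch with the flag passed in rather than read from CONF:

def snapshot_reserve_opts(no_snapshot_gb_quota, volume_size_gb):
    if no_snapshot_gb_quota:
        return {'snapshots': -1}
    return {'snapshots': -1, 'gigabytes': -volume_size_gb}

print(snapshot_reserve_opts(True, 10))   # {'snapshots': -1}
print(snapshot_reserve_opts(False, 10))  # gigabytes returned as well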
def attach_volume(self, context, volume_id, instance_uuid, host_name,
                  mountpoint, mode):
    """Updates db to show volume is attached."""
    @utils.synchronized(volume_id, external=True)
    def do_attach():
        volume = self.db.volume_get(context, volume_id)
        volume_metadata = self.db.volume_admin_metadata_get(
            context.elevated(), volume_id)
        if volume['status'] == 'attaching':
            if (volume_metadata.get('attached_mode') and
                    volume_metadata.get('attached_mode') != mode):
                raise exception.InvalidVolume(
                    reason=_('being attached by different mode'))

        if (volume['status'] == 'in-use' and not volume['multiattach']
                and not volume['migration_status']):
            raise exception.InvalidVolume(
                reason=_('volume is already attached'))

        host_name_sanitized = (utils.sanitize_hostname(host_name)
                               if host_name else None)
        if instance_uuid:
            attachments = (
                self.db.volume_attachment_get_all_by_instance_uuid(
                    context, volume_id, instance_uuid))
        else:
            attachments = self.db.volume_attachment_get_all_by_host(
                context, volume_id, host_name_sanitized)
        if attachments:
            self.db.volume_update(context, volume_id,
                                  {'status': 'in-use'})
            return

        self._notify_about_volume_usage(context, volume, 'attach.start')
        values = {'volume_id': volume_id,
                  'attach_status': 'attaching'}

        attachment = self.db.volume_attach(context.elevated(), values)
        volume_metadata = self.db.volume_admin_metadata_update(
            context.elevated(), volume_id,
            {'attached_mode': mode}, False)

        attachment_id = attachment['id']
        if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
            self.db.volume_attachment_update(
                context, attachment_id,
                {'attach_status': 'error_attaching'})
            raise exception.InvalidUUID(uuid=instance_uuid)

        volume = self.db.volume_get(context, volume_id)

        if volume_metadata.get('readonly') == 'True' and mode != 'ro':
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
            self.message_api.create(
                context, defined_messages.ATTACH_READONLY_VOLUME,
                context.project_id, resource_type=resource_types.VOLUME,
                resource_uuid=volume_id)
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume_id)

        try:
            utils.require_driver_initialized(self.driver)

            LOG.debug('Attaching volume %(volume_id)s to instance '
                      '%(instance)s at mountpoint %(mount)s on host '
                      '%(host)s.',
                      {'volume_id': volume_id, 'instance': instance_uuid,
                       'mount': mountpoint, 'host': host_name_sanitized},
                      resource=volume)
            self.driver.attach_volume(context,
                                      volume,
                                      instance_uuid,
                                      host_name_sanitized,
                                      mountpoint)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_attachment_update(
                    context, attachment_id,
                    {'attach_status': 'error_attaching'})

        volume = self.db.volume_attached(context.elevated(),
                                         attachment_id,
                                         instance_uuid,
                                         host_name_sanitized,
                                         mountpoint,
                                         mode)
        self._notify_about_volume_usage(context, volume, 'attach.end')
        LOG.info(_LI('Attach volume completed successfully.'),
                 resource=volume)
        return self.db.volume_attachment_get(context, attachment_id)
    return do_attach()
-8,125,107,459,446,559,000
Updates db to show volume is attached.
cinder/volume/manager.py
attach_volume
ISCAS-VDI/cinder-base
python
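The inner do_attach function is serialized per volume id with an external (file-based) lock, so concurrent attach requests for the same volume cannot interleave. A runnable sketch of the same pattern using oslo.concurrency directly, which cinder's utils.synchronized is built on; the lock_path here is an assumption for the example:

from oslo_concurrency import lockutils

volume_id = 'vol-1'  # hypothetical id; the real code locks on volume_id

@lockutils.synchronized(volume_id, external=True, lock_path='/tmp')
def do_attach():
    # At most one attach for this volume id runs at a time across
    # processes sharing the lock path.
    return 'attached'

print(do_attach())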
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
    """Updates db to show volume is detached."""
    volume = self.db.volume_get(context, volume_id)
    attachment = None
    if attachment_id:
        try:
            attachment = self.db.volume_attachment_get(context,
                                                       attachment_id)
        except exception.VolumeAttachmentNotFound:
            LOG.info(_LI('Volume detach called, but volume not attached.'),
                     resource=volume)
            self.db.volume_detached(context, volume_id, attachment_id)
            return
    else:
        attachments = self.db.volume_attachment_get_all_by_volume_id(
            context, volume_id)
        if len(attachments) > 1:
            msg = _('Detach volume failed: More than one attachment, '
                    'but no attachment_id provided.')
            LOG.error(msg, resource=volume)
            raise exception.InvalidVolume(reason=msg)
        elif len(attachments) == 1:
            attachment = attachments[0]
        else:
            LOG.info(_LI('Volume detach called, but volume not attached.'),
                     resource=volume)
            self.db.volume_update(context, volume_id,
                                  {'status': 'available',
                                   'attach_status': 'detached'})
            return

    self._notify_about_volume_usage(context, volume, 'detach.start')
    try:
        utils.require_driver_initialized(self.driver)
        LOG.debug('Detaching volume %(volume_id)s from instance '
                  '%(instance)s.',
                  {'volume_id': volume_id,
                   'instance': attachment.get('instance_uuid')},
                  resource=volume)
        self.driver.detach_volume(context, volume, attachment)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_attachment_update(
                context, attachment.get('id'),
                {'attach_status': 'error_detaching'})

    self.db.volume_detached(context.elevated(), volume_id,
                            attachment.get('id'))
    self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                         'attached_mode')

    volume = self.db.volume_get(context, volume_id)
    try:
        utils.require_driver_initialized(self.driver)
        self.driver.remove_export(context.elevated(), volume)
    except exception.DriverNotInitialized:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Detach volume failed, due to '
                              'uninitialized driver.'),
                          resource=volume)
    except Exception as ex:
        LOG.exception(_LE('Detach volume failed, due to '
                          'remove-export failure.'),
                      resource=volume)
        raise exception.RemoveExportException(volume=volume_id,
                                              reason=six.text_type(ex))

    self._notify_about_volume_usage(context, volume, 'detach.end')
    LOG.info(_LI('Detach volume completed successfully.'), resource=volume)
3,415,520,564,871,494,000
Updates db to show volume is detached.
cinder/volume/manager.py
detach_volume
ISCAS-VDI/cinder-base
python
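Before touching the driver, detach_volume resolves which attachment record to act on: an explicit attachment_id wins; otherwise there must be exactly one attachment, more than one is an error, and zero is a no-op. Restated as a pure function for illustration (resolve_attachment is a hypothetical name):

def resolve_attachment(attachment_id, attachments):
    if attachment_id is not None:
        return ('lookup', attachment_id)
    if len(attachments) > 1:
        return ('error', 'ambiguous: attachment_id required')
    if len(attachments) == 1:
        return ('use', attachments[0])
    return ('noop', 'volume not attached')

print(resolve_attachment(None, ['a1']))        # ('use', 'a1')
print(resolve_attachment(None, ['a1', 'a2']))  # ('error', ...)
print(resolve_attachment('a2', ['a1', 'a2']))  # ('lookup', 'a2')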
def _create_image_cache_volume_entry(self, ctx, volume_ref,
                                     image_id, image_meta):
    """Create a new image-volume and cache entry for it.

    This assumes that the image has already been downloaded and stored
    in the volume described by the volume_ref.
    """
    image_volume = None
    try:
        if not self.image_volume_cache.ensure_space(
                ctx, volume_ref['size'], volume_ref['host']):
            LOG.warning(_LW('Unable to ensure space for image-volume in '
                            'cache. Will skip creating entry for image '
                            '%(image)s on host %(host)s.'),
                        {'image': image_id, 'host': volume_ref['host']})
            return

        image_volume = self._clone_image_volume(ctx, volume_ref,
                                                image_meta)
        if not image_volume:
            LOG.warning(_LW('Unable to clone image_volume for image '
                            '%(image_id)s will not create cache entry.'),
                        {'image_id': image_id})
            return

        self.image_volume_cache.create_cache_entry(
            ctx, image_volume, image_id, image_meta)
    except exception.CinderException as e:
        LOG.warning(_LW('Failed to create new image-volume cache entry. '
                        'Error: %(exception)s'), {'exception': e})
        if image_volume:
            self.delete_volume(ctx, image_volume.id)
-5,460,950,307,200,344,000
Create a new image-volume and cache entry for it. This assumes that the image has already been downloaded and stored in the volume described by the volume_ref.
cinder/volume/manager.py
_create_image_cache_volume_entry
ISCAS-VDI/cinder-base
python
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
                                         image_meta):
    """Create a cloned volume and register its location to the image."""
    if (image_meta['disk_format'] != 'raw' or
            image_meta['container_format'] != 'bare'):
        return False

    image_volume_context = ctx
    if self.driver.configuration.image_upload_use_internal_tenant:
        internal_ctx = context.get_internal_tenant_context()
        if internal_ctx:
            image_volume_context = internal_ctx

    image_volume = self._clone_image_volume(image_volume_context,
                                            volume,
                                            image_meta)
    if not image_volume:
        return False

    uri = 'cinder://%s' % image_volume.id
    image_registered = None
    try:
        image_registered = image_service.add_location(
            ctx, image_meta['id'], uri, {})
    except (exception.NotAuthorized, exception.Invalid,
            exception.NotFound):
        LOG.exception(_LE('Failed to register image volume location '
                          '%(uri)s.'), {'uri': uri})

    if not image_registered:
        LOG.warning(_LW('Registration of image volume URI %(uri)s '
                        'to image %(image_id)s failed.'),
                    {'uri': uri, 'image_id': image_meta['id']})
        try:
            self.delete_volume(image_volume_context, image_volume)
        except exception.CinderException:
            LOG.exception(_LE('Could not delete failed image volume '
                              '%(id)s.'), {'id': image_volume.id})
        return False

    image_volume_meta = {'glance_image_id': image_meta['id'],
                         'image_owner': ctx.project_id}
    self.db.volume_metadata_update(image_volume_context,
                                   image_volume.id,
                                   image_volume_meta,
                                   False)
    return True
1,198,232,022,801,254,700
Create a cloned volume and register its location to the image.
cinder/volume/manager.py
_clone_image_volume_and_add_location
ISCAS-VDI/cinder-base
python
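Only raw/bare images qualify for this path, and the location registered with Glance is a cinder:// URI built from the cloned volume's id. A small sketch of both checks (the volume id is hypothetical):

def eligible(image_meta):
    return (image_meta['disk_format'] == 'raw' and
            image_meta['container_format'] == 'bare')

image_volume_id = '3f1c0000-0000-0000-0000-000000000000'
print('cinder://%s' % image_volume_id)
print(eligible({'disk_format': 'raw', 'container_format': 'bare'}))    # True
print(eligible({'disk_format': 'qcow2', 'container_format': 'bare'}))  # False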
def copy_volume_to_image(self, context, volume_id, image_meta):
    """Uploads the specified volume to Glance.

    image_meta is a dictionary containing the following keys:
    'id', 'container_format', 'disk_format'
    """
    payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
    image_service = None
    try:
        volume = self.db.volume_get(context, volume_id)
        utils.require_driver_initialized(self.driver)
        image_service, image_id = glance.get_remote_image_service(
            context, image_meta['id'])
        if (self.driver.configuration.image_upload_use_cinder_backend
                and self._clone_image_volume_and_add_location(
                    context, volume, image_service, image_meta)):
            LOG.debug('Registered image volume location to glance '
                      'image-id: %(image_id)s.',
                      {'image_id': image_meta['id']},
                      resource=volume)
        else:
            self.driver.copy_volume_to_image(context, volume,
                                             image_service, image_meta)
            LOG.debug('Uploaded volume to glance image-id: %(image_id)s.',
                      {'image_id': image_meta['id']},
                      resource=volume)
    except Exception as error:
        LOG.error(_LE('Upload volume to image encountered an error '
                      '(image-id: %(image_id)s).'),
                  {'image_id': image_meta['id']},
                  resource=volume)
        if image_service is not None:
            self._delete_image(context, image_meta['id'], image_service)
        with excutils.save_and_reraise_exception():
            payload['message'] = six.text_type(error)
            if isinstance(error, exception.ImageLimitExceeded):
                self.message_api.create(
                    context,
                    defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
                    context.project_id,
                    resource_type=resource_types.VOLUME,
                    resource_uuid=volume_id)
    finally:
        self.db.volume_update_status_based_on_attachment(context,
                                                         volume_id)
    LOG.info(_LI('Copy volume to image completed successfully.'),
             resource=volume)
-5,284,815,560,622,527,000
Uploads the specified volume to Glance. image_meta is a dictionary containing the following keys: 'id', 'container_format', 'disk_format'
cinder/volume/manager.py
copy_volume_to_image
ISCAS-VDI/cinder-base
python
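Per the docstring, a minimal image_meta for this call carries the 'id', 'container_format' and 'disk_format' keys; the notification payload pairs the volume and image ids. Hypothetical values throughout:

image_meta = {
    'id': '5a8bbd04-1111-2222-3333-444444444444',
    'container_format': 'bare',
    'disk_format': 'raw',
}
payload = {'volume_id': 'vol-1', 'image_id': image_meta['id']}
print(payload)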
def _delete_image(self, context, image_id, image_service):
    """Deletes an image stuck in queued or saving state."""
    try:
        image_meta = image_service.show(context, image_id)
        image_status = image_meta.get('status')
        if image_status == 'queued' or image_status == 'saving':
            LOG.warning(_LW('Deleting image in unexpected status: '
                            '%(image_status)s.'),
                        {'image_status': image_status},
                        resource={'type': 'image', 'id': image_id})
            image_service.delete(context, image_id)
    except Exception:
        LOG.warning(_LW('Image delete encountered an error.'),
                    exc_info=True, resource={'type': 'image',
                                             'id': image_id})
8,074,731,748,494,501,000
Deletes an image stuck in queued or saving state.
cinder/volume/manager.py
_delete_image
ISCAS-VDI/cinder-base
python
def initialize_connection(self, context, volume_id, connector):
    """Prepare volume for connection from host represented by connector.

    This method calls the driver initialize_connection and returns
    it to the caller. The connector parameter is a dictionary with
    information about the host that will connect to the volume in the
    following format::

        {
            'ip': ip,
            'initiator': initiator,
        }

    ip: the ip address of the connecting machine

    initiator: the iscsi initiator name of the connecting machine.
    This can be None if the connecting machine does not support iscsi
    connections.

    driver is responsible for doing any necessary security setup and
    returning a connection_info dictionary in the following format::

        {
            'driver_volume_type': driver_volume_type,
            'data': data,
        }

    driver_volume_type: a string to identify the type of volume. This
                        can be used by the calling code to determine the
                        strategy for connecting to the volume. This could
                        be 'iscsi', 'rbd', 'sheepdog', etc.

    data: this is the data that the calling code will use to connect
          to the volume. Keep in mind that this will be serialized to
          json in various places, so it should not contain any non-json
          data types.
    """
    utils.require_driver_initialized(self.driver)

    volume = self.db.volume_get(context, volume_id)
    model_update = None
    try:
        self.driver.validate_connector(connector)
    except exception.InvalidConnectorException as err:
        raise exception.InvalidInput(reason=six.text_type(err))
    except Exception as err:
        err_msg = (_('Validate volume connection failed '
                     '(error: %(err)s).') % {'err': six.text_type(err)})
        LOG.error(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)

    try:
        model_update = self.driver.create_export(context.elevated(),
                                                 volume, connector)
    except exception.CinderException:
        err_msg = _('Create export for volume failed.')
        LOG.exception(err_msg, resource=volume)
        raise exception.VolumeBackendAPIException(data=err_msg)

    try:
        if model_update:
            volume = self.db.volume_update(context, volume_id,
                                           model_update)
    except exception.CinderException as ex:
        LOG.exception(_LE('Model update failed.'), resource=volume)
        raise exception.ExportFailure(reason=six.text_type(ex))

    try:
        conn_info = self.driver.initialize_connection(volume, connector)
    except Exception as err:
        err_msg = (_('Driver initialize connection failed '
                     '(error: %(err)s).') % {'err': six.text_type(err)})
        LOG.error(err_msg, resource=volume)
        self.driver.remove_export(context.elevated(), volume)
        raise exception.VolumeBackendAPIException(data=err_msg)

    # Add qos_specs to connection info: only pass specs designated to be
    # consumed by the front-end, or by both front-end and back-end.
    typeid = volume['volume_type_id']
    specs = None
    if typeid:
        res = volume_types.get_volume_type_qos_specs(typeid)
        qos = res['qos_specs']
        if qos and qos.get('consumer') in ['front-end', 'both']:
            specs = qos.get('specs')

    qos_spec = dict(qos_specs=specs)
    conn_info['data'].update(qos_spec)

    # Add access_mode to connection info; fall back to the readonly
    # metadata when attached_mode is not set.
    volume_metadata = self.db.volume_admin_metadata_get(
        context.elevated(), volume_id)
    access_mode = volume_metadata.get('attached_mode')
    if access_mode is None:
        access_mode = ('ro' if volume_metadata.get('readonly') == 'True'
                       else 'rw')
    conn_info['data']['access_mode'] = access_mode

    # Add encrypted flag to connection_info if it is not set by the driver.
    if conn_info['data'].get('encrypted') is None:
        encrypted = bool(volume.get('encryption_key_id'))
        conn_info['data']['encrypted'] = encrypted

    # Add discard flag to connection_info if it is not set by the driver
    # and the backend is configured to report discard support.
    if conn_info['data'].get('discard') is None:
        discard_supported = self.driver.configuration.safe_get(
            'report_discard_supported')
        if discard_supported:
            conn_info['data']['discard'] = True

    LOG.info(_LI('Initialize volume connection completed successfully.'),
             resource=volume)
    return conn_info
-8,752,532,811,227,430,000
Prepare volume for connection from host represented by connector. This method calls the driver initialize_connection and returns it to the caller. The connector parameter is a dictionary with information about the host that will connect to the volume in the following format:: { 'ip': ip, 'initiator': initiator, } ip: the ip address of the connecting machine initiator: the iscsi initiator name of the connecting machine. This can be None if the connecting machine does not support iscsi connections. driver is responsible for doing any necessary security setup and returning a connection_info dictionary in the following format:: { 'driver_volume_type': driver_volume_type, 'data': data, } driver_volume_type: a string to identify the type of volume. This can be used by the calling code to determine the strategy for connecting to the volume. This could be 'iscsi', 'rbd', 'sheepdog', etc. data: this is the data that the calling code will use to connect to the volume. Keep in mind that this will be serialized to json in various places, so it should not contain any non-json data types.
cinder/volume/manager.py
initialize_connection
ISCAS-VDI/cinder-base
python
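In an iSCSI case the two dictionaries described by the docstring look roughly like this; every value is hypothetical, and the 'data' payload must stay JSON-serializable, as the docstring warns:

import json

connector = {
    'ip': '10.0.0.2',
    'initiator': 'iqn.1993-08.org.debian:01:abc123',  # None if no iSCSI
}

connection_info = {
    'driver_volume_type': 'iscsi',  # or 'rbd', 'sheepdog', ...
    'data': {
        'target_portal': '10.0.0.1:3260',
        'target_iqn': 'iqn.2010-10.org.openstack:volume-vol-1',
        'target_lun': 1,
        'access_mode': 'rw',
        'encrypted': False,
    },
}

print(json.dumps(connection_info, indent=2))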
def terminate_connection(self, context, volume_id, connector, force=False):
    """Cleanup connection from host represented by connector.

    The format of connector is the same as for initialize_connection.
    """
    utils.require_driver_initialized(self.driver)

    volume_ref = self.db.volume_get(context, volume_id)
    try:
        self.driver.terminate_connection(volume_ref, connector,
                                         force=force)
    except Exception as err:
        err_msg = (_('Terminate volume connection failed: %(err)s')
                   % {'err': six.text_type(err)})
        LOG.error(err_msg, resource=volume_ref)
        raise exception.VolumeBackendAPIException(data=err_msg)
    LOG.info(_LI('Terminate volume connection completed successfully.'),
             resource=volume_ref)
-8,020,229,695,655,376,000
Cleanup connection from host represented by connector. The format of connector is the same as for initialize_connection.
cinder/volume/manager.py
terminate_connection
ISCAS-VDI/cinder-base
python
def remove_export(self, context, volume_id):
    """Removes an export for a volume."""
    utils.require_driver_initialized(self.driver)
    volume_ref = self.db.volume_get(context, volume_id)
    try:
        self.driver.remove_export(context, volume_ref)
    except Exception:
        msg = _('Remove volume export failed.')
        LOG.exception(msg, resource=volume_ref)
        raise exception.VolumeBackendAPIException(data=msg)

    LOG.info(_LI('Remove volume export completed successfully.'),
             resource=volume_ref)
-5,397,532,297,899,845,000
Removes an export for a volume.
cinder/volume/manager.py
remove_export
ISCAS-VDI/cinder-base
python
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
    """Copy data from src_vol to dest_vol."""
    LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
              {'src': src_vol['name'], 'dest': dest_vol['name']})
    properties = utils.brick_get_connector_properties()

    dest_remote = remote in ['dest', 'both']
    dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
                                           remote=dest_remote)

    try:
        src_remote = remote in ['src', 'both']
        src_attach_info = self._attach_volume(ctxt, src_vol, properties,
                                              remote=src_remote)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to attach source volume for copy.'))
            self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                properties, remote=dest_remote)

    # Check the backend capabilities of the migration destination host.
    rpcapi = volume_rpcapi.VolumeAPI()
    capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'], False)
    sparse_copy_volume = bool(capabilities and
                              capabilities.get('sparse_copy_volume',
                                               False))

    copy_error = True
    try:
        size_in_mb = int(src_vol['size']) * units.Ki  # vol size is in GB
        vol_utils.copy_volume(src_attach_info['device']['path'],
                              dest_attach_info['device']['path'],
                              size_in_mb,
                              self.configuration.volume_dd_blocksize,
                              sparse=sparse_copy_volume)
        copy_error = False
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to copy volume %(src)s to %(dest)s.'),
                      {'src': src_vol['id'], 'dest': dest_vol['id']})
    finally:
        try:
            self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                properties, force=copy_error,
                                remote=dest_remote)
        finally:
            self._detach_volume(ctxt, src_attach_info, src_vol,
                                properties, force=copy_error,
                                remote=src_remote)
2,135,584,882,741,025,000
Copy data from src_vol to dest_vol.
cinder/volume/manager.py
_copy_volume_data
ISCAS-VDI/cinder-base
python
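The dd copy is sized in MiB: the volume size, stored in GiB, is multiplied by units.Ki (1024) from oslo.utils. A one-line check:

from oslo_utils import units

src_size_gb = 8
size_in_mb = int(src_size_gb) * units.Ki
print(size_in_mb)  # 8192 MiB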
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, new_type_id=None, volume=None): 'Migrate the volume to the specified host (called on source host).' if (volume is None): volume = objects.Volume.get_by_id(ctxt, volume_id) try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.migration_status = 'error' volume.save() model_update = None moved = False status_update = None if (volume.status in ('retyping', 'maintenance')): status_update = {'status': volume.previous_status} volume.migration_status = 'migrating' volume.save() if ((not force_host_copy) and (new_type_id is None)): try: LOG.debug('Issue driver.migrate_volume.', resource=volume) (moved, model_update) = self.driver.migrate_volume(ctxt, volume, host) if moved: updates = {'host': host['host'], 'migration_status': 'success', 'previous_status': volume.status} if status_update: updates.update(status_update) if model_update: updates.update(model_update) volume.update(updates) volume.save() except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() if (not moved): try: self._migrate_volume_generic(ctxt, volume, host, new_type_id) except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() LOG.info(_LI('Migrate volume completed successfully.'), resource=volume)
-6,172,334,654,003,001,000
Migrate the volume to the specified host (called on source host).
cinder/volume/manager.py
migrate_volume
ISCAS-VDI/cinder-base
python
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, new_type_id=None, volume=None): if (volume is None): volume = objects.Volume.get_by_id(ctxt, volume_id) try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.migration_status = 'error' volume.save() model_update = None moved = False status_update = None if (volume.status in ('retyping', 'maintenance')): status_update = {'status': volume.previous_status} volume.migration_status = 'migrating' volume.save() if ((not force_host_copy) and (new_type_id is None)): try: LOG.debug('Issue driver.migrate_volume.', resource=volume) (moved, model_update) = self.driver.migrate_volume(ctxt, volume, host) if moved: updates = {'host': host['host'], 'migration_status': 'success', 'previous_status': volume.status} if status_update: updates.update(status_update) if model_update: updates.update(model_update) volume.update(updates) volume.save() except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() if (not moved): try: self._migrate_volume_generic(ctxt, volume, host, new_type_id) except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() LOG.info(_LI('Migrate volume completed successfully.'), resource=volume)
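driver.migrate_volume is expected to hand back a (moved, model_update) pair; a False first element makes the manager fall back to _migrate_volume_generic. A hedged sketch of that return contract (hypothetical driver class and helper, not from the source):

class SketchDriver(object):
    """Hypothetical driver illustrating the (moved, model_update) contract."""

    def migrate_volume(self, ctxt, volume, host):
        if not self._backend_can_reach(host):  # assumed helper, not in the source
            # (False, None): the manager falls back to _migrate_volume_generic
            return (False, None)
        # (True, model_update): the manager merges model_update into the volume row
        return (True, {'provider_location': 'moved-on-backend'})

    def _backend_can_reach(self, host):
        return False  # placeholder policy

assert SketchDriver().migrate_volume(None, {}, {'host': 'b@pool'}) == (False, None)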
def _append_filter_goodness_functions(self, volume_stats): 'Returns volume_stats updated as needed.' if ('filter_function' not in volume_stats): volume_stats['filter_function'] = self.driver.get_filter_function() if ('goodness_function' not in volume_stats): volume_stats['goodness_function'] = self.driver.get_goodness_function() return volume_stats
-4,678,197,258,167,024,000
Returns volume_stats updated as needed.
cinder/volume/manager.py
_append_filter_goodness_functions
ISCAS-VDI/cinder-base
python
def _append_filter_goodness_functions(self, volume_stats): if ('filter_function' not in volume_stats): volume_stats['filter_function'] = self.driver.get_filter_function() if ('goodness_function' not in volume_stats): volume_stats['goodness_function'] = self.driver.get_goodness_function() return volume_stats
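The helper above only fills in keys the backend's stats dict did not already report, so driver-supplied filter/goodness functions take precedence over config defaults. A tiny standalone illustration of that fill-if-missing behavior (values are hypothetical):

stats = {'filter_function': 'capabilities.total_capacity_gb > 10'}
defaults = {'filter_function': None, 'goodness_function': '100'}
for key, value in defaults.items():
    stats.setdefault(key, value)  # only fills keys the backend did not report
assert stats['filter_function'] == 'capabilities.total_capacity_gb > 10'
assert stats['goodness_function'] == '100'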
def publish_service_capabilities(self, context): 'Collect driver status and then publish.' self._report_driver_status(context) self._publish_service_capabilities(context)
3,688,470,462,004,612,000
Collect driver status and then publish.
cinder/volume/manager.py
publish_service_capabilities
ISCAS-VDI/cinder-base
python
def publish_service_capabilities(self, context): self._report_driver_status(context) self._publish_service_capabilities(context)
def promote_replica(self, ctxt, volume_id): 'Promote volume replica secondary to be the primary volume.' volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Promote volume replica failed.'), resource=volume) try: model_update = self.driver.promote_replica(ctxt, volume) except exception.CinderException: err_msg = _('Error promoting secondary volume to primary') raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_('Failed updating model with driver provided model %(model)s') % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) LOG.info(_LI('Promote volume replica completed successfully.'), resource=volume)
2,842,671,097,530,260,500
Promote volume replica secondary to be the primary volume.
cinder/volume/manager.py
promote_replica
ISCAS-VDI/cinder-base
python
def promote_replica(self, ctxt, volume_id): volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Promote volume replica failed.'), resource=volume) try: model_update = self.driver.promote_replica(ctxt, volume) except exception.CinderException: err_msg = _('Error promoting secondary volume to primary') raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_('Failed updating model with driver provided model %(model)s') % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) LOG.info(_LI('Promote volume replica completed successfully.'), resource=volume)
def reenable_replication(self, ctxt, volume_id): 'Re-enable replication of secondary volume with primary volumes.' volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Sync volume replica failed.'), resource=volume) try: model_update = self.driver.reenable_replication(ctxt, volume) except exception.CinderException: err_msg = _('Synchronizing secondary volume to primary failed.') raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_('Failed updating model with driver provided model %(model)s') % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id)
6,959,273,489,633,431,000
Re-enable replication of secondary volume with primary volumes.
cinder/volume/manager.py
reenable_replication
ISCAS-VDI/cinder-base
python
def reenable_replication(self, ctxt, volume_id): volume = self.db.volume_get(ctxt, volume_id) model_update = None try: utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Sync volume replica failed.'), resource=volume) try: model_update = self.driver.reenable_replication(ctxt, volume) except exception.CinderException: err_msg = _('Synchronizing secondary volume to primary failed.') raise exception.ReplicationError(reason=err_msg, volume_id=volume_id) try: if model_update: volume = self.db.volume_update(ctxt, volume_id, model_update) except exception.CinderException: err_msg = (_('Failed updating model with driver provided model %(model)s') % {'model': model_update}) raise exception.ReplicationError(reason=err_msg, volume_id=volume_id)
def create_consistencygroup(self, context, group): 'Creates the consistency group.' context = context.elevated() status = fields.ConsistencyGroupStatus.AVAILABLE model_update = None self._notify_about_consistencygroup_usage(context, group, 'create.start') try: utils.require_driver_initialized(self.driver) LOG.info(_LI('Consistency group %s: creating'), group.name) model_update = self.driver.create_consistencygroup(context, group) if model_update: if (model_update['status'] == fields.ConsistencyGroupStatus.ERROR): msg = _('Create consistency group failed.') LOG.error(msg, resource={'type': 'consistency_group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.ConsistencyGroupStatus.ERROR group.save() LOG.error(_LE('Consistency group %s: create failed'), group.name) group.status = status group.created_at = timeutils.utcnow() group.save() LOG.info(_LI('Consistency group %s: created successfully'), group.name) self._notify_about_consistencygroup_usage(context, group, 'create.end') LOG.info(_LI('Create consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id}) return group
7,328,819,059,921,083,000
Creates the consistency group.
cinder/volume/manager.py
create_consistencygroup
ISCAS-VDI/cinder-base
python
def create_consistencygroup(self, context, group): context = context.elevated() status = fields.ConsistencyGroupStatus.AVAILABLE model_update = None self._notify_about_consistencygroup_usage(context, group, 'create.start') try: utils.require_driver_initialized(self.driver) LOG.info(_LI('Consistency group %s: creating'), group.name) model_update = self.driver.create_consistencygroup(context, group) if model_update: if (model_update['status'] == fields.ConsistencyGroupStatus.ERROR): msg = _('Create consistency group failed.') LOG.error(msg, resource={'type': 'consistency_group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.ConsistencyGroupStatus.ERROR group.save() LOG.error(_LE('Consistency group %s: create failed'), group.name) group.status = status group.created_at = timeutils.utcnow() group.save() LOG.info(_LI('Consistency group %s: created successfully'), group.name) self._notify_about_consistencygroup_usage(context, group, 'create.end') LOG.info(_LI('Create consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id}) return group
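The driver's create_consistencygroup may return a model_update dict; a status of ERROR aborts the create, while any other update is merged into the group before it is marked available. A minimal sketch of that dispatch (the ERROR constant and dict-backed group are stand-ins):

ERROR = 'error'  # stands in for fields.ConsistencyGroupStatus.ERROR

def apply_cg_model_update(group, model_update):
    # mirrors the dispatch above: ERROR aborts, anything else is merged
    if not model_update:
        return
    if model_update.get('status') == ERROR:
        raise RuntimeError('Create consistency group failed.')
    group.update(model_update)

group = {'status': 'creating'}
apply_cg_model_update(group, {'status': 'available'})
assert group['status'] == 'available'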
def create_consistencygroup_from_src(self, context, group, cgsnapshot=None, source_cg=None): 'Creates the consistency group from source.\n\n The source can be a CG snapshot or a source CG.\n ' source_name = None snapshots = None source_vols = None try: volumes = self.db.volume_get_all_by_group(context, group.id) if cgsnapshot: try: cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot.id) except exception.CgSnapshotNotFound: LOG.error(_LE('Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound.'), {'snap': cgsnapshot.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = (_('snapshot-%s') % cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) for snap in snapshots: if (snap.status not in VALID_CREATE_CG_SRC_SNAP_STATUS): msg = (_('Cannot create consistency group %(group)s because snapshot %(snap)s is not in a valid state. Valid states are: %(valid)s.') % {'group': group.id, 'snap': snap['id'], 'valid': VALID_CREATE_CG_SRC_SNAP_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) if source_cg: try: source_cg = objects.ConsistencyGroup.get_by_id(context, source_cg.id) except exception.ConsistencyGroupNotFound: LOG.error(_LE('Create consistency group from source cg-%(cg)s failed: ConsistencyGroupNotFound.'), {'cg': source_cg.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = (_('cg-%s') % source_cg.id) source_vols = self.db.volume_get_all_by_group(context, source_cg.id) for source_vol in source_vols: if (source_vol['status'] not in VALID_CREATE_CG_SRC_CG_STATUS): msg = (_('Cannot create consistency group %(group)s because source volume %(source_vol)s is not in a valid state. Valid states are: %(valid)s.') % {'group': group.id, 'source_vol': source_vol['id'], 'valid': VALID_CREATE_CG_SRC_CG_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) sorted_snapshots = None if (cgsnapshot and snapshots): sorted_snapshots = self._sort_snapshots(volumes, snapshots) sorted_source_vols = None if (source_cg and source_vols): sorted_source_vols = self._sort_source_vols(volumes, source_vols) self._notify_about_consistencygroup_usage(context, group, 'create.start') utils.require_driver_initialized(self.driver) (model_update, volumes_model_update) = self.driver.create_consistencygroup_from_src(context, group, volumes, cgsnapshot, sorted_snapshots, source_cg, sorted_source_vols) if volumes_model_update: for update in volumes_model_update: self.db.volume_update(context, update['id'], update) if model_update: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = 'error' group.save() LOG.error(_LE('Create consistency group from source %(source)s failed.'), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) for vol in volumes: self.db.volume_update(context, vol['id'], {'status': 'error'}) now = timeutils.utcnow() status = 'available' for vol in volumes: update = {'status': status, 'created_at': now} self._update_volume_from_src(context, vol, update, group=group) self._update_allocated_capacity(vol) group.status = status group.created_at = now group.save() self._notify_about_consistencygroup_usage(context, group, 'create.end') LOG.info(_LI('Create consistency group from source-%(source)s completed successfully.'), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) return group
-3,745,478,315,313,215,000
Creates the consistency group from source. The source can be a CG snapshot or a source CG.
cinder/volume/manager.py
create_consistencygroup_from_src
ISCAS-VDI/cinder-base
python
def create_consistencygroup_from_src(self, context, group, cgsnapshot=None, source_cg=None): 'Creates the consistency group from source.\n\n The source can be a CG snapshot or a source CG.\n ' source_name = None snapshots = None source_vols = None try: volumes = self.db.volume_get_all_by_group(context, group.id) if cgsnapshot: try: cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot.id) except exception.CgSnapshotNotFound: LOG.error(_LE('Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound.'), {'snap': cgsnapshot.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = (_('snapshot-%s') % cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) for snap in snapshots: if (snap.status not in VALID_CREATE_CG_SRC_SNAP_STATUS): msg = (_('Cannot create consistency group %(group)s because snapshot %(snap)s is not in a valid state. Valid states are: %(valid)s.') % {'group': group.id, 'snap': snap['id'], 'valid': VALID_CREATE_CG_SRC_SNAP_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) if source_cg: try: source_cg = objects.ConsistencyGroup.get_by_id(context, source_cg.id) except exception.ConsistencyGroupNotFound: LOG.error(_LE('Create consistency group from source cg-%(cg)s failed: ConsistencyGroupNotFound.'), {'cg': source_cg.id}, resource={'type': 'consistency_group', 'id': group.id}) raise source_name = (_('cg-%s') % source_cg.id) source_vols = self.db.volume_get_all_by_group(context, source_cg.id) for source_vol in source_vols: if (source_vol['status'] not in VALID_CREATE_CG_SRC_CG_STATUS): msg = (_('Cannot create consistency group %(group)s because source volume %(source_vol)s is not in a valid state. Valid states are: %(valid)s.') % {'group': group.id, 'source_vol': source_vol['id'], 'valid': VALID_CREATE_CG_SRC_CG_STATUS}) raise exception.InvalidConsistencyGroup(reason=msg) sorted_snapshots = None if (cgsnapshot and snapshots): sorted_snapshots = self._sort_snapshots(volumes, snapshots) sorted_source_vols = None if (source_cg and source_vols): sorted_source_vols = self._sort_source_vols(volumes, source_vols) self._notify_about_consistencygroup_usage(context, group, 'create.start') utils.require_driver_initialized(self.driver) (model_update, volumes_model_update) = self.driver.create_consistencygroup_from_src(context, group, volumes, cgsnapshot, sorted_snapshots, source_cg, sorted_source_vols) if volumes_model_update: for update in volumes_model_update: self.db.volume_update(context, update['id'], update) if model_update: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = 'error' group.save() LOG.error(_LE('Create consistency group from source %(source)s failed.'), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) for vol in volumes: self.db.volume_update(context, vol['id'], {'status': 'error'}) now = timeutils.utcnow() status = 'available' for vol in volumes: update = {'status': status, 'created_at': now} self._update_volume_from_src(context, vol, update, group=group) self._update_allocated_capacity(vol) group.status = status group.created_at = now group.save() self._notify_about_consistencygroup_usage(context, group, 'create.end') LOG.info(_LI('Create consistency group from source-%(source)s completed successfully.'), {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) return group
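_sort_snapshots is not shown in this section; judging from its use above, it reorders the source snapshots so they pair one-to-one with the new volumes, each of which records its source in snapshot_id. A hedged sketch of such an alignment, assuming those field names:

def sort_snapshots(volumes, snapshots):
    # order snapshots so snapshots[i] is the source of volumes[i]
    by_id = {snap['id']: snap for snap in snapshots}
    return [by_id[vol['snapshot_id']] for vol in volumes]

vols = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
snaps = [{'id': 's1'}, {'id': 's2'}]
assert [s['id'] for s in sort_snapshots(vols, snaps)] == ['s2', 's1']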
def delete_consistencygroup(self, context, group): 'Deletes consistency group and the volumes in the group.' context = context.elevated() project_id = group.project_id if (context.project_id != group.project_id): project_id = group.project_id else: project_id = context.project_id volumes = self.db.volume_get_all_by_group(context, group.id) for volume_ref in volumes: if (volume_ref['attach_status'] == 'attached'): raise exception.VolumeAttached(volume_id=volume_ref['id']) if volume_ref['host']: new_host = vol_utils.extract_host(volume_ref['host']) if (new_host != self.host): raise exception.InvalidVolume(reason=_('Volume is not local to this node')) self._notify_about_consistencygroup_usage(context, group, 'delete.start') volumes_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) (model_update, volumes_model_update) = self.driver.delete_consistencygroup(context, group, volumes) if volumes_model_update: for volume in volumes_model_update: update = {'status': volume['status']} self.db.volume_update(context, volume['id'], update) if ((volume['status'] in ['error_deleting', 'error']) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = volume['status'] if model_update: if (model_update['status'] in ['error_deleting', 'error']): msg = _('Delete consistency group failed.') LOG.error(msg, resource={'type': 'consistency_group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = 'error' group.save() if (not volumes_model_update): for vol in volumes: self.db.volume_update(context, vol['id'], {'status': 'error'}) try: reserve_opts = {'consistencygroups': (- 1)} cgreservations = CGQUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: cgreservations = None LOG.exception(_LE('Delete consistency group failed to update usages.'), resource={'type': 'consistency_group', 'id': group.id}) for volume_ref in volumes: try: volume_id = volume_ref['id'] reserve_opts = {'volumes': (- 1), 'gigabytes': (- volume_ref['size'])} QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE('Delete consistency group failed to update usages.'), resource={'type': 'consistency_group', 'id': group.id}) self.db.volume_glance_metadata_delete_by_volume(context, volume_id) self.db.volume_destroy(context, volume_id) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) self.stats['allocated_capacity_gb'] -= volume_ref['size'] if cgreservations: CGQUOTAS.commit(context, cgreservations, project_id=project_id) group.destroy() self._notify_about_consistencygroup_usage(context, group, 'delete.end', volumes) self.publish_service_capabilities(context) LOG.info(_LI('Delete consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id})
932,044,870,691,766,100
Deletes consistency group and the volumes in the group.
cinder/volume/manager.py
delete_consistencygroup
ISCAS-VDI/cinder-base
python
def delete_consistencygroup(self, context, group): context = context.elevated() project_id = group.project_id if (context.project_id != group.project_id): project_id = group.project_id else: project_id = context.project_id volumes = self.db.volume_get_all_by_group(context, group.id) for volume_ref in volumes: if (volume_ref['attach_status'] == 'attached'): raise exception.VolumeAttached(volume_id=volume_ref['id']) if volume_ref['host']: new_host = vol_utils.extract_host(volume_ref['host']) if (new_host != self.host): raise exception.InvalidVolume(reason=_('Volume is not local to this node')) self._notify_about_consistencygroup_usage(context, group, 'delete.start') volumes_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) (model_update, volumes_model_update) = self.driver.delete_consistencygroup(context, group, volumes) if volumes_model_update: for volume in volumes_model_update: update = {'status': volume['status']} self.db.volume_update(context, volume['id'], update) if ((volume['status'] in ['error_deleting', 'error']) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = volume['status'] if model_update: if (model_update['status'] in ['error_deleting', 'error']): msg = _('Delete consistency group failed.') LOG.error(msg, resource={'type': 'consistency_group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = 'error' group.save() if (not volumes_model_update): for vol in volumes: self.db.volume_update(context, vol['id'], {'status': 'error'}) try: reserve_opts = {'consistencygroups': (- 1)} cgreservations = CGQUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: cgreservations = None LOG.exception(_LE('Delete consistency group failed to update usages.'), resource={'type': 'consistency_group', 'id': group.id}) for volume_ref in volumes: try: volume_id = volume_ref['id'] reserve_opts = {'volumes': (- 1), 'gigabytes': (- volume_ref['size'])} QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE('Delete consistency group failed to update usages.'), resource={'type': 'consistency_group', 'id': group.id}) self.db.volume_glance_metadata_delete_by_volume(context, volume_id) self.db.volume_destroy(context, volume_id) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) self.stats['allocated_capacity_gb'] -= volume_ref['size'] if cgreservations: CGQUOTAS.commit(context, cgreservations, project_id=project_id) group.destroy() self._notify_about_consistencygroup_usage(context, group, 'delete.end', volumes) self.publish_service_capabilities(context) LOG.info(_LI('Delete consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id})
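Resource cleanup above follows the usual Cinder quota pattern: reserve negative deltas, destroy the database rows, then commit, so a failure along the way leaves usage untouched. A schematic of that two-phase flow with a toy in-memory quota object (not the real QUOTAS API):

class ToyQuotas(object):
    def __init__(self):
        self.usage = {'volumes': 3, 'gigabytes': 30}
        self._pending = []

    def reserve(self, **deltas):
        self._pending.append(deltas)
        return len(self._pending) - 1  # reservation handle

    def commit(self, handle):
        for key, delta in self._pending[handle].items():
            self.usage[key] += delta

q = ToyQuotas()
res = q.reserve(volumes=-1, gigabytes=-10)  # mirrors {'volumes': -1, 'gigabytes': -size}
# ... destroy the volume row here ...
q.commit(res)
assert q.usage == {'volumes': 2, 'gigabytes': 20}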
def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): 'Updates consistency group.\n\n Update consistency group by adding volumes to the group,\n or removing volumes from the group.\n ' add_volumes_ref = [] remove_volumes_ref = [] add_volumes_list = [] remove_volumes_list = [] if add_volumes: add_volumes_list = add_volumes.split(',') if remove_volumes: remove_volumes_list = remove_volumes.split(',') for add_vol in add_volumes_list: try: add_vol_ref = self.db.volume_get(context, add_vol) except exception.VolumeNotFound: LOG.error(_LE('Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound.'), {'volume_id': add_vol}, resource={'type': 'consistency_group', 'id': group.id}) raise if (add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS): msg = (_('Cannot add volume %(volume_id)s to consistency group %(group_id)s because volume is in an invalid state: %(status)s. Valid states are: %(valid)s.') % {'volume_id': add_vol_ref['id'], 'group_id': group.id, 'status': add_vol_ref['status'], 'valid': VALID_ADD_VOL_TO_CG_STATUS}) raise exception.InvalidVolume(reason=msg) new_host = vol_utils.extract_host(add_vol_ref['host']) if (new_host != self.host): raise exception.InvalidVolume(reason=_('Volume is not local to this node.')) add_volumes_ref.append(add_vol_ref) for remove_vol in remove_volumes_list: try: remove_vol_ref = self.db.volume_get(context, remove_vol) except exception.VolumeNotFound: LOG.error(_LE('Update consistency group failed to remove volume-%(volume_id)s: VolumeNotFound.'), {'volume_id': remove_vol}, resource={'type': 'consistency_group', 'id': group.id}) raise if (remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS): msg = (_('Cannot remove volume %(volume_id)s from consistency group %(group_id)s because volume is in an invalid state: %(status)s. Valid states are: %(valid)s.') % {'volume_id': remove_vol_ref['id'], 'group_id': group.id, 'status': remove_vol_ref['status'], 'valid': VALID_REMOVE_VOL_FROM_CG_STATUS}) raise exception.InvalidVolume(reason=msg) remove_volumes_ref.append(remove_vol_ref) self._notify_about_consistencygroup_usage(context, group, 'update.start') try: utils.require_driver_initialized(self.driver) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_consistencygroup(context, group, add_volumes=add_volumes_ref, remove_volumes=remove_volumes_ref) if add_volumes_update: for update in add_volumes_update: self.db.volume_update(context, update['id'], update) if remove_volumes_update: for update in remove_volumes_update: self.db.volume_update(context, update['id'], update) if model_update: if (model_update['status'] in [fields.ConsistencyGroupStatus.ERROR]): msg = (_('Error occurred when updating consistency group %s.') % group.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) group.update(model_update) group.save() except exception.VolumeDriverException: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error occurred in the volume driver when updating consistency group %(group_id)s.'), {'group_id': group.id}) group.status = 'error' group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'status': 'error'}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'status': 'error'}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error occurred when updating consistency group %(group_id)s.'), {'group_id': group.id}) group.status = 'error' group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'status': 'error'}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'status': 'error'}) now = timeutils.utcnow() group.status = 'available' group.updated_at = now group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'consistencygroup_id': group.id, 'updated_at': now}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'consistencygroup_id': None, 'updated_at': now}) self._notify_about_consistencygroup_usage(context, group, 'update.end') LOG.info(_LI('Update consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id})
2,105,641,434,782,602,500
Updates consistency group. Update consistency group by adding volumes to the group, or removing volumes from the group.
cinder/volume/manager.py
update_consistencygroup
ISCAS-VDI/cinder-base
python
def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): 'Updates consistency group.\n\n Update consistency group by adding volumes to the group,\n or removing volumes from the group.\n ' add_volumes_ref = [] remove_volumes_ref = [] add_volumes_list = [] remove_volumes_list = [] if add_volumes: add_volumes_list = add_volumes.split(',') if remove_volumes: remove_volumes_list = remove_volumes.split(',') for add_vol in add_volumes_list: try: add_vol_ref = self.db.volume_get(context, add_vol) except exception.VolumeNotFound: LOG.error(_LE('Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound.'), {'volume_id': add_vol}, resource={'type': 'consistency_group', 'id': group.id}) raise if (add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS): msg = (_('Cannot add volume %(volume_id)s to consistency group %(group_id)s because volume is in an invalid state: %(status)s. Valid states are: %(valid)s.') % {'volume_id': add_vol_ref['id'], 'group_id': group.id, 'status': add_vol_ref['status'], 'valid': VALID_ADD_VOL_TO_CG_STATUS}) raise exception.InvalidVolume(reason=msg) new_host = vol_utils.extract_host(add_vol_ref['host']) if (new_host != self.host): raise exception.InvalidVolume(reason=_('Volume is not local to this node.')) add_volumes_ref.append(add_vol_ref) for remove_vol in remove_volumes_list: try: remove_vol_ref = self.db.volume_get(context, remove_vol) except exception.VolumeNotFound: LOG.error(_LE('Update consistency group failed to remove volume-%(volume_id)s: VolumeNotFound.'), {'volume_id': remove_vol}, resource={'type': 'consistency_group', 'id': group.id}) raise if (remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS): msg = (_('Cannot remove volume %(volume_id)s from consistency group %(group_id)s because volume is in an invalid state: %(status)s. Valid states are: %(valid)s.') % {'volume_id': remove_vol_ref['id'], 'group_id': group.id, 'status': remove_vol_ref['status'], 'valid': VALID_REMOVE_VOL_FROM_CG_STATUS}) raise exception.InvalidVolume(reason=msg) remove_volumes_ref.append(remove_vol_ref) self._notify_about_consistencygroup_usage(context, group, 'update.start') try: utils.require_driver_initialized(self.driver) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_consistencygroup(context, group, add_volumes=add_volumes_ref, remove_volumes=remove_volumes_ref) if add_volumes_update: for update in add_volumes_update: self.db.volume_update(context, update['id'], update) if remove_volumes_update: for update in remove_volumes_update: self.db.volume_update(context, update['id'], update) if model_update: if (model_update['status'] in [fields.ConsistencyGroupStatus.ERROR]): msg = (_('Error occurred when updating consistency group %s.') % group.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) group.update(model_update) group.save() except exception.VolumeDriverException: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error occurred in the volume driver when updating consistency group %(group_id)s.'), {'group_id': group.id}) group.status = 'error' group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'status': 'error'}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'status': 'error'}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error occurred when updating consistency group %(group_id)s.'), {'group_id': group.id}) group.status = 'error' group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'status': 'error'}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'status': 'error'}) now = timeutils.utcnow() group.status = 'available' group.updated_at = now group.save() for add_vol in add_volumes_ref: self.db.volume_update(context, add_vol['id'], {'consistencygroup_id': group.id, 'updated_at': now}) for rem_vol in remove_volumes_ref: self.db.volume_update(context, rem_vol['id'], {'consistencygroup_id': None, 'updated_at': now}) self._notify_about_consistencygroup_usage(context, group, 'update.end') LOG.info(_LI('Update consistency group completed successfully.'), resource={'type': 'consistency_group', 'id': group.id})
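add_volumes and remove_volumes arrive as comma-separated ID strings, with None or an empty string meaning no change, exactly as the guards at the top of the function assume. The parsing step in isolation:

def split_volume_list(csv_ids):
    # '' and None both mean "no volumes", matching the manager's guards
    return csv_ids.split(',') if csv_ids else []

assert split_volume_list('uuid-a,uuid-b') == ['uuid-a', 'uuid-b']
assert split_volume_list(None) == []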
def create_cgsnapshot(self, context, cgsnapshot): 'Creates the cgsnapshot.' caller_context = context context = context.elevated() LOG.info(_LI('Cgsnapshot %s: creating.'), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'create.start') snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug('Cgsnapshot %(cgsnap_id)s: creating.', {'cgsnap_id': cgsnapshot.id}) cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context (model_update, snapshots_model_update) = self.driver.create_cgsnapshot(context, cgsnapshot, snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: self.db.snapshot_update(context, snap_model['id'], snap_model) if ((snap_model['status'] in [fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR]) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = snap_model['status'] if model_update: if (model_update['status'] == 'error'): msg = (_('Error occurred when creating cgsnapshot %s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() if (not snapshots_model_update): for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: volume_id = snapshot['volume_id'] snapshot_id = snapshot['id'] vol_ref = self.db.volume_get(context, volume_id) if vol_ref.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id) except exception.CinderException as ex: LOG.error(_LE('Failed updating %(snapshot_id)s metadata using the provided volumes %(volume_id)s metadata'), {'volume_id': volume_id, 'snapshot_id': snapshot_id}) self.db.snapshot_update(context, snapshot_id, {'status': fields.SnapshotStatus.ERROR}) raise exception.MetadataCopyFailure(reason=six.text_type(ex)) self.db.snapshot_update(context, snapshot['id'], {'status': fields.SnapshotStatus.AVAILABLE, 'progress': '100%'}) cgsnapshot.status = 'available' cgsnapshot.save() LOG.info(_LI('cgsnapshot %s: created successfully'), cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'create.end') return cgsnapshot
-1,498,914,793,036,234,000
Creates the cgsnapshot.
cinder/volume/manager.py
create_cgsnapshot
ISCAS-VDI/cinder-base
python
def create_cgsnapshot(self, context, cgsnapshot): caller_context = context context = context.elevated() LOG.info(_LI('Cgsnapshot %s: creating.'), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'create.start') snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug('Cgsnapshot %(cgsnap_id)s: creating.', {'cgsnap_id': cgsnapshot.id}) cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context (model_update, snapshots_model_update) = self.driver.create_cgsnapshot(context, cgsnapshot, snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: self.db.snapshot_update(context, snap_model['id'], snap_model) if ((snap_model['status'] in [fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR]) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = snap_model['status'] if model_update: if (model_update['status'] == 'error'): msg = (_('Error occurred when creating cgsnapshot %s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() if (not snapshots_model_update): for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: volume_id = snapshot['volume_id'] snapshot_id = snapshot['id'] vol_ref = self.db.volume_get(context, volume_id) if vol_ref.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id) except exception.CinderException as ex: LOG.error(_LE('Failed updating %(snapshot_id)s metadata using the provided volumes %(volume_id)s metadata'), {'volume_id': volume_id, 'snapshot_id': snapshot_id}) self.db.snapshot_update(context, snapshot_id, {'status': fields.SnapshotStatus.ERROR}) raise exception.MetadataCopyFailure(reason=six.text_type(ex)) self.db.snapshot_update(context, snapshot['id'], {'status': fields.SnapshotStatus.AVAILABLE, 'progress': '100%'}) cgsnapshot.status = 'available' cgsnapshot.save() LOG.info(_LI('cgsnapshot %s: created successfully'), cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'create.end') return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot): 'Deletes cgsnapshot.' caller_context = context context = context.elevated() project_id = cgsnapshot.project_id LOG.info(_LI('cgsnapshot %s: deleting'), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'delete.start') snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug('cgsnapshot %(cgsnap_id)s: deleting', {'cgsnap_id': cgsnapshot.id}) cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context (model_update, snapshots_model_update) = self.driver.delete_cgsnapshot(context, cgsnapshot, snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: snap = next((item for item in snapshots if (item.id == snap_model['id'])), None) if snap: snap.status = snap_model['status'] snap.save() if ((snap_model['status'] in [fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR]) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = snap_model['status'] if model_update: if (model_update['status'] in ['error_deleting', 'error']): msg = (_('Error occurred when deleting cgsnapshot %s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() if (not snapshots_model_update): for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': (- 1)} else: reserve_opts = {'snapshots': (- 1), 'gigabytes': (- snapshot['volume_size'])} volume_ref = self.db.volume_get(context, snapshot['volume_id']) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE('Failed to update usages deleting snapshot')) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot['id']) self.db.snapshot_destroy(context, snapshot['id']) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) cgsnapshot.destroy() LOG.info(_LI('cgsnapshot %s: deleted successfully'), cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'delete.end', snapshots)
-3,840,660,759,487,432,000
Deletes cgsnapshot.
cinder/volume/manager.py
delete_cgsnapshot
ISCAS-VDI/cinder-base
python
def delete_cgsnapshot(self, context, cgsnapshot): caller_context = context context = context.elevated() project_id = cgsnapshot.project_id LOG.info(_LI('cgsnapshot %s: deleting'), cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot(context, cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'delete.start') snapshots_model_update = None model_update = None try: utils.require_driver_initialized(self.driver) LOG.debug('cgsnapshot %(cgsnap_id)s: deleting', {'cgsnap_id': cgsnapshot.id}) cgsnapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context (model_update, snapshots_model_update) = self.driver.delete_cgsnapshot(context, cgsnapshot, snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: snap = next((item for item in snapshots if (item.id == snap_model['id'])), None) if snap: snap.status = snap_model['status'] snap.save() if ((snap_model['status'] in [fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR]) and (model_update['status'] not in ['error_deleting', 'error'])): model_update['status'] = snap_model['status'] if model_update: if (model_update['status'] in ['error_deleting', 'error']): msg = (_('Error occurred when deleting cgsnapshot %s.') % cgsnapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: cgsnapshot.update(model_update) cgsnapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): cgsnapshot.status = 'error' cgsnapshot.save() if (not snapshots_model_update): for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': (- 1)} else: reserve_opts = {'snapshots': (- 1), 'gigabytes': (- snapshot['volume_size'])} volume_ref = self.db.volume_get(context, snapshot['volume_id']) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception(_LE('Failed to update usages deleting snapshot')) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot['id']) self.db.snapshot_destroy(context, snapshot['id']) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) cgsnapshot.destroy() LOG.info(_LI('cgsnapshot %s: deleted successfully'), cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, 'delete.end', snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): 'Finalize migration process on backend device.' model_update = None model_update_default = {'_name_id': new_volume.name_id, 'provider_location': new_volume.provider_location} try: model_update = self.driver.update_migrated_volume(ctxt, volume, new_volume, volume_status) except NotImplementedError: model_update = model_update_default if model_update: model_update_default.update(model_update) model_update_new = dict() for key in model_update: if (key == 'metadata'): if volume.get('volume_metadata'): model_update_new[key] = {metadata['key']: metadata['value'] for metadata in volume.volume_metadata} elif (key == 'admin_metadata'): model_update_new[key] = {metadata['key']: metadata['value'] for metadata in volume.volume_admin_metadata} else: model_update_new[key] = volume[key] with new_volume.obj_as_admin(): new_volume.update(model_update_new) new_volume.save() with volume.obj_as_admin(): volume.update(model_update_default) volume.save()
-2,092,127,233,417,161,500
Finalize migration process on backend device.
cinder/volume/manager.py
update_migrated_volume
ISCAS-VDI/cinder-base
python
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): model_update = None model_update_default = {'_name_id': new_volume.name_id, 'provider_location': new_volume.provider_location} try: model_update = self.driver.update_migrated_volume(ctxt, volume, new_volume, volume_status) except NotImplementedError: model_update = model_update_default if model_update: model_update_default.update(model_update) model_update_new = dict() for key in model_update: if (key == 'metadata'): if volume.get('volume_metadata'): model_update_new[key] = {metadata['key']: metadata['value'] for metadata in volume.volume_metadata} elif (key == 'admin_metadata'): model_update_new[key] = {metadata['key']: metadata['value'] for metadata in volume.volume_admin_metadata} else: model_update_new[key] = volume[key] with new_volume.obj_as_admin(): new_volume.update(model_update_new) new_volume.save() with volume.obj_as_admin(): volume.update(model_update_default) volume.save()
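When the driver echoes a 'metadata' or 'admin_metadata' key, the manager rebuilds the value from the source volume's metadata rows, each row being a key/value pair. The dict-comprehension step on its own, with plain dicts standing in for the DB rows:

rows = [{'key': 'attached_mode', 'value': 'rw'},
        {'key': 'readonly', 'value': 'False'}]
flattened = {row['key']: row['value'] for row in rows}
assert flattened == {'attached_mode': 'rw', 'readonly': 'False'}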
def failover_host(self, context, secondary_backend_id=None): "Failover a backend to a secondary replication target.\n\n Instructs a replication capable/configured backend to failover\n to one of it's secondary replication targets. host=None is\n an acceptable input, and leaves it to the driver to failover\n to the only configured target, or to choose a target on it's\n own. All of the hosts volumes will be passed on to the driver\n in order for it to determine the replicated volumes on the host,\n if needed.\n\n :param context: security context\n :param secondary_backend_id: Specifies backend_id to fail over to\n " svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') volumes = objects.VolumeList.get_all_by_host(context, self.host) exception_encountered = False try: (active_backend_id, volume_update_list) = self.driver.failover_host(context, volumes, secondary_id=secondary_backend_id) except exception.UnableToFailOver: LOG.exception(_LE('Failed to perform replication failover')) service.replication_status = fields.ReplicationStatus.FAILOVER_ERROR service.save() exception_encountered = True except exception.InvalidReplicationTarget: LOG.exception(_LE('Invalid replication target specified for failover')) if (secondary_backend_id == 'default'): service.replication_status = fields.ReplicationStatus.FAILED_OVER else: service.replication_status = fields.ReplicationStatus.ENABLED service.save() exception_encountered = True except exception.VolumeDriverException: LOG.error(_LE('Driver reported error during replication failover.')) service.status = 'error' service.save() exception_encountered = True if exception_encountered: LOG.error(_LE('Error encountered during failover on host: %(host)s invalid target ID %(backend_id)s'), {'host': self.host, 'backend_id': secondary_backend_id}) return if (secondary_backend_id == 'default'): service.replication_status = fields.ReplicationStatus.ENABLED service.active_backend_id = '' if service.frozen: service.disabled = True service.disabled_reason = 'frozen' else: service.disabled = False service.disabled_reason = '' service.save() else: service.replication_status = fields.ReplicationStatus.FAILED_OVER service.active_backend_id = active_backend_id service.disabled = True service.disabled_reason = 'failed-over' service.save() for update in volume_update_list: if (not update.get('volume_id')): raise exception.UnableToFailOver(reason=_("Update list, doesn't include volume_id")) vobj = objects.Volume.get_by_id(context, update['volume_id']) vobj.update(update.get('updates', {})) vobj.save() LOG.info(_LI('Failed over to replication target successfully.'))
-7,785,357,875,959,712,000
Failover a backend to a secondary replication target. Instructs a replication capable/configured backend to fail over to one of its secondary replication targets. host=None is an acceptable input, and leaves it to the driver to fail over to the only configured target, or to choose a target on its own. All of the host's volumes will be passed on to the driver in order for it to determine the replicated volumes on the host, if needed. :param context: security context :param secondary_backend_id: Specifies backend_id to fail over to
cinder/volume/manager.py
failover_host
ISCAS-VDI/cinder-base
python
def failover_host(self, context, secondary_backend_id=None): "Failover a backend to a secondary replication target.\n\n Instructs a replication capable/configured backend to failover\n to one of it's secondary replication targets. host=None is\n an acceptable input, and leaves it to the driver to failover\n to the only configured target, or to choose a target on it's\n own. All of the hosts volumes will be passed on to the driver\n in order for it to determine the replicated volumes on the host,\n if needed.\n\n :param context: security context\n :param secondary_backend_id: Specifies backend_id to fail over to\n " svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') volumes = objects.VolumeList.get_all_by_host(context, self.host) exception_encountered = False try: (active_backend_id, volume_update_list) = self.driver.failover_host(context, volumes, secondary_id=secondary_backend_id) except exception.UnableToFailOver: LOG.exception(_LE('Failed to perform replication failover')) service.replication_status = fields.ReplicationStatus.FAILOVER_ERROR service.save() exception_encountered = True except exception.InvalidReplicationTarget: LOG.exception(_LE('Invalid replication target specified for failover')) if (secondary_backend_id == 'default'): service.replication_status = fields.ReplicationStatus.FAILED_OVER else: service.replication_status = fields.ReplicationStatus.ENABLED service.save() exception_encountered = True except exception.VolumeDriverException: LOG.error(_LE('Driver reported error during replication failover.')) service.status = 'error' service.save() exception_encountered = True if exception_encountered: LOG.error(_LE('Error encountered during failover on host: %(host)s invalid target ID %(backend_id)s'), {'host': self.host, 'backend_id': secondary_backend_id}) return if (secondary_backend_id == 'default'): service.replication_status = fields.ReplicationStatus.ENABLED service.active_backend_id = '' if service.frozen: service.disabled = True service.disabled_reason = 'frozen' else: service.disabled = False service.disabled_reason = '' service.save() else: service.replication_status = fields.ReplicationStatus.FAILED_OVER service.active_backend_id = active_backend_id service.disabled = True service.disabled_reason = 'failed-over' service.save() for update in volume_update_list: if (not update.get('volume_id')): raise exception.UnableToFailOver(reason=_("Update list, doesn't include volume_id")) vobj = objects.Volume.get_by_id(context, update['volume_id']) vobj.update(update.get('updates', {})) vobj.save() LOG.info(_LI('Failed over to replication target successfully.'))
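The driver's failover_host returns the new active backend id plus a volume_update_list whose entries must carry a volume_id and may carry an updates dict, as the loop above enforces. A hedged sketch of consuming such a list (toy dict-backed volumes):

def apply_failover_updates(volume_update_list, volumes_by_id):
    for update in volume_update_list:
        if not update.get('volume_id'):
            raise ValueError("Update list doesn't include volume_id")
        vol = volumes_by_id[update['volume_id']]
        vol.update(update.get('updates', {}))  # e.g. replication_status

vols = {'v1': {'replication_status': 'enabled'}}
apply_failover_updates(
    [{'volume_id': 'v1', 'updates': {'replication_status': 'failed-over'}}],
    vols)
assert vols['v1']['replication_status'] == 'failed-over'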
def freeze_host(self, context): 'Freeze management plane on this backend.\n\n Basically puts the control/management plane into a\n Read Only state. We should handle this in the scheduler,\n however this is provided to let the driver know in case it\n needs/wants to do something specific on the backend.\n\n :param context: security context\n ' try: self.driver.freeze_backend(context) except exception.VolumeDriverException: LOG.warning(_LW('Error encountered on Cinder backend during freeze operation, service is frozen, however notification to driver has failed.')) svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') service.disabled = True service.disabled_reason = 'frozen' service.save() LOG.info(_LI('Set backend status to frozen successfully.')) return True
2,362,276,871,906,208,000
Freeze management plane on this backend. Basically puts the control/management plane into a Read Only state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context
cinder/volume/manager.py
freeze_host
ISCAS-VDI/cinder-base
python
def freeze_host(self, context): 'Freeze management plane on this backend.\n\n Basically puts the control/management plane into a\n Read Only state. We should handle this in the scheduler,\n however this is provided to let the driver know in case it\n needs/wants to do something specific on the backend.\n\n :param context: security context\n ' try: self.driver.freeze_backend(context) except exception.VolumeDriverException: LOG.warning(_LW('Error encountered on Cinder backend during freeze operation, service is frozen, however notification to driver has failed.')) svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') service.disabled = True service.disabled_reason = 'frozen' service.save() LOG.info(_LI('Set backend status to frozen successfully.')) return True
def thaw_host(self, context): 'UnFreeze management plane on this backend.\n\n Basically puts the control/management plane back into\n a normal state. We should handle this in the scheduler,\n however this is provided to let the driver know in case it\n needs/wants to do something specific on the backend.\n\n :param context: security context\n ' try: self.driver.thaw_backend(context) except exception.VolumeDriverException: LOG.error(_LE('Error encountered on Cinder backend during thaw operation, service will remain frozen.')) return False svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') service.disabled = False service.disabled_reason = '' service.save() LOG.info(_LI('Thawed backend successfully.')) return True
3,111,996,613,704,219,000
Unfreeze management plane on this backend. Basically puts the control/management plane back into a normal state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context
cinder/volume/manager.py
thaw_host
ISCAS-VDI/cinder-base
python
def thaw_host(self, context): 'UnFreeze management plane on this backend.\n\n Basically puts the control/management plane back into\n a normal state. We should handle this in the scheduler,\n however this is provided to let the driver know in case it\n needs/wants to do something specific on the backend.\n\n :param context: security context\n ' try: self.driver.thaw_backend(context) except exception.VolumeDriverException: LOG.error(_LE('Error encountered on Cinder backend during thaw operation, service will remain frozen.')) return False svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') service.disabled = False service.disabled_reason = '' service.save() LOG.info(_LI('Thawed backend successfully.')) return True
def get_capabilities(self, context, discover): 'Get capabilities of backend storage.' if discover: self.driver.init_capabilities() capabilities = self.driver.capabilities LOG.debug('Obtained capabilities list: %s.', capabilities) return capabilities
5,667,258,279,005,569,000
Get capabilities of backend storage.
cinder/volume/manager.py
get_capabilities
ISCAS-VDI/cinder-base
python
def get_capabilities(self, context, discover): if discover: self.driver.init_capabilities() capabilities = self.driver.capabilities LOG.debug('Obtained capabilities list: %s.', capabilities) return capabilities
def compute_mask(argv=None): 'Function to compute a mask of non-responsive pixels from FXS images\n extracted from xtc (smd,idx,xtc format) or h5 files.\n Works for Single CPU, Multi-Processor interactive jobs and MPI batch jobs\n\n For a definition of input arguments argv and batch processing instructions see *** mpi_fxs_launch.py ***\n\n compute_mask produces the following output files:\n\n * Index file : Information about the events processed including time-stamps, beam center, total and peak intensities, streak locations, particle size etc\n * Average : Average image in cartesian coordinates\n * Variance : Variance map of intensities in cartesian coordinates\n * Mask : Mask image in cartesian coordinates\n\n ' if (argv == None): argv = sys.argv[1:] try: from mpi4py import MPI except ImportError: raise Sorry('MPI not found') comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() if (argv.hit is None): hit = (- 1e+20) else: hit = argv.hit ftype = argv.ftype if (argv.param_path is not None): if (ftype == 'h5'): param_file = np.genfromtxt(argv.param_path, skiprows=1, dtype=None) (timestamps, filestamps) = pnccd_tbx.get_h5_event(param_file) elif (ftype == 'xtc'): param_file = np.genfromtxt(argv.param_path, skiprows=1, dtype=None) timestamps = pnccd_tbx.get_time(param_file) else: param_file = np.genfromtxt(argv.param_path, skiprows=1) timestamps = pnccd_tbx.get_psana_event(param_file) else: timestamps = None first = argv.first last = argv.last if (ftype == 'h5'): import h5py run = int(argv.run) if (argv.param_path is None): timestamps = [] filestamps = [] for i in os.listdir(argv.xtc_dir): if i.endswith('.h5'): f = h5py.File(i, 'r') filestamps.append(i[(- 7):(- 4)]) timestamps.append(list(f.keys())) continue else: continue dataset_name = ('%s-r%s' % (argv.experiment, str(argv.run).zfill(4))) exprun = os.path.join(argv.xtc_dir, dataset_name) if (argv.first is None): first = 0 if (argv.last is None): last = len(timestamps) else: last = min(last, len(timestamps)) timestamps = timestamps[first:last] filestamps = filestamps[first:last] evtgen = h5gen else: exprun = ('exp=%s:run=%d' % (argv.experiment, argv.run)) if (ftype == 'xtc'): dataset_name = (exprun + ':xtc') elif (ftype == 'idx'): dataset_name = (exprun + ':idx') elif (ftype == 'idx_ffb'): dataset_name = (exprun + ':idx') dataset_name += (':dir=/reg/d/ffb/%s/%s/xtc' % (argv.experiment[0:3], argv.experiment)) elif (ftype == 'smd'): dataset_name = (exprun + ':smd') elif (ftype == 'smd_ffb'): dataset_name = (exprun + ':smd') dataset_name += (':dir=/reg/d/ffb/%s/%s/xtc:live' % (argv.experiment[0:3], argv.experiment)) exprun = dataset_name ds = DataSource(dataset_name) run = next(ds.runs()) if ((ftype == 'smd') or (ftype == 'smd_ffb') or (ftype == 'xtc')): evtgen = smdgen elif ((ftype == 'idx') or (ftype == 'idx_ffb')): evtgen = idxgen if (size == 1): plot = argv.plot else: plot = 0 FXS = fxs.fluctuation_scattering(dataset_name=exprun, detector_address=argv.address, data_type=argv.ftype, mask_path=argv.mask_path, mask_angles=None, mask_widths=None, backimg_path=argv.bg_img_path, backmsk_path=argv.bg_msk_path, geom_path=argv.geom_path, det_dist=argv.det_distance, det_pix=argv.det_pixel, beam_l=argv.lambda_b, mask_thr=argv.thr, nQ=argv.nQ, nPhi=argv.nPhi, dQ=argv.dQ, dPhi=argv.dP, cent0=[argv.x, argv.y], r_max=argv.r_max, dr=argv.dr, dx=argv.dx, dy=argv.dy, r_0=argv.r0, q_bound=argv.q_bound, peak=[0.037, 0.064], dpeak=[0.002, 0.002]) FXS.cnt = np.array([0.0]) if (argv.param_path is None): maxevents = 400000 else: maxevents = min(len(timestamps), len(timestamps[first:last])) FXS.get_index(maxevents, flag=1) if (size > 1): if (rank > 0): hd = pnccd_hit.hit() for (j, evt) in evtgen(run, timestamps=timestamps, first=first, last=last): if ((j % 10) == 0): print('Rank', rank, 'processing event', j) if (ftype == 'h5'): FXS.get_h5(filestamps[j], evt) else: FXS.get_image(evt) FXS.image = np.copy(FXS.img) if ((FXS.image is not None) and (float(FXS.image.sum()) > hit)): FXS.get_beam(plot=plot) FXS.get_polar(plot=plot) FXS.get_streak_mask(plot=plot) FXS.store_image(j) if (ftype == 'h5'): FXS.store_index_h5(evt, j, flag=0) else: time = evt.get(EventId).time() fid = evt.get(EventId).fiducials() sec = time[0] nsec = time[1] et = EventTime(int(((sec << 32) | nsec)), fid) FXS.store_index(et, j, flag=0) if ((int(FXS.cnt) % 10) == 0): print('Rank', rank, 'processed events: ', int(FXS.cnt)) if ((int(FXS.cnt) % 50) == 0): tmp_n = int(FXS.cnt) tmp_ind = np.column_stack((FXS.tot_int, FXS.tot_size, FXS.tot_score)) hd.send(tmp_n, ind=tmp_ind) hd.endrun() print('Rank', rank, 'total events: ', int(FXS.cnt), ' * ') else: if (ftype == 'h5'): FXS.run_nr = run else: FXS.run_nr = int(run.run()) hd = pnccd_hit.hit() idim = (maxevents, 3) hd.total_ind = ([np.zeros(idim)] * (size - 1)) hd.total_ev_i = ([0.0] * (size - 1)) nClients = (size - 1) while (nClients > 0): if hd.recv(): nClients -= 1 else: ns = sum(hd.total_ev_s) ni = sum(hd.total_ev_i) if ((ns % 100) == 0): IND = np.zeros(idim) for i in range((size - 1)): IND = (IND + hd.total_ind[i]) FXS.publish(ind=IND, n_i=ni) else: for (j, evt) in evtgen(run, timestamps=timestamps, first=first, last=last): if ((j % 10) == 0): print('Rank', rank, 'processing event', j) if (ftype == 'h5'): FXS.get_h5(filestamps[j], evt) else: FXS.get_image(evt) FXS.image = np.copy(FXS.img) if ((FXS.image is not None) and (float(FXS.image.sum()) > hit)): FXS.get_beam(plot=plot) FXS.get_polar() FXS.get_streak_mask(plot=0) FXS.store_image(j) if (ftype == 'h5'): FXS.store_index_h5(evt, j, flag=0) else: time = evt.get(EventId).time() fid = evt.get(EventId).fiducials() sec = time[0] nsec = time[1] et = EventTime(int(((sec << 32) | nsec)), fid) FXS.store_index(et, j, flag=0) print('Rank', rank, 'total events: ', int(FXS.cnt), ' * ') if (size > 1): print('Synchronizing rank', rank) Tot = np.zeros(FXS.cnt.shape) comm.Reduce(FXS.cnt, Tot) if ((rank == 0) and (Tot[0] == 0)): raise Sorry('No events found in the run') Images = np.zeros(FXS.images.shape) comm.Reduce(FXS.images, Images) Tot_t = np.zeros(FXS.tot_t.shape) comm.Reduce(FXS.tot_t, Tot_t) Tot_s = np.zeros(FXS.tot_s.shape) comm.Reduce(FXS.tot_s, Tot_s) Tot_ns = np.zeros(FXS.tot_ns.shape) comm.Reduce(FXS.tot_ns, Tot_ns) Tot_fd = np.zeros(FXS.tot_fd.shape) comm.Reduce(FXS.tot_fd, Tot_fd) Tot_int = np.zeros(FXS.tot_int.shape) comm.Reduce(FXS.tot_int, Tot_int) Tot_peak1 = np.zeros(FXS.tot_peak1_int.shape) comm.Reduce(FXS.tot_peak1_int, Tot_peak1) Tot_peak2 = np.zeros(FXS.tot_peak2_int.shape) comm.Reduce(FXS.tot_peak2_int, Tot_peak2) Tot_s_m = np.zeros(FXS.tot_streak_m.shape) comm.Reduce(FXS.tot_streak_m, Tot_s_m) Tot_s_s = np.zeros(FXS.tot_streak_s.shape) comm.Reduce(FXS.tot_streak_s, Tot_s_s) Tot_cx = np.zeros(FXS.tot_cx.shape) comm.Reduce(FXS.tot_cx, Tot_cx) Tot_cy = np.zeros(FXS.tot_cy.shape) comm.Reduce(FXS.tot_cy, Tot_cy) Tot_size = np.zeros(FXS.tot_size.shape) comm.Reduce(FXS.tot_size, Tot_size) Tot_score = np.zeros(FXS.tot_score.shape) comm.Reduce(FXS.tot_score, Tot_score) if (rank == 0): if (size > 1): print('Synchronized') (Ave, Var, Mask) = pnccd_tbx.pixel_mask(Images, thr=0.12) if (argv.outputdir is None): opath = os.getcwd() else: opath = argv.outputdir f_index = os.path.join(opath, (('Index_run' + str(argv.run)) + '.dat')) stamps = ['Time', 'Seconds', 'Nanoseconds', 'Fiducial', 'Total Intensity', ((('Peak1, q=' + str(FXS.peak[0])) + '+/-') + str(FXS.dpeak[0])), ((('Peak2, q=' + str(FXS.peak[1])) + '+/-') + str(FXS.dpeak[1])), 'Mean streak angle', 'Std streak angle', 'Beam X', 'Beam Y', 'Radius [Ang]', 'Score'] head = ' '.join(stamps) f_ave = os.path.join(opath, (('Average_map_' + str(argv.run)) + '.dat')) f_var = os.path.join(opath, (('Variance_map_' + str(argv.run)) + '.dat')) f_mask = os.path.join(opath, (('Mask_map_' + str(argv.run)) + '.dat')) nz = np.nonzero(Tot_t) fend = (nz[0][(- 1)] + 1) f = open(f_index, 'w') np.savetxt(f, np.c_[(Tot_t[:fend], Tot_s[:fend], Tot_ns[:fend], Tot_fd[:fend], Tot_int[:fend], Tot_peak1[:fend], Tot_peak2[:fend], Tot_s_m[:fend], Tot_s_s[:fend], Tot_cx[:fend], Tot_cy[:fend], Tot_size[:fend], Tot_score[:fend])], header=head, comments='') f.close() f = open(f_ave, 'w') np.savetxt(f, Ave) f.close() f = open(f_var, 'w') np.savetxt(f, Var) f.close() f = open(f_mask, 'w') np.savetxt(f, Mask) f.close()
-1,029,744,097,754,570,900
Function to compute a mask of non-responsive pixels from FXS images extracted from xtc (smd,idx,xtc format) or h5 files. Works for Single CPU, Multi-Processor interactive jobs and MPI batch jobs. For a definition of input arguments argv and batch processing instructions see *** mpi_fxs_launch.py *** compute_mask produces the following output files: * Index file : Information about the events processed including time-stamps, beam center, total and peak intensities, streak locations, particle size etc * Average : Average image in cartesian coordinates * Variance : Variance map of intensities in cartesian coordinates * Mask : Mask image in cartesian coordinates
modules/cctbx_project/xfel/amo/pnccd_ana/mpi_fxs_mask.py
compute_mask
jorgediazjr/dials-dev20191018
python
def compute_mask(argv=None): 'Function to compute a mask of non-resposive pixels from FXS images\n extracted from xtc (smd,idx,xtc format) or h5 files.\n Works for Single CPU, Multi-Processor interactive jobs and MPI batch jobs\n\n For a definition of input arguments argv and batch processing instructions see *** mpi_fxs_launch.py ***\n\n compute_mask produces the following output files:\n\n * Index file : Information about the events processed including time-stamps, beam center, total and peak intensities, streak locations, particle size etc\n * Average : Average image in cartesian coordinates\n * Variance : Variance map of intensities in cartesian coordinates\n * Mask : Mask image in cartesian coordinates\n\n ' if (argv == None): argv = sys.argv[1:] try: from mpi4py import MPI except ImportError: raise Sorry('MPI not found') comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() if (argv.hit is None): hit = (- 1e+20) else: hit = argv.hit ftype = argv.ftype if (argv.param_path is not None): if (ftype == 'h5'): param_file = np.genfromtxt(argv.param_path, skiprows=1, dtype=None) (timestamps, filestamps) = pnccd_tbx.get_h5_event(param_file) elif (ftype == 'xtc'): param_file = np.genfromtxt(argv.param_path, skiprows=1, dtype=None) timestamps = pnccd_tbx.get_time(param_file) else: param_file = np.genfromtxt(argv.param_path, skiprows=1) timestamps = pnccd_tbx.get_psana_event(param_file) else: timestamps = None first = argv.first last = argv.last if (ftype == 'h5'): import h5py run = int(argv.run) if (argv.param_path is None): timestamps = [] filestamps = [] for i in os.listdir(argv.xtc_dir): if i.endswith('.h5'): f = h5py.File(i, 'r') filestamps.append(i[(- 7):(- 4)]) timestamps.append(list(f.keys())) continue else: continue dataset_name = ('%s-r%s' % (argv.experiment, str(argv.run).zfill(4))) exprun = os.path.join(argv.xtc_dir, dataset_name) if (argv.first is None): first = 0 if (argv.last is None): last = len(timestamps) else: last = min(last, len(timestamps)) timestamps = timestamps[first:last] filestamps = filestamps[first:last] evtgen = h5gen else: exprun = ('exp=%s:run=%d' % (argv.experiment, argv.run)) if (ftype == 'xtc'): dataset_name = (exprun + ':xtc') elif (ftype == 'idx'): dataset_name = (exprun + ':idx') elif (ftype == 'idx_ffb'): dataset_name = (exprun + ':idx') dataset_name += (':dir=/reg/d/ffb/%s/%s/xtc' % (argv.experiment[0:3], argv.experiment)) elif (ftype == 'smd'): dataset_name = (exprun + ':smd') elif (ftype == 'smd_ffb'): dataset_name = (exprun + ':smd') dataset_name += (':dir=/reg/d/ffb/%s/%s/xtc:live' % (argv.experiment[0:3], argv.experiment)) exprun = dataset_name ds = DataSource(dataset_name) run = next(ds.runs()) if ((ftype == 'smd') or (ftype == 'smd_ffb') or (ftype == 'xtc')): evtgen = smdgen elif ((ftype == 'idx') or (ftype == 'idx_ffb')): evtgen = idxgen if (size == 1): plot = argv.plot else: plot = 0 FXS = fxs.fluctuation_scattering(dataset_name=exprun, detector_address=argv.address, data_type=argv.ftype, mask_path=argv.mask_path, mask_angles=None, mask_widths=None, backimg_path=argv.bg_img_path, backmsk_path=argv.bg_msk_path, geom_path=argv.geom_path, det_dist=argv.det_distance, det_pix=argv.det_pixel, beam_l=argv.lambda_b, mask_thr=argv.thr, nQ=argv.nQ, nPhi=argv.nPhi, dQ=argv.dQ, dPhi=argv.dP, cent0=[argv.x, argv.y], r_max=argv.r_max, dr=argv.dr, dx=argv.dx, dy=argv.dy, r_0=argv.r0, q_bound=argv.q_bound, peak=[0.037, 0.064], dpeak=[0.002, 0.002]) FXS.cnt = np.array([0.0]) if (argv.param_path is None): maxevents = 400000 else: maxevents = 
min(len(timestamps), len(timestamps[first:last])) FXS.get_index(maxevents, flag=1) if (size > 1): if (rank > 0): hd = pnccd_hit.hit() for (j, evt) in evtgen(run, timestamps=timestamps, first=first, last=last): if ((j % 10) == 0): print('Rank', rank, 'processing event', j) if (ftype == 'h5'): FXS.get_h5(filestamps[j], evt) else: FXS.get_image(evt) FXS.image = np.copy(FXS.img) if ((FXS.image is not None) and (float(FXS.image.sum()) > hit)): FXS.get_beam(plot=plot) FXS.get_polar(plot=plot) FXS.get_streak_mask(plot=plot) FXS.store_image(j) if (ftype == 'h5'): FXS.store_index_h5(evt, j, flag=0) else: time = evt.get(EventId).time() fid = evt.get(EventId).fiducials() sec = time[0] nsec = time[1] et = EventTime(int(((sec << 32) | nsec)), fid) FXS.store_index(et, j, flag=0) if ((int(FXS.cnt) % 10) == 0): print('Rank', rank, 'processed events: ', int(FXS.cnt)) if ((int(FXS.cnt) % 50) == 0): tmp_n = int(FXS.cnt) tmp_ind = np.column_stack((FXS.tot_int, FXS.tot_size, FXS.tot_score)) hd.send(tmp_n, ind=tmp_ind) hd.endrun() print('Rank', rank, 'total events: ', int(FXS.cnt), ' * ') else: if (ftype == 'h5'): FXS.run_nr = run else: FXS.run_nr = int(run.run()) hd = pnccd_hit.hit() idim = (maxevents, 3) hd.total_ind = ([np.zeros(idim)] * (size - 1)) hd.total_ev_i = ([0.0] * (size - 1)) nClients = (size - 1) while (nClients > 0): if hd.recv(): nClients -= 1 else: ns = sum(hd.total_ev_s) ni = sum(hd.total_ev_i) if ((ns % 100) == 0): IND = np.zeros(idim) for i in range((size - 1)): IND = (IND + hd.total_ind[i]) FXS.publish(ind=IND, n_i=ni) else: for (j, evt) in evtgen(run, timestamps=timestamps, first=first, last=last): if ((j % 10) == 0): print('Rank', rank, 'processing event', j) if (ftype == 'h5'): FXS.get_h5(filestamps[j], evt) else: FXS.get_image(evt) FXS.image = np.copy(FXS.img) if ((FXS.image is not None) and (float(FXS.image.sum()) > hit)): FXS.get_beam(plot=plot) FXS.get_polar() FXS.get_streak_mask(plot=0) FXS.store_image(j) if (ftype == 'h5'): FXS.store_index_h5(evt, j, flag=0) else: time = evt.get(EventId).time() fid = evt.get(EventId).fiducials() sec = time[0] nsec = time[1] et = EventTime(int(((sec << 32) | nsec)), fid) FXS.store_index(et, j, flag=0) print('Rank', rank, 'total events: ', int(FXS.cnt), ' * ') if (size > 1): print('Synchronizing rank', rank) Tot = np.zeros(FXS.cnt.shape) comm.Reduce(FXS.cnt, Tot) if ((rank == 0) and (Tot[0] == 0)): raise Sorry('No events found in the run') Images = np.zeros(FXS.images.shape) comm.Reduce(FXS.images, Images) Tot_t = np.zeros(FXS.tot_t.shape) comm.Reduce(FXS.tot_t, Tot_t) Tot_s = np.zeros(FXS.tot_s.shape) comm.Reduce(FXS.tot_s, Tot_s) Tot_ns = np.zeros(FXS.tot_ns.shape) comm.Reduce(FXS.tot_ns, Tot_ns) Tot_fd = np.zeros(FXS.tot_fd.shape) comm.Reduce(FXS.tot_fd, Tot_fd) Tot_int = np.zeros(FXS.tot_int.shape) comm.Reduce(FXS.tot_int, Tot_int) Tot_peak1 = np.zeros(FXS.tot_peak1_int.shape) comm.Reduce(FXS.tot_peak1_int, Tot_peak1) Tot_peak2 = np.zeros(FXS.tot_peak2_int.shape) comm.Reduce(FXS.tot_peak2_int, Tot_peak2) Tot_s_m = np.zeros(FXS.tot_streak_m.shape) comm.Reduce(FXS.tot_streak_m, Tot_s_m) Tot_s_s = np.zeros(FXS.tot_streak_s.shape) comm.Reduce(FXS.tot_streak_s, Tot_s_s) Tot_cx = np.zeros(FXS.tot_cx.shape) comm.Reduce(FXS.tot_cx, Tot_cx) Tot_cy = np.zeros(FXS.tot_cy.shape) comm.Reduce(FXS.tot_cy, Tot_cy) Tot_size = np.zeros(FXS.tot_size.shape) comm.Reduce(FXS.tot_size, Tot_size) Tot_score = np.zeros(FXS.tot_score.shape) comm.Reduce(FXS.tot_score, Tot_score) if (rank == 0): if (size > 1): print('Synchronized') (Ave, Var, Mask) = 
pnccd_tbx.pixel_mask(Images, thr=0.12) if (argv.outputdir is None): opath = os.getcwd() else: opath = argv.outputdir f_index = os.path.join(opath, (('Index_run' + str(argv.run)) + '.dat')) stamps = ['Time', 'Seconds', 'Nanoseconds', 'Fiducial', 'Total Intensity', ((('Peak1, q=' + str(FXS.peak[0])) + '+/-') + str(FXS.dpeak[0])), ((('Peak2, q=' + str(FXS.peak[1])) + '+/-') + str(FXS.dpeak[1])), 'Mean streak angle', 'Std streak angle', 'Beam X', 'Beam Y', 'Radius [Ang]', 'Score'] head = ' '.join(stamps) f_ave = os.path.join(opath, (('Average_map_' + str(argv.run)) + '.dat')) f_var = os.path.join(opath, (('Variance_map_' + str(argv.run)) + '.dat')) f_mask = os.path.join(opath, (('Mask_map_' + str(argv.run)) + '.dat')) nz = np.nonzero(Tot_t) fend = (nz[0][(- 1)] + 1) f = open(f_index, 'w') np.savetxt(f, np.c_[(Tot_t[:fend], Tot_s[:fend], Tot_ns[:fend], Tot_fd[:fend], Tot_int[:fend], Tot_peak1[:fend], Tot_peak2[:fend], Tot_s_m[:fend], Tot_s_s[:fend], Tot_cx[:fend], Tot_cy[:fend], Tot_size[:fend], Tot_score[:fend])], header=head, comments='') f.close() f = open(f_ave, 'w') np.savetxt(f, Ave) f.close() f = open(f_var, 'w') np.savetxt(f, Var) f.close() f = open(f_mask, 'w') np.savetxt(f, Mask) f.close()
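The body above leans on a standard mpi4py reduce-to-rank-0 pattern for its synchronization step. A minimal, self-contained sketch of that pattern (the array name local_counts is illustrative, not from the source):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

local_counts = np.array([float(rank + 1)])  # each rank's partial result
total = np.zeros_like(local_counts)         # receive buffer; filled on rank 0
comm.Reduce(local_counts, total, op=MPI.SUM, root=0)
if rank == 0:
    print('sum over all ranks:', total[0])

Launched with e.g. mpirun -n 8 python sketch.py; only rank 0 sees the reduced total, mirroring the rank-0 branch above.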
def to_ltx(a, fmt='{:6.4f}', latexarraytype='array', imstring='i', is_row_vector=True, mathform=True, brackets='()', mark_elements=[], mark_color='pink', separate_columns=[], separate_rows=[]): '\n    Return a LaTeX array given a numpy array.\n\n    Parameters\n    ----------\n    a : numpy.ndarray\n    fmt : str, default = \'{:6.2f}\'\n        python 3 formatter, optional -\n        https://mkaz.tech/python-string-format.html\n    latexarraytype : str, default = \'array\'\n        Any of\n\n        .. code:: python\n\n            "array"\n            "pmatrix"\n            "bmatrix"\n            "vmatrix"\n            "Vmatrix"\n            "Bmatrix"\n\n        if "array", you can specify the brackets\n        with the keyword ``brackets``.\n    imstring : str, default = \'i\'\n        Character to use to represent the imaginary unit.\n        Usually ``\'i\'`` or ``\'j\'``\n    is_row_vector : bool, default = True\n        If the array is 1D, should the output be\n        a row (True) or column (False) vector?\n    mathform : bool, default = True\n        whether to convert strings like ``1e+05``\n        to ``1\\times10^{5}``.\n    brackets : iterable, default = \'()\'\n        which brackets to use to wrap the matrix\n        (must be two elements long).\n        Use ``brackets = None`` if you don\'t want\n        any brackets around the array.\n    mark_elements : list, default = []\n        list of tuples containing element indices that\n        should be marked with a colorbox.\n    mark_color : str, default = \'pink\'\n        The color with which to mark matrix elements.\n    separate_columns : list, default = []\n        list of column indices before which a vertical\n        line should be drawn\n    separate_rows : list, default = []\n        list of row indices before which a horizontal\n        line should be drawn\n\n    Returns\n    -------\n    out: str\n        Formatted LaTeX string\n\n    Examples\n    --------\n    >>> from numpyarray_to_latex import to_ltx\n    >>> tex = to_ltx([[2.,2.],[2.,2.]])\n    >>> print(tex)\n    \\left(\n    \\begin{array}{}\n      2.00 & 2.00\\\\\n      2.00 & 2.00\n    \\end{array}\n    \\right)\n\n    ' a = np.array(a) if (len(a.shape) > 2): raise NotImplementedError('Arrays having more than two dimensions cannot be converted.') if (mark_elements is None): mark_elements = [] if ((a.ndim == 2) and (len(mark_elements) > 0) and (not all([hasattr(mark, '__len__') for mark in mark_elements]))): raise ValueError("If the array is 2D, ``mark_elements`` should be 2D as well, but isn't") if (len(a.shape) == 1): if ((len(mark_elements) > 0) and hasattr(mark_elements[0], '__len__')): raise ValueError("If the array is 1D, ``mark_elements`` should be 1D as well, but isn't.") a = np.array([a]) if (is_row_vector is False): a = a.T mark_elements = [(mark, 0) for mark in mark_elements] else: mark_elements = [(0, mark) for mark in mark_elements] if isinstance(mark_elements, np.ndarray): mark_elements = mark_elements.tolist() mark_elements = [tuple(row) for row in mark_elements] (nrow, ncol) = a.shape out = '' if ((brackets is not None) and (latexarraytype not in ['bmatrix', 'pmatrix', 'vmatrix', 'Bmatrix', 'Vmatrix'])): out = (('\\left' + brackets[0]) + '\n') if (len(separate_columns) > 0): if (latexarraytype != 'array'): raise ValueError('column separators can only be used for `latexarraytype = "array"`') colstr = '{' for i in range(ncol): if ((i in separate_columns) and (i > 0)): colstr += '|' colstr += 'c' colstr += '}' else: colstr = '{}' out += (((('\\begin{' + latexarraytype) + '}') + colstr) + '\n') for i in np.arange(nrow): if ((i in separate_rows) and (i > 0)): out += ' \\hline\n' out = (out + ' ') for j in np.arange(ncol): this_element = '' if (np.real(a[(i, j)]) < 0): leadstr = '' else: leadstr = ' ' if ('.' not in fmt.format(a[(i, j)])): dot_space = ' ' else: dot_space = '' if np.iscomplexobj(a[(i, j)]): real = math_form(fmt.format(np.real(a[(i, j)])), mathform=mathform) real = real.lstrip(' ') imag = math_form(fmt.format(np.imag(a[(i, j)])), is_imaginary=True, mathform=mathform) imag = imag.lstrip(' ') if (not (imag.startswith('-') or imag.startswith('+'))): number = ((real + '+') + imag) else: number = (real + imag) this_element = ((((this_element + leadstr) + number) + imstring) + dot_space) else: this_element = (((this_element + leadstr) + math_form(fmt.format(np.real(a[(i, j)])), mathform=mathform)) + dot_space) if ((i, j) in mark_elements): this_element = (((('\\colorbox{' + mark_color) + '}{$') + this_element) + '$} ') if (j < (ncol - 1)): this_element += ' & ' out += this_element if (i < (nrow - 1)): out = (out + '\\\\\n') out = ((((out + '\n') + '\\end{') + latexarraytype) + '}') if ((brackets is not None) and (latexarraytype not in ['bmatrix', 'pmatrix', 'vmatrix', 'Bmatrix', 'Vmatrix'])): out += ('\n\\right' + brackets[1]) return out
-1,413,996,771,371,158,000
Return a LaTeX array given a numpy array. Parameters ---------- a : numpy.ndarray fmt : str, default = '{:6.2f}' python 3 formatter, optional - https://mkaz.tech/python-string-format.html latexarraytype : str, default = 'array' Any of .. code:: python "array" "pmatrix" "bmatrix" "vmatrix" "Vmatrix" "Bmatrix" if "array", you can specify the brackets with the keyword ``brackets``. imstring : str, default = 'i' Character to use to represent the imaginary unit. Usually ``'i'`` or ``'j'`` is_row_vector : bool, default = True If the array is 1D, should the output be a row (True) or column (False) vector? mathform : bool, default = True whether to convert strings like ``1e+05`` to ``1\times10^{5}``. brackets : iterable, default = '()' which brackets to use to wrap the matrix (must be two elements long). Use ``brackets = None`` if you don't want any brackets around the array. mark_elements : list, default = [] list of tuples containing element indices that should be marked with a colorbox. mark_color : str, default = 'pink' The color with which to mark matrix elements. separate_columns : list, default = [] list of column indices before which a vertical line should be drawn separate_rows : list, default = [] list of row indices before which a horizontal line should be drawn Returns ------- out: str Formatted LaTeX string Examples -------- >>> from numpyarray_to_latex import to_ltx >>> tex = to_ltx([[2.,2.],[2.,2.]]) >>> print(tex) \left( \begin{array}{} 2.00 & 2.00\\ 2.00 & 2.00 \end{array} \right)
numpyarray_to_latex/main.py
to_ltx
psychemedia/numpyarray_to_latex
python
def to_ltx(a, fmt='{:6.4f}', latexarraytype='array', imstring='i', is_row_vector=True, mathform=True, brackets='()', mark_elements=[], mark_color='pink', separate_columns=[], separate_rows=[]): '\n    Return a LaTeX array given a numpy array.\n\n    Parameters\n    ----------\n    a : numpy.ndarray\n    fmt : str, default = \'{:6.2f}\'\n        python 3 formatter, optional -\n        https://mkaz.tech/python-string-format.html\n    latexarraytype : str, default = \'array\'\n        Any of\n\n        .. code:: python\n\n            "array"\n            "pmatrix"\n            "bmatrix"\n            "vmatrix"\n            "Vmatrix"\n            "Bmatrix"\n\n        if "array", you can specify the brackets\n        with the keyword ``brackets``.\n    imstring : str, default = \'i\'\n        Character to use to represent the imaginary unit.\n        Usually ``\'i\'`` or ``\'j\'``\n    is_row_vector : bool, default = True\n        If the array is 1D, should the output be\n        a row (True) or column (False) vector?\n    mathform : bool, default = True\n        whether to convert strings like ``1e+05``\n        to ``1\\times10^{5}``.\n    brackets : iterable, default = \'()\'\n        which brackets to use to wrap the matrix\n        (must be two elements long).\n        Use ``brackets = None`` if you don\'t want\n        any brackets around the array.\n    mark_elements : list, default = []\n        list of tuples containing element indices that\n        should be marked with a colorbox.\n    mark_color : str, default = \'pink\'\n        The color with which to mark matrix elements.\n    separate_columns : list, default = []\n        list of column indices before which a vertical\n        line should be drawn\n    separate_rows : list, default = []\n        list of row indices before which a horizontal\n        line should be drawn\n\n    Returns\n    -------\n    out: str\n        Formatted LaTeX string\n\n    Examples\n    --------\n    >>> from numpyarray_to_latex import to_ltx\n    >>> tex = to_ltx([[2.,2.],[2.,2.]])\n    >>> print(tex)\n    \\left(\n    \\begin{array}{}\n      2.00 & 2.00\\\\\n      2.00 & 2.00\n    \\end{array}\n    \\right)\n\n    ' a = np.array(a) if (len(a.shape) > 2): raise NotImplementedError('Arrays having more than two dimensions cannot be converted.') if (mark_elements is None): mark_elements = [] if ((a.ndim == 2) and (len(mark_elements) > 0) and (not all([hasattr(mark, '__len__') for mark in mark_elements]))): raise ValueError("If the array is 2D, ``mark_elements`` should be 2D as well, but isn't") if (len(a.shape) == 1): if ((len(mark_elements) > 0) and hasattr(mark_elements[0], '__len__')): raise ValueError("If the array is 1D, ``mark_elements`` should be 1D as well, but isn't.") a = np.array([a]) if (is_row_vector is False): a = a.T mark_elements = [(mark, 0) for mark in mark_elements] else: mark_elements = [(0, mark) for mark in mark_elements] if isinstance(mark_elements, np.ndarray): mark_elements = mark_elements.tolist() mark_elements = [tuple(row) for row in mark_elements] (nrow, ncol) = a.shape out = '' if ((brackets is not None) and (latexarraytype not in ['bmatrix', 'pmatrix', 'vmatrix', 'Bmatrix', 'Vmatrix'])): out = (('\\left' + brackets[0]) + '\n') if (len(separate_columns) > 0): if (latexarraytype != 'array'): raise ValueError('column separators can only be used for `latexarraytype = "array"`') colstr = '{' for i in range(ncol): if ((i in separate_columns) and (i > 0)): colstr += '|' colstr += 'c' colstr += '}' else: colstr = '{}' out += (((('\\begin{' + latexarraytype) + '}') + colstr) + '\n') for i in np.arange(nrow): if ((i in separate_rows) and (i > 0)): out += ' \\hline\n' out = (out + ' ') for j in np.arange(ncol): this_element = '' if (np.real(a[(i, j)]) < 0): leadstr = '' else: leadstr = ' ' if ('.' not in fmt.format(a[(i, j)])): dot_space = ' ' else: dot_space = '' if np.iscomplexobj(a[(i, j)]): real = math_form(fmt.format(np.real(a[(i, j)])), mathform=mathform) real = real.lstrip(' ') imag = math_form(fmt.format(np.imag(a[(i, j)])), is_imaginary=True, mathform=mathform) imag = imag.lstrip(' ') if (not (imag.startswith('-') or imag.startswith('+'))): number = ((real + '+') + imag) else: number = (real + imag) this_element = ((((this_element + leadstr) + number) + imstring) + dot_space) else: this_element = (((this_element + leadstr) + math_form(fmt.format(np.real(a[(i, j)])), mathform=mathform)) + dot_space) if ((i, j) in mark_elements): this_element = (((('\\colorbox{' + mark_color) + '}{$') + this_element) + '$} ') if (j < (ncol - 1)): this_element += ' & ' out += this_element if (i < (nrow - 1)): out = (out + '\\\\\n') out = ((((out + '\n') + '\\end{') + latexarraytype) + '}') if ((brackets is not None) and (latexarraytype not in ['bmatrix', 'pmatrix', 'vmatrix', 'Bmatrix', 'Vmatrix'])): out += ('\n\\right' + brackets[1]) return out
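A quick usage sketch for to_ltx, following the import path shown in the Examples block above (rendering a marked element relies on the LaTeX document loading xcolor):

import numpy as np
from numpyarray_to_latex import to_ltx

A = np.array([[1.0, -2.5], [0.0, 3.0]])
# bmatrix variant with one element highlighted via \colorbox
print(to_ltx(A, fmt='{:4.2f}', latexarraytype='bmatrix', mark_elements=[(0, 1)]))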
def get_plugin_media_path(instance, filename): '\n    Django 1.7 requires that unbound functions used in field definitions be defined outside the parent class\n    (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values)\n    This function is used within a field definition:\n\n        file = models.FileField(_("file"), upload_to=get_plugin_media_path)\n\n    and it invokes the bound method on the given instance at runtime\n    ' return instance.get_media_path(filename)
450,341,442,002,724,000
Django 1.7 requires that unbound functions used in field definitions be defined outside the parent class (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values) This function is used within a field definition: file = models.FileField(_("file"), upload_to=get_plugin_media_path) and it invokes the bound method on the given instance at runtime
cms/models/pluginmodel.py
get_plugin_media_path
stefanw/django-cms
python
def get_plugin_media_path(instance, filename): '\n    Django 1.7 requires that unbound functions used in field definitions be defined outside the parent class\n    (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values)\n    This function is used within a field definition:\n\n        file = models.FileField(_("file"), upload_to=get_plugin_media_path)\n\n    and it invokes the bound method on the given instance at runtime\n    ' return instance.get_media_path(filename)
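A hedged sketch of the pattern this record documents: keep the upload_to callable at module level so migrations can serialize a reference to it, while the real logic stays on the model. Document and document_upload_to are illustrative names, not django-cms API; Django's FileField really does call upload_to with (instance, filename):

from django.db import models

def document_upload_to(instance, filename):
    # module-level function: serializable by Django >= 1.7 migrations
    return instance.get_media_path(filename)

class Document(models.Model):
    file = models.FileField(upload_to=document_upload_to)

    def get_media_path(self, filename):
        # per-instance upload directory (illustrative layout)
        return 'documents/%s/%s' % (self.pk, filename)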
def __reduce__(self): "\n Provide pickling support. Normally, this just dispatches to Python's\n standard handling. However, for models with deferred field loading, we\n need to do things manually, as they're dynamically created classes and\n only module-level classes can be pickled by the default path.\n " data = self.__dict__ deferred_fields = [f for f in self._meta.fields if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)] model = self._meta.proxy_for_model return (model_unpickle, (model, deferred_fields), data)
6,809,623,482,667,342,000
Provide pickling support. Normally, this just dispatches to Python's standard handling. However, for models with deferred field loading, we need to do things manually, as they're dynamically created classes and only module-level classes can be pickled by the default path.
cms/models/pluginmodel.py
__reduce__
stefanw/django-cms
python
def __reduce__(self): "\n Provide pickling support. Normally, this just dispatches to Python's\n standard handling. However, for models with deferred field loading, we\n need to do things manually, as they're dynamically created classes and\n only module-level classes can be pickled by the default path.\n " data = self.__dict__ deferred_fields = [f for f in self._meta.fields if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)] model = self._meta.proxy_for_model return (model_unpickle, (model, deferred_fields), data)
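The (callable, args, state) triple returned by __reduce__ above is plain pickle protocol: unpickling calls callable(*args) and then applies the state dict. A minimal standalone illustration (Point and make_point are illustrative names):

import pickle

def make_point(cls):
    # module-level factory, so pickle can import it by name
    return cls.__new__(cls)

class Point:
    def __init__(self, x, y):
        (self.x, self.y) = (x, y)

    def __reduce__(self):
        # unpickling calls make_point(Point), then updates __dict__ from the state
        return (make_point, (Point,), self.__dict__)

p = pickle.loads(pickle.dumps(Point(1, 2)))
assert (p.x, p.y) == (1, 2)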
def get_plugin_instance(self, admin=None): "\n Given a plugin instance (usually as a CMSPluginBase), this method\n returns a tuple containing:\n\n instance - The instance AS THE APPROPRIATE SUBCLASS OF\n CMSPluginBase and not necessarily just 'self', which is\n often just a CMSPluginBase,\n\n plugin - the associated plugin class instance (subclass\n of CMSPlugin)\n " plugin = self.get_plugin_class_instance(admin) if hasattr(self, '_inst'): return (self._inst, plugin) if (plugin.model != self.__class__): try: instance = plugin.model.objects.get(cmsplugin_ptr=self) instance._render_meta = self._render_meta except (AttributeError, ObjectDoesNotExist): instance = None else: instance = self self._inst = instance return (self._inst, plugin)
-8,788,274,808,039,571,000
Given a plugin instance (usually as a CMSPluginBase), this method returns a tuple containing: instance - The instance AS THE APPROPRIATE SUBCLASS OF CMSPluginBase and not necessarily just 'self', which is often just a CMSPluginBase, plugin - the associated plugin class instance (subclass of CMSPlugin)
cms/models/pluginmodel.py
get_plugin_instance
stefanw/django-cms
python
def get_plugin_instance(self, admin=None): "\n Given a plugin instance (usually as a CMSPluginBase), this method\n returns a tuple containing:\n\n instance - The instance AS THE APPROPRIATE SUBCLASS OF\n CMSPluginBase and not necessarily just 'self', which is\n often just a CMSPluginBase,\n\n plugin - the associated plugin class instance (subclass\n of CMSPlugin)\n " plugin = self.get_plugin_class_instance(admin) if hasattr(self, '_inst'): return (self._inst, plugin) if (plugin.model != self.__class__): try: instance = plugin.model.objects.get(cmsplugin_ptr=self) instance._render_meta = self._render_meta except (AttributeError, ObjectDoesNotExist): instance = None else: instance = self self._inst = instance return (self._inst, plugin)
def get_instance_icon_src(self): "\n Get src URL for instance's icon\n " (instance, plugin) = self.get_plugin_instance() return (plugin.icon_src(instance) if instance else u'')
-5,296,560,135,758,746,000
Get src URL for instance's icon
cms/models/pluginmodel.py
get_instance_icon_src
stefanw/django-cms
python
def get_instance_icon_src(self): "\n    \n    " (instance, plugin) = self.get_plugin_instance() return (plugin.icon_src(instance) if instance else u'')
def get_instance_icon_alt(self): "\n Get alt text for instance's icon\n " (instance, plugin) = self.get_plugin_instance() return (force_text(plugin.icon_alt(instance)) if instance else u'')
-2,353,452,820,428,487,700
Get alt text for instance's icon
cms/models/pluginmodel.py
get_instance_icon_alt
stefanw/django-cms
python
def get_instance_icon_alt(self): "\n    \n    " (instance, plugin) = self.get_plugin_instance() return (force_text(plugin.icon_alt(instance)) if instance else u'')
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False): "\n Copy this plugin and return the new plugin.\n\n The logic of this method is the following:\n\n # get a new generic plugin instance\n # assign the position in the plugin tree\n # save it to let mptt/treebeard calculate the tree attributes\n # then get a copy of the current plugin instance\n # assign to it the id of the generic plugin instance above;\n this will effectively change the generic plugin created above\n into a concrete one\n # copy the tree related attributes from the generic plugin to\n the concrete one\n # save the concrete plugin\n # trigger the copy relations\n # return the generic plugin instance\n\n This copy logic is required because we don't know what the fields of\n the real plugin are. By getting another instance of it at step 4 and\n then overwriting its ID at step 5, the ORM will copy the custom\n fields for us.\n " try: (plugin_instance, cls) = self.get_plugin_instance() except KeyError: return new_plugin = CMSPlugin() new_plugin.placeholder = target_placeholder parent_cache[self.pk] = new_plugin if self.parent: parent = parent_cache[self.parent_id] parent = CMSPlugin.objects.get(pk=parent.pk) new_plugin.parent_id = parent.pk new_plugin.parent = parent new_plugin.language = target_language new_plugin.plugin_type = self.plugin_type if no_signals: from cms.signals import pre_save_plugins signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin) new_plugin._no_reorder = True new_plugin.save() if plugin_instance: plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk) plugin_instance.pk = new_plugin.pk plugin_instance.id = new_plugin.pk plugin_instance.placeholder = target_placeholder plugin_instance.cmsplugin_ptr = new_plugin plugin_instance.language = target_language plugin_instance.parent = new_plugin.parent plugin_instance.depth = new_plugin.depth plugin_instance.path = new_plugin.path plugin_instance.numchild = new_plugin.numchild plugin_instance._no_reorder = True plugin_instance.save() old_instance = plugin_instance.__class__.objects.get(pk=self.pk) plugin_instance.copy_relations(old_instance) if no_signals: signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') return new_plugin
81,708,884,157,565,040
Copy this plugin and return the new plugin. The logic of this method is the following: # get a new generic plugin instance # assign the position in the plugin tree # save it to let mptt/treebeard calculate the tree attributes # then get a copy of the current plugin instance # assign to it the id of the generic plugin instance above; this will effectively change the generic plugin created above into a concrete one # copy the tree related attributes from the generic plugin to the concrete one # save the concrete plugin # trigger the copy relations # return the generic plugin instance This copy logic is required because we don't know what the fields of the real plugin are. By getting another instance of it at step 4 and then overwriting its ID at step 5, the ORM will copy the custom fields for us.
cms/models/pluginmodel.py
copy_plugin
stefanw/django-cms
python
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False): "\n Copy this plugin and return the new plugin.\n\n The logic of this method is the following:\n\n # get a new generic plugin instance\n # assign the position in the plugin tree\n # save it to let mptt/treebeard calculate the tree attributes\n # then get a copy of the current plugin instance\n # assign to it the id of the generic plugin instance above;\n this will effectively change the generic plugin created above\n into a concrete one\n # copy the tree related attributes from the generic plugin to\n the concrete one\n # save the concrete plugin\n # trigger the copy relations\n # return the generic plugin instance\n\n This copy logic is required because we don't know what the fields of\n the real plugin are. By getting another instance of it at step 4 and\n then overwriting its ID at step 5, the ORM will copy the custom\n fields for us.\n " try: (plugin_instance, cls) = self.get_plugin_instance() except KeyError: return new_plugin = CMSPlugin() new_plugin.placeholder = target_placeholder parent_cache[self.pk] = new_plugin if self.parent: parent = parent_cache[self.parent_id] parent = CMSPlugin.objects.get(pk=parent.pk) new_plugin.parent_id = parent.pk new_plugin.parent = parent new_plugin.language = target_language new_plugin.plugin_type = self.plugin_type if no_signals: from cms.signals import pre_save_plugins signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin) new_plugin._no_reorder = True new_plugin.save() if plugin_instance: plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk) plugin_instance.pk = new_plugin.pk plugin_instance.id = new_plugin.pk plugin_instance.placeholder = target_placeholder plugin_instance.cmsplugin_ptr = new_plugin plugin_instance.language = target_language plugin_instance.parent = new_plugin.parent plugin_instance.depth = new_plugin.depth plugin_instance.path = new_plugin.path plugin_instance.numchild = new_plugin.numchild plugin_instance._no_reorder = True plugin_instance.save() old_instance = plugin_instance.__class__.objects.get(pk=self.pk) plugin_instance.copy_relations(old_instance) if no_signals: signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin') return new_plugin
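The pk-overwrite trick the docstring describes is the standard Django copy idiom: re-fetch the row, clear its identity, and save so the ORM issues an INSERT carrying the same field values. A hedged sketch on a plain model (Article is illustrative):

from django.db import models

class Article(models.Model):
    title = models.CharField(max_length=100)

def copy_article(article_id):
    copy = Article.objects.get(pk=article_id)
    copy.pk = None   # forget the identity of the fetched row...
    copy.id = None
    copy.save()      # ...so save() INSERTs a new row with the same field values
    return copy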
def post_copy(self, old_instance, new_old_ziplist): '\n Handle more advanced cases (eg Text Plugins) after the original is\n copied\n ' pass
-5,287,991,199,225,462,000
Handle more advanced cases (eg Text Plugins) after the original is copied
cms/models/pluginmodel.py
post_copy
stefanw/django-cms
python
def post_copy(self, old_instance, new_old_ziplist): '\n Handle more advanced cases (eg Text Plugins) after the original is\n copied\n ' pass
def copy_relations(self, old_instance): '\n Handle copying of any relations attached to this plugin. Custom plugins\n have to do this themselves!\n ' pass
-6,676,088,627,951,634,000
Handle copying of any relations attached to this plugin. Custom plugins have to do this themselves!
cms/models/pluginmodel.py
copy_relations
stefanw/django-cms
python
def copy_relations(self, old_instance): '\n Handle copying of any relations attached to this plugin. Custom plugins\n have to do this themselves!\n ' pass
def get_position_in_placeholder(self): '\n 1 based position!\n ' return (self.position + 1)
-6,577,428,658,556,805,000
1 based position!
cms/models/pluginmodel.py
get_position_in_placeholder
stefanw/django-cms
python
def get_position_in_placeholder(self): '\n \n ' return (self.position + 1)
def notify_on_autoadd(self, request, conf): '\n Method called when we auto add this plugin via default_plugins in\n CMS_PLACEHOLDER_CONF.\n Some specific plugins may have some special stuff to do when they are\n auto added.\n ' pass
-8,901,765,656,126,953,000
Method called when we auto add this plugin via default_plugins in CMS_PLACEHOLDER_CONF. Some specific plugins may have some special stuff to do when they are auto added.
cms/models/pluginmodel.py
notify_on_autoadd
stefanw/django-cms
python
def notify_on_autoadd(self, request, conf): '\n Method called when we auto add this plugin via default_plugins in\n CMS_PLACEHOLDER_CONF.\n Some specific plugins may have some special stuff to do when they are\n auto added.\n ' pass
def notify_on_autoadd_children(self, request, conf, children): '\n Method called when we auto add children to this plugin via\n default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.\n Some specific plugins may have some special stuff to do when we add\n children to them. ie : TextPlugin must update its content to add HTML\n tags to be able to see his children in WYSIWYG.\n ' pass
-8,075,000,125,713,863,000
Method called when we auto add children to this plugin via default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF. Some specific plugins may have some special stuff to do when we add children to them, e.g. TextPlugin must update its content to add HTML tags so that its children are visible in the WYSIWYG editor.
cms/models/pluginmodel.py
notify_on_autoadd_children
stefanw/django-cms
python
def notify_on_autoadd_children(self, request, conf, children): '\n Method called when we auto add children to this plugin via\n default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.\n Some specific plugins may have some special stuff to do when we add\n children to them. ie : TextPlugin must update its content to add HTML\n tags to be able to see his children in WYSIWYG.\n ' pass
def get_translatable_content(self): "\n Returns {field_name: field_contents} for translatable fields, where\n field_contents > ''\n " fields = (f for f in self._meta.fields if (isinstance(f, (models.CharField, models.TextField)) and f.editable and (not f.choices) and (f.name not in self.translatable_content_excluded_fields))) return dict(filter(itemgetter(1), ((f.name, getattr(self, f.name)) for f in fields)))
-8,645,680,237,000,977,000
Returns {field_name: field_contents} for translatable fields, where field_contents > ''
cms/models/pluginmodel.py
get_translatable_content
stefanw/django-cms
python
def get_translatable_content(self): "\n    Returns {field_name: field_contents} for translatable fields, where\n    field_contents > ''\n    " fields = (f for f in self._meta.fields if (isinstance(f, (models.CharField, models.TextField)) and f.editable and (not f.choices) and (f.name not in self.translatable_content_excluded_fields))) return dict(filter(itemgetter(1), ((f.name, getattr(self, f.name)) for f in fields)))
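The dict(filter(itemgetter(1), ...)) expression above keeps only the (name, value) pairs whose value is truthy, i.e. non-empty strings. In isolation:

from operator import itemgetter

pairs = (('title', 'Hello'), ('subtitle', ''), ('body', 'Text'))
print(dict(filter(itemgetter(1), pairs)))  # {'title': 'Hello', 'body': 'Text'}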
@property def add_url(self): '\n Returns a custom url to add plugin instances\n ' return None
2,399,919,798,950,241,000
Returns a custom url to add plugin instances
cms/models/pluginmodel.py
add_url
stefanw/django-cms
python
@property def add_url(self): '\n \n ' return None
@property def edit_url(self): '\n Returns a custom url to edit plugin instances\n ' return None
-4,633,932,615,375,681,000
Returns a custom url to edit plugin instances
cms/models/pluginmodel.py
edit_url
stefanw/django-cms
python
@property def edit_url(self): '\n \n ' return None
@property def move_url(self): '\n Returns a custom url to move plugin instances\n ' return None
5,817,508,592,457,980,000
Returns a custom url to move plugin instances
cms/models/pluginmodel.py
move_url
stefanw/django-cms
python
@property def move_url(self): '\n \n ' return None
@property def delete_url(self): '\n Returns a custom url to delete plugin instances\n ' return None
-7,538,766,510,792,490,000
Returns a custom url to delete plugin instances
cms/models/pluginmodel.py
delete_url
stefanw/django-cms
python
@property def delete_url(self): '\n \n ' return None
@property def copy_url(self): '\n Returns a custom url to copy plugin instances\n ' return None
2,064,812,285,791,519,700
Returns a custom url to copy plugin instances
cms/models/pluginmodel.py
copy_url
stefanw/django-cms
python
@property def copy_url(self): '\n \n ' return None
def H_from_ransac(fp, tp, model, maxiter=1000, match_theshold=10): ' Robust estimation of homography H from point \n correspondences using RANSAC (ransac.py from\n http://www.scipy.org/Cookbook/RANSAC).\n \n input: fp,tp (3*n arrays) points in hom. coordinates. ' from PCV.tools import ransac data = vstack((fp, tp)) (H, ransac_data) = ransac.ransac(data.T, model, 4, maxiter, match_theshold, 10, return_all=True) return (H, ransac_data['inliers'])
-6,410,962,700,355,088,000
Robust estimation of homography H from point correspondences using RANSAC (ransac.py from http://www.scipy.org/Cookbook/RANSAC). input: fp,tp (3*n arrays) points in hom. coordinates.
PCV/geometry/homography.py
H_from_ransac
BeToMeve/PCV
python
def H_from_ransac(fp, tp, model, maxiter=1000, match_theshold=10): ' Robust estimation of homography H from point \n correspondences using RANSAC (ransac.py from\n http://www.scipy.org/Cookbook/RANSAC).\n \n input: fp,tp (3*n arrays) points in hom. coordinates. ' from PCV.tools import ransac data = vstack((fp, tp)) (H, ransac_data) = ransac.ransac(data.T, model, 4, maxiter, match_theshold, 10, return_all=True) return (H, ransac_data['inliers'])
def H_from_points(fp, tp): ' Find homography H, such that fp is mapped to tp\n using the linear DLT method. Points are conditioned\n automatically. ' if (fp.shape != tp.shape): raise RuntimeError('number of points do not match') m = mean(fp[:2], axis=1) maxstd = (max(std(fp[:2], axis=1)) + 1e-09) C1 = diag([(1 / maxstd), (1 / maxstd), 1]) C1[0][2] = ((- m[0]) / maxstd) C1[1][2] = ((- m[1]) / maxstd) fp = dot(C1, fp) m = mean(tp[:2], axis=1) maxstd = (max(std(tp[:2], axis=1)) + 1e-09) C2 = diag([(1 / maxstd), (1 / maxstd), 1]) C2[0][2] = ((- m[0]) / maxstd) C2[1][2] = ((- m[1]) / maxstd) tp = dot(C2, tp) nbr_correspondences = fp.shape[1] A = zeros(((2 * nbr_correspondences), 9)) for i in range(nbr_correspondences): A[(2 * i)] = [(- fp[0][i]), (- fp[1][i]), (- 1), 0, 0, 0, (tp[0][i] * fp[0][i]), (tp[0][i] * fp[1][i]), tp[0][i]] A[((2 * i) + 1)] = [0, 0, 0, (- fp[0][i]), (- fp[1][i]), (- 1), (tp[1][i] * fp[0][i]), (tp[1][i] * fp[1][i]), tp[1][i]] (U, S, V) = linalg.svd(A) H = V[8].reshape((3, 3)) H = dot(linalg.inv(C2), dot(H, C1)) return (H / H[(2, 2)])
-2,517,881,027,021,305,000
Find homography H, such that fp is mapped to tp using the linear DLT method. Points are conditioned automatically.
PCV/geometry/homography.py
H_from_points
BeToMeve/PCV
python
def H_from_points(fp, tp): ' Find homography H, such that fp is mapped to tp\n using the linear DLT method. Points are conditioned\n automatically. ' if (fp.shape != tp.shape): raise RuntimeError('number of points do not match') m = mean(fp[:2], axis=1) maxstd = (max(std(fp[:2], axis=1)) + 1e-09) C1 = diag([(1 / maxstd), (1 / maxstd), 1]) C1[0][2] = ((- m[0]) / maxstd) C1[1][2] = ((- m[1]) / maxstd) fp = dot(C1, fp) m = mean(tp[:2], axis=1) maxstd = (max(std(tp[:2], axis=1)) + 1e-09) C2 = diag([(1 / maxstd), (1 / maxstd), 1]) C2[0][2] = ((- m[0]) / maxstd) C2[1][2] = ((- m[1]) / maxstd) tp = dot(C2, tp) nbr_correspondences = fp.shape[1] A = zeros(((2 * nbr_correspondences), 9)) for i in range(nbr_correspondences): A[(2 * i)] = [(- fp[0][i]), (- fp[1][i]), (- 1), 0, 0, 0, (tp[0][i] * fp[0][i]), (tp[0][i] * fp[1][i]), tp[0][i]] A[((2 * i) + 1)] = [0, 0, 0, (- fp[0][i]), (- fp[1][i]), (- 1), (tp[1][i] * fp[0][i]), (tp[1][i] * fp[1][i]), tp[1][i]] (U, S, V) = linalg.svd(A) H = V[8].reshape((3, 3)) H = dot(linalg.inv(C2), dot(H, C1)) return (H / H[(2, 2)])
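A round-trip check for H_from_points: push the unit square through a known homography and recover it. The import path follows this record's path field; numpy is imported explicitly here instead of relying on the module's star import:

import numpy as np
from PCV.geometry.homography import H_from_points

H_true = np.array([[1.0, 0.2, 5.0],
                   [0.1, 1.0, -3.0],
                   [0.0, 0.0, 1.0]])
fp = np.array([[0.0, 1.0, 1.0, 0.0],
               [0.0, 0.0, 1.0, 1.0],
               [1.0, 1.0, 1.0, 1.0]])  # unit square, homogeneous 3x4
tp = np.dot(H_true, fp)                # exact correspondences
H_est = H_from_points(fp, tp)
assert np.allclose(H_est, H_true, atol=1e-06)  # recovered up to the H[2, 2] = 1 normalization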
def Haffine_from_points(fp, tp): ' Find H, affine transformation, such that \n tp is affine transf of fp. ' if (fp.shape != tp.shape): raise RuntimeError('number of points do not match') m = mean(fp[:2], axis=1) maxstd = (max(std(fp[:2], axis=1)) + 1e-09) C1 = diag([(1 / maxstd), (1 / maxstd), 1]) C1[0][2] = ((- m[0]) / maxstd) C1[1][2] = ((- m[1]) / maxstd) fp_cond = dot(C1, fp) m = mean(tp[:2], axis=1) C2 = C1.copy() C2[0][2] = ((- m[0]) / maxstd) C2[1][2] = ((- m[1]) / maxstd) tp_cond = dot(C2, tp) A = concatenate((fp_cond[:2], tp_cond[:2]), axis=0) (U, S, V) = linalg.svd(A.T) tmp = V[:2].T B = tmp[:2] C = tmp[2:4] tmp2 = concatenate((dot(C, linalg.pinv(B)), zeros((2, 1))), axis=1) H = vstack((tmp2, [0, 0, 1])) H = dot(linalg.inv(C2), dot(H, C1)) return (H / H[(2, 2)])
-4,405,595,007,920,864,000
Find H, affine transformation, such that tp is affine transf of fp.
PCV/geometry/homography.py
Haffine_from_points
BeToMeve/PCV
python
def Haffine_from_points(fp, tp): ' Find H, affine transformation, such that \n tp is affine transf of fp. ' if (fp.shape != tp.shape): raise RuntimeError('number of points do not match') m = mean(fp[:2], axis=1) maxstd = (max(std(fp[:2], axis=1)) + 1e-09) C1 = diag([(1 / maxstd), (1 / maxstd), 1]) C1[0][2] = ((- m[0]) / maxstd) C1[1][2] = ((- m[1]) / maxstd) fp_cond = dot(C1, fp) m = mean(tp[:2], axis=1) C2 = C1.copy() C2[0][2] = ((- m[0]) / maxstd) C2[1][2] = ((- m[1]) / maxstd) tp_cond = dot(C2, tp) A = concatenate((fp_cond[:2], tp_cond[:2]), axis=0) (U, S, V) = linalg.svd(A.T) tmp = V[:2].T B = tmp[:2] C = tmp[2:4] tmp2 = concatenate((dot(C, linalg.pinv(B)), zeros((2, 1))), axis=1) H = vstack((tmp2, [0, 0, 1])) H = dot(linalg.inv(C2), dot(H, C1)) return (H / H[(2, 2)])
def normalize(points): ' Normalize a collection of points in \n homogeneous coordinates so that last row = 1. ' for row in points: row /= points[(- 1)] return points
948,872,030,814,935,000
Normalize a collection of points in homogeneous coordinates so that last row = 1.
PCV/geometry/homography.py
normalize
BeToMeve/PCV
python
def normalize(points): ' Normalize a collection of points in \n homogeneous coordinates so that last row = 1. ' for row in points: row /= points[(- 1)] return points
def make_homog(points): ' Convert a set of points (dim*n array) to \n homogeneous coordinates. ' return vstack((points, ones((1, points.shape[1]))))
1,080,106,401,644,308,700
Convert a set of points (dim*n array) to homogeneous coordinates.
PCV/geometry/homography.py
make_homog
BeToMeve/PCV
python
def make_homog(points): ' Convert a set of points (dim*n array) to \n homogeneous coordinates. ' return vstack((points, ones((1, points.shape[1]))))
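make_homog and normalize compose as inverses up to a homogeneous scale; a tiny demonstration:

import numpy as np
from PCV.geometry.homography import make_homog, normalize

pts = np.array([[0.0, 2.0, 4.0],
                [1.0, 3.0, 5.0]])   # three (x, y) points as a 2x3 array
hom = (make_homog(pts) * 2.0)       # homogeneous, with an arbitrary scale factor
print(normalize(hom))               # last row is all ones again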
def fit(self, data): ' Fit homography to four selected correspondences. ' data = data.T fp = data[:3, :4] tp = data[3:, :4] return H_from_points(fp, tp)
-6,976,366,703,067,128,000
Fit homography to four selected correspondences.
PCV/geometry/homography.py
fit
BeToMeve/PCV
python
def fit(self, data): ' ' data = data.T fp = data[:3, :4] tp = data[3:, :4] return H_from_points(fp, tp)
def get_error(self, data, H): ' Apply homography to all correspondences, \n return error for each transformed point. ' data = data.T fp = data[:3] tp = data[3:] fp_transformed = dot(H, fp) fp_transformed = normalize(fp_transformed) return sqrt(sum(((tp - fp_transformed) ** 2), axis=0))
-1,736,261,869,613,256,200
Apply homography to all correspondences, return error for each transformed point.
PCV/geometry/homography.py
get_error
BeToMeve/PCV
python
def get_error(self, data, H): ' Apply homography to all correspondences, \n return error for each transformed point. ' data = data.T fp = data[:3] tp = data[3:] fp_transformed = dot(H, fp) fp_transformed = normalize(fp_transformed) return sqrt(sum(((tp - fp_transformed) ** 2), axis=0))
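A hedged end-to-end sketch tying fit()/get_error() back to H_from_ransac further up. It assumes the enclosing class is instantiable as RansacModel() (the name used in the PCV sources) and that PCV.tools.ransac is importable; the point data is synthetic, and note the keyword really is spelled match_theshold in this module:

import numpy as np
from PCV.geometry.homography import H_from_ransac, RansacModel

fp = np.vstack((np.random.random((2, 100)) * 10, np.ones((1, 100))))  # 3x100 homogeneous points
tp = fp + np.random.normal(0, 0.05, fp.shape)
tp[2] = 1.0                                                           # keep the last row homogeneous
(H, inliers) = H_from_ransac(fp, tp, RansacModel(), maxiter=500, match_theshold=5)
print(H, len(inliers))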
def _get_auth_response_with_retries(response_generator, num_of_retries=NUM_OF_RETRIES_FOR_AUTHENTICATION, auth_wait_time_sec=WAIT_TIME_FOR_AUTHENTICATION_RETRIES_SEC): '\n Sends an authentication request (first time/refresh) with a retry mechanism.\n :param response_generator (lambda)\n A function call that sends the wanted REST request.\n :return: The response received from the authentication server.\n ' for i in range((num_of_retries + 1)): try: response = response_generator() response.json() get_logger().info(f'Got an authentication response after {i} retries.') break except Exception: if (i == num_of_retries): response = _create_a_bad_response((('{"errors": ["Could not connect to authentication server", "Number of retries: ' + str(i)) + '"]}')) else: time.sleep(auth_wait_time_sec) return response
6,639,735,644,807,461,000
Sends an authentication request (first time/refresh) with a retry mechanism. :param response_generator (lambda) A function call that sends the wanted REST request. :return: The response received from the authentication server.
mona_sdk/authentication.py
_get_auth_response_with_retries
TalzMona/mona-sdk
python
def _get_auth_response_with_retries(response_generator, num_of_retries=NUM_OF_RETRIES_FOR_AUTHENTICATION, auth_wait_time_sec=WAIT_TIME_FOR_AUTHENTICATION_RETRIES_SEC): '\n Sends an authentication request (first time/refresh) with a retry mechanism.\n :param response_generator (lambda)\n A function call that sends the wanted REST request.\n :return: The response received from the authentication server.\n ' for i in range((num_of_retries + 1)): try: response = response_generator() response.json() get_logger().info(f'Got an authentication response after {i} retries.') break except Exception: if (i == num_of_retries): response = _create_a_bad_response((('{"errors": ["Could not connect to authentication server", "Number of retries: ' + str(i)) + '"]}')) else: time.sleep(auth_wait_time_sec) return response
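Usage per the docstring: the request is passed as a zero-argument callable so the helper can re-issue it on each retry. The endpoint and header constants come from this module; the key and secret values are made up:

response = _get_auth_response_with_retries(
    lambda: requests.request('POST', AUTH_API_TOKEN_URL, headers=BASIC_HEADER,
                             json={'clientId': 'my-api-key', 'secret': 'my-secret'}),
    num_of_retries=3,
    auth_wait_time_sec=2)
if response.ok:
    print(response.json())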
def _request_access_token_once(api_key, secret): '\n Sends an access token REST request and returns the response.\n ' return requests.request('POST', AUTH_API_TOKEN_URL, headers=BASIC_HEADER, json={'clientId': api_key, 'secret': secret})
896,239,106,726,657,700
Sends an access token REST request and returns the response.
mona_sdk/authentication.py
_request_access_token_once
TalzMona/mona-sdk
python
def _request_access_token_once(api_key, secret): '\n \n ' return requests.request('POST', AUTH_API_TOKEN_URL, headers=BASIC_HEADER, json={'clientId': api_key, 'secret': secret})
def _request_refresh_token_once(refresh_token_key): '\n Sends a refresh token REST request and returns the response.\n ' return requests.request('POST', REFRESH_TOKEN_URL, headers=BASIC_HEADER, json={'refreshToken': refresh_token_key})
2,580,173,910,022,958,600
Sends a refresh token REST request and returns the response.
mona_sdk/authentication.py
_request_refresh_token_once
TalzMona/mona-sdk
python
def _request_refresh_token_once(refresh_token_key): '\n \n ' return requests.request('POST', REFRESH_TOKEN_URL, headers=BASIC_HEADER, json={'refreshToken': refresh_token_key})
def _create_a_bad_response(content): '\n :param: content (str)\n The content of the response.\n :return: A functioning bad REST response instance with the given content.\n ' response = Response() response.status_code = 400 if (type(content) is str): response._content = bytes(content, 'utf8') return response
8,681,903,556,756,483,000
:param: content (str) The content of the response. :return: A functioning bad REST response instance with the given content.
mona_sdk/authentication.py
_create_a_bad_response
TalzMona/mona-sdk
python
def _create_a_bad_response(content): '\n :param: content (str)\n The content of the response.\n :return: A functioning bad REST response instance with the given content.\n ' response = Response() response.status_code = 400 if (type(content) is str): response._content = bytes(content, 'utf8') return response
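The synthetic Response behaves like a genuine failed one, which is what lets the retry helper above treat both paths uniformly:

bad = _create_a_bad_response('{"errors": ["Could not connect"]}')
print(bad.ok)      # False, since status_code is 400
print(bad.json())  # {'errors': ['Could not connect']}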
def get_current_token_by_api_key(api_key): "\n :return: The given api_key's current access token.\n " return _get_token_info_by_api_key(api_key, ACCESS_TOKEN)
2,081,441,044,815,147,500
:return: The given api_key's current access token.
mona_sdk/authentication.py
get_current_token_by_api_key
TalzMona/mona-sdk
python
def get_current_token_by_api_key(api_key): "\n \n " return _get_token_info_by_api_key(api_key, ACCESS_TOKEN)
def _get_token_info_by_api_key(api_key, token_data_arg): '\n Returns the value of the wanted data for the given api_key.\n Returns None if the api_key or the arg does not exist.\n ' return API_KEYS_TO_TOKEN_DATA.get(api_key, {}).get(token_data_arg)
-1,166,280,672,322,913,800
Returns the value of the wanted data for the given api_key. Returns None if the api_key or the arg does not exist.
mona_sdk/authentication.py
_get_token_info_by_api_key
TalzMona/mona-sdk
python
def _get_token_info_by_api_key(api_key, token_data_arg): '\n Returns the value of the wanted data for the given api_key.\n Returns None if the api_key or the arg does not exist.\n ' return API_KEYS_TO_TOKEN_DATA.get(api_key, {}).get(token_data_arg)
def is_authenticated(api_key): "\n :return: True if Mona's client holds a valid token and can communicate with Mona's\n servers (or can refresh the token in order to), False otherwise.\n " return _get_token_info_by_api_key(api_key, IS_AUTHENTICATED)
4,448,667,322,111,320,600
:return: True if Mona's client holds a valid token and can communicate with Mona's servers (or can refresh the token in order to), False otherwise.
mona_sdk/authentication.py
is_authenticated
TalzMona/mona-sdk
python
def is_authenticated(api_key): "\n :return: True if Mona's client holds a valid token and can communicate with Mona's\n servers (or can refresh the token in order to), False otherwise.\n " return _get_token_info_by_api_key(api_key, IS_AUTHENTICATED)
def _set_api_key_authentication_status(api_key, bool_value): '\n Sets the IS_AUTHENTICATED arg in the token data dict of the given api_key, this\n setter is only needed to spare redundant calls for authentication.\n ' API_KEYS_TO_TOKEN_DATA[api_key][IS_AUTHENTICATED] = bool_value
-4,218,618,756,367,162,400
Sets the IS_AUTHENTICATED arg in the token data dict of the given api_key, this setter is only needed to spare redundant calls for authentication.
mona_sdk/authentication.py
_set_api_key_authentication_status
TalzMona/mona-sdk
python
def _set_api_key_authentication_status(api_key, bool_value): '\n Sets the IS_AUTHENTICATED arg in the token data dict of the given api_key, this\n setter is only needed to spare redundant calls for authentication.\n ' API_KEYS_TO_TOKEN_DATA[api_key][IS_AUTHENTICATED] = bool_value
def _calculate_and_set_time_to_refresh(api_key): '\n Calculates the time the access token needs to be refreshed and updates the relevant\n api_key token data.\n ' if is_authenticated(api_key): token_expires = datetime.datetime.strptime(_get_token_info_by_api_key(api_key, EXPIRES), TOKEN_EXPIRED_DATE_FORMAT) API_KEYS_TO_TOKEN_DATA[api_key][TIME_TO_REFRESH] = (token_expires - REFRESH_TOKEN_SAFETY_MARGIN)
-1,675,712,254,189,625,000
Calculates the time the access token needs to be refreshed and updates the relevant api_key token data.
mona_sdk/authentication.py
_calculate_and_set_time_to_refresh
TalzMona/mona-sdk
python
def _calculate_and_set_time_to_refresh(api_key): '\n Calculates the time the access token needs to be refreshed and updates the relevant\n api_key token data.\n ' if is_authenticated(api_key): token_expires = datetime.datetime.strptime(_get_token_info_by_api_key(api_key, EXPIRES), TOKEN_EXPIRED_DATE_FORMAT) API_KEYS_TO_TOKEN_DATA[api_key][TIME_TO_REFRESH] = (token_expires - REFRESH_TOKEN_SAFETY_MARGIN)
def _handle_authentications_error(error_message): '\n Logs an error and raises MonaAuthenticationException if\n RAISE_AUTHENTICATION_EXCEPTIONS is true, else returns false.\n ' get_logger().error(error_message) if RAISE_AUTHENTICATION_EXCEPTIONS: raise MonaAuthenticationException(error_message) return False
1,139,361,633,615,693,800
Logs an error and raises MonaAuthenticationException if RAISE_AUTHENTICATION_EXCEPTIONS is true, else returns false.
mona_sdk/authentication.py
_handle_authentications_error
TalzMona/mona-sdk
python
def _handle_authentications_error(error_message):
    get_logger().error(error_message)
    if RAISE_AUTHENTICATION_EXCEPTIONS:
        raise MonaAuthenticationException(error_message)
    return False
def _should_refresh_token(api_key):
    '\n    :return: True if the token has expired, or is about to expire in\n    REFRESH_TOKEN_SAFETY_MARGIN hours or less, False otherwise.\n    '
    return (_get_token_info_by_api_key(api_key, TIME_TO_REFRESH) < datetime.datetime.now())
-2,934,975,799,896,226,300
:return: True if the token has expired, or is about to expire in REFRESH_TOKEN_SAFETY_MARGIN hours or less, False otherwise.
mona_sdk/authentication.py
_should_refresh_token
TalzMona/mona-sdk
python
def _should_refresh_token(api_key):
    return (_get_token_info_by_api_key(api_key, TIME_TO_REFRESH) < datetime.datetime.now())
def _refresh_token(api_key):
    '\n    Gets a new token and sets the needed fields.\n    '
    refresh_token_key = _get_token_info_by_api_key(api_key, REFRESH_TOKEN)
    response = _request_refresh_token_with_retries(refresh_token_key)
    # Check the status before parsing: a failed response may not carry a JSON body.
    if (not response.ok):
        return _handle_authentications_error(f'Could not refresh token: {response.text}')
    authentications_response_info = response.json()
    API_KEYS_TO_TOKEN_DATA[api_key] = authentications_response_info
    _set_api_key_authentication_status(api_key, True)
    _calculate_and_set_time_to_refresh(api_key)
    get_logger().info(f'Refreshed access token, the new token info: {API_KEYS_TO_TOKEN_DATA[api_key]}')
    return True
2,829,551,205,414,393,300
Gets a new token and sets the needed fields.
mona_sdk/authentication.py
_refresh_token
TalzMona/mona-sdk
python
def _refresh_token(api_key):
    refresh_token_key = _get_token_info_by_api_key(api_key, REFRESH_TOKEN)
    response = _request_refresh_token_with_retries(refresh_token_key)
    if (not response.ok):
        return _handle_authentications_error(f'Could not refresh token: {response.text}')
    authentications_response_info = response.json()
    API_KEYS_TO_TOKEN_DATA[api_key] = authentications_response_info
    _set_api_key_authentication_status(api_key, True)
    _calculate_and_set_time_to_refresh(api_key)
    get_logger().info(f'Refreshed access token, the new token info: {API_KEYS_TO_TOKEN_DATA[api_key]}')
    return True
@classmethod
def refresh_token_if_needed(cls, decorated):
    "\n    This decorator checks if the current client's access token is about to\n    be expired/already expired, and if so, updates to a new one.\n    "

    @wraps(decorated)
    def inner(*args, **kwargs):
        api_key = args[0]._api_key
        if (not is_authenticated(api_key)):
            get_logger().warning("Mona's client is not authenticated")
            return False
        if _should_refresh_token(api_key):
            with authentication_lock:
                # Re-check under the lock so only one thread performs the refresh.
                if _should_refresh_token(api_key):
                    did_refresh_token = _refresh_token(api_key)
                    if (not did_refresh_token):
                        return False
        return decorated(*args, **kwargs)

    return inner
-2,552,210,874,975,340,500
This decorator checks if the current client's access token is about to be expired/already expired, and if so, updates to a new one.
mona_sdk/authentication.py
refresh_token_if_needed
TalzMona/mona-sdk
python
@classmethod
def refresh_token_if_needed(cls, decorated):
    @wraps(decorated)
    def inner(*args, **kwargs):
        api_key = args[0]._api_key
        if (not is_authenticated(api_key)):
            get_logger().warning("Mona's client is not authenticated")
            return False
        if _should_refresh_token(api_key):
            with authentication_lock:
                if _should_refresh_token(api_key):
                    did_refresh_token = _refresh_token(api_key)
                    if (not did_refresh_token):
                        return False
        return decorated(*args, **kwargs)

    return inner
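A minimal usage sketch for the decorator above; Client, export, and the Decorators host class are hypothetical names, and the only requirement visible in the record is that args[0] (the bound instance) carries an _api_key attribute:

class Client:
    def __init__(self, api_key):
        # The decorator reads the key from the instance's _api_key attribute.
        self._api_key = api_key

    @Decorators.refresh_token_if_needed  # Decorators is a stand-in for the hosting class
    def export(self, message):
        # Runs only with a valid (possibly just-refreshed) token.
        ...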
def __init__(self, filename):
    'Initialize exceptions interface.'
    with open(filename) as fname:
        self.exceptions = yaml.safe_load(fname)
-3,824,527,174,756,549,000
Initialize exceptions interface.
statick_tool/exceptions.py
__init__
axydes/statick
python
def __init__(self, filename):
    with open(filename) as fname:
        self.exceptions = yaml.safe_load(fname)
def get_ignore_packages(self):
    'Get list of packages to skip when scanning a workspace.'
    ignore = []
    if (('ignore_packages' in self.exceptions) and (self.exceptions['ignore_packages'] is not None)):
        ignore = self.exceptions['ignore_packages']
    return ignore
-477,062,781,152,776,700
Get list of packages to skip when scanning a workspace.
statick_tool/exceptions.py
get_ignore_packages
axydes/statick
python
def get_ignore_packages(self):
    ignore = []
    if (('ignore_packages' in self.exceptions) and (self.exceptions['ignore_packages'] is not None)):
        ignore = self.exceptions['ignore_packages']
    return ignore
def get_exceptions(self, package):
    'Get specific exceptions for given package.'
    exceptions = {'file': [], 'message_regex': []}
    if (('global' in self.exceptions) and ('exceptions' in self.exceptions['global'])):
        global_exceptions = self.exceptions['global']['exceptions']
        if ('file' in global_exceptions):
            exceptions['file'] += global_exceptions['file']
        if ('message_regex' in global_exceptions):
            exceptions['message_regex'] += global_exceptions['message_regex']
    if (self.exceptions and ('packages' in self.exceptions) and self.exceptions['packages'] and (package.name in self.exceptions['packages']) and self.exceptions['packages'][package.name] and ('exceptions' in self.exceptions['packages'][package.name])):
        package_exceptions = self.exceptions['packages'][package.name]['exceptions']
        if ('file' in package_exceptions):
            exceptions['file'] += package_exceptions['file']
        if ('message_regex' in package_exceptions):
            exceptions['message_regex'] += package_exceptions['message_regex']
    return exceptions
684,870,473,872,272,800
Get specific exceptions for given package.
statick_tool/exceptions.py
get_exceptions
axydes/statick
python
def get_exceptions(self, package):
    exceptions = {'file': [], 'message_regex': []}
    if (('global' in self.exceptions) and ('exceptions' in self.exceptions['global'])):
        global_exceptions = self.exceptions['global']['exceptions']
        if ('file' in global_exceptions):
            exceptions['file'] += global_exceptions['file']
        if ('message_regex' in global_exceptions):
            exceptions['message_regex'] += global_exceptions['message_regex']
    if (self.exceptions and ('packages' in self.exceptions) and self.exceptions['packages'] and (package.name in self.exceptions['packages']) and self.exceptions['packages'][package.name] and ('exceptions' in self.exceptions['packages'][package.name])):
        package_exceptions = self.exceptions['packages'][package.name]['exceptions']
        if ('file' in package_exceptions):
            exceptions['file'] += package_exceptions['file']
        if ('message_regex' in package_exceptions):
            exceptions['message_regex'] += package_exceptions['message_regex']
    return exceptions
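A hypothetical self.exceptions value (normally parsed from the YAML file loaded in __init__ above) that exercises both the global and the per-package branches of get_exceptions; the tool names and glob patterns are made up, but the key layout (exceptions / file / message_regex, globs, tools, regex) is taken from the records in this file:

exceptions = {
    'global': {
        'exceptions': {
            'file': [{'tools': 'all', 'globs': ['*/build/*']}],
            'message_regex': [{'tools': ['cppcheck'], 'regex': '.*unusedFunction.*'}],
        }
    },
    'packages': {
        'my_package': {  # hypothetical package name
            'exceptions': {
                'file': [{'tools': ['pylint'], 'globs': ['*/third_party/*']}],
            }
        }
    },
}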
def filter_file_exceptions_early(self, package, file_list):
    "\n    Filter files based on file pattern exceptions list.\n\n    Only filters files which have tools=all, intended for use after the\n    discovery plugins have been run (so that Statick doesn't run the tool\n    plugins against files which will be ignored anyway).\n    "
    exceptions = self.get_exceptions(package)
    to_remove = []
    for filename in file_list:
        removed = False
        for exception in exceptions['file']:
            if (exception['tools'] == 'all'):
                for pattern in exception['globs']:
                    fname = filename
                    prefix = '/home/travis/build/'
                    if ((pattern == '*/build/*') and fname.startswith(prefix)):
                        fname = fname[len(prefix):]
                    if fnmatch.fnmatch(fname, pattern):
                        to_remove.append(filename)
                        removed = True
                        break
            if removed:
                break
    file_list = [filename for filename in file_list if (filename not in to_remove)]
    return file_list
-1,934,236,728,198,714,000
Filter files based on file pattern exceptions list. Only filters files which have tools=all, intended for use after the discovery plugins have been run (so that Statick doesn't run the tool plugins against files which will be ignored anyway).
statick_tool/exceptions.py
filter_file_exceptions_early
axydes/statick
python
def filter_file_exceptions_early(self, package, file_list):
    exceptions = self.get_exceptions(package)
    to_remove = []
    for filename in file_list:
        removed = False
        for exception in exceptions['file']:
            if (exception['tools'] == 'all'):
                for pattern in exception['globs']:
                    fname = filename
                    prefix = '/home/travis/build/'
                    if ((pattern == '*/build/*') and fname.startswith(prefix)):
                        fname = fname[len(prefix):]
                    if fnmatch.fnmatch(fname, pattern):
                        to_remove.append(filename)
                        removed = True
                        break
            if removed:
                break
    file_list = [filename for filename in file_list if (filename not in to_remove)]
    return file_list
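The Travis-prefix handling above exists because fnmatch lets '*' cross path separators, so '*/build/*' would otherwise match anything under the CI build root; a small check with made-up paths:

import fnmatch

print(fnmatch.fnmatch('/home/travis/build/owner/repo/src/a.c', '*/build/*'))  # True (false positive)
print(fnmatch.fnmatch('owner/repo/src/a.c', '*/build/*'))                     # False, once the prefix is stripped
print(fnmatch.fnmatch('owner/repo/build/gen.c', '*/build/*'))                 # True (a real build artifact)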
def filter_file_exceptions(self, package, exceptions, issues):
    'Filter issues based on file pattern exceptions list.'
    for (tool, tool_issues) in list(issues.items()):
        warning_printed = False
        to_remove = []
        for issue in tool_issues:
            if (not os.path.isabs(issue.filename)):
                if (not warning_printed):
                    self.print_exception_warning(tool)
                    warning_printed = True
                continue
            rel_path = os.path.relpath(issue.filename, package.path)
            for exception in exceptions:
                if ((exception['tools'] == 'all') or (tool in exception['tools'])):
                    for pattern in exception['globs']:
                        fname = issue.filename
                        prefix = '/home/travis/build/'
                        if ((pattern == '*/build/*') and fname.startswith(prefix)):
                            fname = fname[len(prefix):]
                        if (fnmatch.fnmatch(fname, pattern) or fnmatch.fnmatch(rel_path, pattern)):
                            to_remove.append(issue)
        issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
-7,031,852,162,055,159,000
Filter issues based on file pattern exceptions list.
statick_tool/exceptions.py
filter_file_exceptions
axydes/statick
python
def filter_file_exceptions(self, package, exceptions, issues):
    for (tool, tool_issues) in list(issues.items()):
        warning_printed = False
        to_remove = []
        for issue in tool_issues:
            if (not os.path.isabs(issue.filename)):
                if (not warning_printed):
                    self.print_exception_warning(tool)
                    warning_printed = True
                continue
            rel_path = os.path.relpath(issue.filename, package.path)
            for exception in exceptions:
                if ((exception['tools'] == 'all') or (tool in exception['tools'])):
                    for pattern in exception['globs']:
                        fname = issue.filename
                        prefix = '/home/travis/build/'
                        if ((pattern == '*/build/*') and fname.startswith(prefix)):
                            fname = fname[len(prefix):]
                        if (fnmatch.fnmatch(fname, pattern) or fnmatch.fnmatch(rel_path, pattern)):
                            to_remove.append(issue)
        issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
@classmethod
def filter_regex_exceptions(cls, exceptions, issues):
    'Filter issues based on message regex exceptions list.'
    for exception in exceptions:
        exception_re = exception['regex']
        exception_tools = exception['tools']
        compiled_re = re.compile(exception_re)
        for (tool, tool_issues) in list(issues.items()):
            to_remove = []
            if ((exception_tools == 'all') or (tool in exception_tools)):
                for issue in tool_issues:
                    match = compiled_re.match(issue.message)
                    if match:
                        to_remove.append(issue)
            issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
-5,461,150,174,868,070,000
Filter issues based on message regex exceptions list.
statick_tool/exceptions.py
filter_regex_exceptions
axydes/statick
python
@classmethod
def filter_regex_exceptions(cls, exceptions, issues):
    for exception in exceptions:
        exception_re = exception['regex']
        exception_tools = exception['tools']
        compiled_re = re.compile(exception_re)
        for (tool, tool_issues) in list(issues.items()):
            to_remove = []
            if ((exception_tools == 'all') or (tool in exception_tools)):
                for issue in tool_issues:
                    match = compiled_re.match(issue.message)
                    if match:
                        to_remove.append(issue)
            issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
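Note that the record uses re.match, which anchors at the start of the string, so a pattern only suppresses an issue if it matches from the message's first character; a short check with hypothetical messages:

import re

compiled_re = re.compile('unused variable .*')
print(bool(compiled_re.match('unused variable x')))           # True: issue filtered
print(bool(compiled_re.match('warning: unused variable x')))  # False: match() is anchored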
def filter_nolint(self, issues):
    "\n    Filter out lines that have an explicit NOLINT on them.\n\n    Sometimes the tools themselves don't properly filter these out if\n    there is a complex macro or something.\n    "
    for (tool, tool_issues) in list(issues.items()):
        warning_printed = False
        to_remove = []
        for issue in tool_issues:
            if (not os.path.isabs(issue.filename)):
                if (not warning_printed):
                    self.print_exception_warning(tool)
                    warning_printed = True
                continue
            # Read-only access is enough; the context manager closes the file handle.
            with open(issue.filename, 'r', encoding='utf-8') as source_file:
                lines = source_file.readlines()
            line_number = (int(issue.line_number) - 1)
            if ((line_number < len(lines)) and ('NOLINT' in lines[line_number])):
                to_remove.append(issue)
        issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
3,960,404,777,609,598,000
Filter out lines that have an explicit NOLINT on them. Sometimes the tools themselves don't properly filter these out if there is a complex macro or something.
statick_tool/exceptions.py
filter_nolint
axydes/statick
python
def filter_nolint(self, issues):
    for (tool, tool_issues) in list(issues.items()):
        warning_printed = False
        to_remove = []
        for issue in tool_issues:
            if (not os.path.isabs(issue.filename)):
                if (not warning_printed):
                    self.print_exception_warning(tool)
                    warning_printed = True
                continue
            with open(issue.filename, 'r', encoding='utf-8') as source_file:
                lines = source_file.readlines()
            line_number = (int(issue.line_number) - 1)
            if ((line_number < len(lines)) and ('NOLINT' in lines[line_number])):
                to_remove.append(issue)
        issues[tool] = [issue for issue in tool_issues if (issue not in to_remove)]
    return issues
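The NOLINT check above is a plain substring test on the flagged source line, with issue.line_number being 1-based while the lines list is 0-based; a tiny sketch with made-up data:

lines = ['import os\n', 'risky_call()  # NOLINT\n']
issue_line_number = 2
print('NOLINT' in lines[issue_line_number - 1])  # True: this issue would be dropped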
def filter_issues(self, package, issues):
    'Filter issues based on exceptions list.'
    exceptions = self.get_exceptions(package)
    if exceptions['file']:
        issues = self.filter_file_exceptions(package, exceptions['file'], issues)
    if exceptions['message_regex']:
        issues = self.filter_regex_exceptions(exceptions['message_regex'], issues)
    issues = self.filter_nolint(issues)
    return issues
739,342,406,282,945,700
Filter issues based on exceptions list.
statick_tool/exceptions.py
filter_issues
axydes/statick
python
def filter_issues(self, package, issues):
    exceptions = self.get_exceptions(package)
    if exceptions['file']:
        issues = self.filter_file_exceptions(package, exceptions['file'], issues)
    if exceptions['message_regex']:
        issues = self.filter_regex_exceptions(exceptions['message_regex'], issues)
    issues = self.filter_nolint(issues)
    return issues
@classmethod
def print_exception_warning(cls, tool):
    '\n    Print warning about exception not being applied for an issue.\n\n    Warning will only be printed once per tool.\n    '
    print('[WARNING] File exceptions not available for {} tool plugin due to lack of absolute paths for issues.'.format(tool))
6,992,496,818,379,847,000
Print warning about exception not being applied for an issue. Warning will only be printed once per tool.
statick_tool/exceptions.py
print_exception_warning
axydes/statick
python
@classmethod
def print_exception_warning(cls, tool):
    print('[WARNING] File exceptions not available for {} tool plugin due to lack of absolute paths for issues.'.format(tool))
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):
    'IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList - a model defined in OpenAPI'
    if (local_vars_configuration is None):
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    self._api_version = None
    self._items = None
    self._kind = None
    self._metadata = None
    self.discriminator = None
    if (api_version is not None):
        self.api_version = api_version
    self.items = items
    if (kind is not None):
        self.kind = kind
    if (metadata is not None):
        self.metadata = metadata
6,291,405,993,752,813,000
IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList - a model defined in OpenAPI
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
__init__
mariusgheorghies/python
python
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):
    if (local_vars_configuration is None):
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    self._api_version = None
    self._items = None
    self._kind = None
    self._metadata = None
    self.discriminator = None
    if (api_version is not None):
        self.api_version = api_version
    self.items = items
    if (kind is not None):
        self.kind = kind
    if (metadata is not None):
        self.metadata = metadata
@property
def api_version(self):
    'Gets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n\n    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501\n\n    :return: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :rtype: str\n    '
    return self._api_version
4,384,381,070,071,019,500
Gets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :rtype: str
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
api_version
mariusgheorghies/python
python
@property
def api_version(self):
    return self._api_version
@api_version.setter
def api_version(self, api_version):
    'Sets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.\n\n    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501\n\n    :param api_version: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :type: str\n    '
    self._api_version = api_version
2,410,444,314,191,797,000
Sets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :type: str
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
api_version
mariusgheorghies/python
python
@api_version.setter
def api_version(self, api_version):
    self._api_version = api_version
@property
def items(self):
    'Gets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n\n    List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md  # noqa: E501\n\n    :return: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :rtype: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]\n    '
    return self._items
5,934,657,655,099,130,000
Gets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501 :return: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :rtype: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
items
mariusgheorghies/python
python
@property
def items(self):
    return self._items
@items.setter
def items(self, items):
    'Sets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.\n\n    List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md  # noqa: E501\n\n    :param items: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :type: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]\n    '
    if (self.local_vars_configuration.client_side_validation and (items is None)):
        raise ValueError('Invalid value for `items`, must not be `None`')
    self._items = items
-8,176,135,908,638,800,000
Sets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501 :param items: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :type: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
items
mariusgheorghies/python
python
@items.setter
def items(self, items):
    if (self.local_vars_configuration.client_side_validation and (items is None)):
        raise ValueError('Invalid value for `items`, must not be `None`')
    self._items = items
@property
def kind(self):
    'Gets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n\n    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501\n\n    :return: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :rtype: str\n    '
    return self._kind
1,721,828,530,106,403,300
Gets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :rtype: str
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
kind
mariusgheorghies/python
python
@property
def kind(self):
    return self._kind
@kind.setter
def kind(self, kind):
    'Sets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.\n\n    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501\n\n    :param kind: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :type: str\n    '
    self._kind = kind
5,888,563,764,424,450,000
Sets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :type: str
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
kind
mariusgheorghies/python
python
@kind.setter
def kind(self, kind):
    self._kind = kind
@property
def metadata(self):
    'Gets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n\n\n    :return: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :rtype: V1ListMeta\n    '
    return self._metadata
-686,375,580,806,189,600
Gets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :return: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :rtype: V1ListMeta
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
metadata
mariusgheorghies/python
python
@property
def metadata(self):
    return self._metadata
@metadata.setter
def metadata(self, metadata):
    'Sets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.\n\n\n    :param metadata: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.  # noqa: E501\n    :type: V1ListMeta\n    '
    self._metadata = metadata
8,255,552,889,854,053,000
Sets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. :param metadata: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501 :type: V1ListMeta
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
metadata
mariusgheorghies/python
python
@metadata.setter
def metadata(self, metadata):
    self._metadata = metadata
def to_dict(self):
    'Returns the model properties as a dict'
    result = {}
    for (attr, _) in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
        else:
            result[attr] = value
    return result
8,442,519,487,048,767,000
Returns the model properties as a dict
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
to_dict
mariusgheorghies/python
python
def to_dict(self):
    result = {}
    for (attr, _) in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
        else:
            result[attr] = value
    return result
def to_str(self):
    'Returns the string representation of the model'
    return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
to_str
mariusgheorghies/python
python
def to_str(self):
    return pprint.pformat(self.to_dict())
def __repr__(self):
    'For `print` and `pprint`'
    return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
__repr__
mariusgheorghies/python
python
def __repr__(self):
    return self.to_str()
def __eq__(self, other):
    'Returns true if both objects are equal'
    if (not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList)):
        return False
    return (self.to_dict() == other.to_dict())
7,353,354,793,238,505,000
Returns true if both objects are equal
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
__eq__
mariusgheorghies/python
python
def __eq__(self, other):
    if (not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList)):
        return False
    return (self.to_dict() == other.to_dict())
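Equality for this generated model is structural: two instances compare equal exactly when their to_dict() snapshots agree. A short sketch using the class from the records above (items=[] satisfies the non-None validation in the items setter):

a = IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList(items=[])
b = IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList(items=[])
print(a == b)   # True: identical to_dict() output
b.kind = 'AWSClusterTemplateList'
print(a != b)   # True: the dicts now differ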
def __ne__(self, other):
    'Returns true if both objects are not equal'
    if (not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList)):
        return True
    return (self.to_dict() != other.to_dict())
-324,507,109,502,987,460
Returns true if both objects are not equal
kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py
__ne__
mariusgheorghies/python
python
def __ne__(self, other):
    if (not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList)):
        return True
    return (self.to_dict() != other.to_dict())
def hashimg(self, im):
    ' Compute a hash for an image, useful for image comparisons '
    return hashlib.md5(im.tostring()).digest()
4,447,783,696,848,638,000
Compute a hash for an image, useful for image comparisons
modules/python/test/tests_common.py
hashimg
552103917/opcv3.4
python
def hashimg(self, im):
    return hashlib.md5(im.tostring()).digest()
def hashimg(self, im):
    ' Compute a hash for an image, useful for image comparisons '
    return hashlib.md5(im.tostring()).hexdigest()
8,186,755,235,098,888,000
Compute a hash for an image, useful for image comparisons
modules/python/test/tests_common.py
hashimg
552103917/opcv3.4
python
def hashimg(self, im):
    return hashlib.md5(im.tostring()).hexdigest()
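The two hashimg records differ only in the final call: digest() returns the raw 16 MD5 bytes, while hexdigest() returns the same value as a 32-character hex string. A quick check:

import hashlib

h = hashlib.md5(b'raw image bytes')
print(h.digest().hex() == h.hexdigest())  # True: same value, two encodings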
def bbox_to_array(arr, label=0, max_bboxes=64, bbox_width=16):
    '\n    Converts a 1-dimensional bbox array to an image-like\n    3-dimensional array CHW array\n    '
    arr = pad_bbox(arr, max_bboxes, bbox_width)
    return arr[np.newaxis, :, :]
-3,777,192,725,018,629,000
Converts a 1-dimensional bbox array to an image-like 3-dimensional array CHW array
digits/extensions/data/objectDetection/utils.py
bbox_to_array
dcmartin/digits
python
def bbox_to_array(arr, label=0, max_bboxes=64, bbox_width=16):
    arr = pad_bbox(arr, max_bboxes, bbox_width)
    return arr[np.newaxis, :, :]
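A shape sketch for bbox_to_array; pad_bbox is not among these records, so the only assumption made here is that it pads to (max_bboxes, bbox_width), after which np.newaxis prepends the channel axis:

import numpy as np

padded = np.zeros((64, 16), dtype=np.float32)  # assumed pad_bbox output shape
print(padded[np.newaxis, :, :].shape)          # (1, 64, 16), i.e. CHW-like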
def pad_image(img, padding_image_height, padding_image_width):
    '\n    pad a single image to the specified dimensions\n    '
    src_width = img.size[0]
    src_height = img.size[1]
    if (padding_image_width < src_width):
        raise ValueError(('Source image width %d is greater than padding width %d' % (src_width, padding_image_width)))
    if (padding_image_height < src_height):
        raise ValueError(('Source image height %d is greater than padding height %d' % (src_height, padding_image_height)))
    padded_img = PIL.Image.new(img.mode, (padding_image_width, padding_image_height), 'black')
    padded_img.paste(img, (0, 0))
    return padded_img
120,234,168,022,130,060
pad a single image to the specified dimensions
digits/extensions/data/objectDetection/utils.py
pad_image
dcmartin/digits
python
def pad_image(img, padding_image_height, padding_image_width):
    src_width = img.size[0]
    src_height = img.size[1]
    if (padding_image_width < src_width):
        raise ValueError(('Source image width %d is greater than padding width %d' % (src_width, padding_image_width)))
    if (padding_image_height < src_height):
        raise ValueError(('Source image height %d is greater than padding height %d' % (src_height, padding_image_height)))
    padded_img = PIL.Image.new(img.mode, (padding_image_width, padding_image_height), 'black')
    padded_img.paste(img, (0, 0))
    return padded_img
@classmethod
def lmdb_format_length(cls):
    '\n    width of an LMDB datafield returned by the gt_to_lmdb_format function.\n    :return:\n    '
    return 16
2,400,491,777,051,154,000
width of an LMDB datafield returned by the gt_to_lmdb_format function. :return:
digits/extensions/data/objectDetection/utils.py
lmdb_format_length
dcmartin/digits
python
@classmethod
def lmdb_format_length(cls):
    return 16
def gt_to_lmdb_format(self):
    '\n    For storage of a bbox ground truth object into a float32 LMDB.\n    Sort-by attribute is always the last value in the array.\n    '
    result = [self.bbox.xl, self.bbox.yt, (self.bbox.xr - self.bbox.xl), (self.bbox.yb - self.bbox.yt), self.angle, self.object, 0, self.roty, self.truncated, self.occlusion, self.length, self.width, self.height, self.locx, self.locy, self.locz]
    # Compare with == rather than `is`: identity checks on ints only work by accident.
    assert (len(result) == self.lmdb_format_length())
    return result
3,714,215,943,155,052,000
For storage of a bbox ground truth object into a float32 LMDB. Sort-by attribute is always the last value in the array.
digits/extensions/data/objectDetection/utils.py
gt_to_lmdb_format
dcmartin/digits
python
def gt_to_lmdb_format(self):
    result = [self.bbox.xl, self.bbox.yt, (self.bbox.xr - self.bbox.xl), (self.bbox.yb - self.bbox.yt), self.angle, self.object, 0, self.roty, self.truncated, self.occlusion, self.length, self.width, self.height, self.locx, self.locy, self.locz]
    assert (len(result) == self.lmdb_format_length())
    return result