Please provide a description of the function:
def read_data(self, offsets):
    # type: (Descriptor, Offsets) -> Tuple[bytes, Offsets]
    newoffset = None
    if not self.local_path.use_stdin:
        if offsets.num_bytes == 0:
            return None, None
        # compute start from view
        start = self.local_path.view.fd_start + offsets.range_start
        # encrypted offsets will read past the end of the file due
        # to padding, but will be accounted for after encryption+padding
        with self.local_path.absolute_path.open('rb') as fd:
            fd.seek(start, 0)
            data = fd.read(offsets.num_bytes)
    else:
        data = blobxfer.STDIN.read(self._chunk_size)
        if not data:
            with self._meta_lock:
                self._offset -= offsets.num_bytes
                self._ase.size -= offsets.num_bytes
                self._total_chunks -= 1
                self._chunk_num -= 1
                self._outstanding_ops -= 1
        else:
            num_bytes = len(data)
            with self._meta_lock:
                self._offset -= offsets.num_bytes
                self._ase.size -= offsets.num_bytes
                newoffset = Offsets(
                    chunk_num=self._chunk_num - 1,
                    num_bytes=num_bytes,
                    range_start=self._offset,
                    range_end=self._offset + num_bytes - 1,
                    pad=False,
                )
                self._total_chunks += 1
                self._outstanding_ops += 1
                self._offset += num_bytes
                self._ase.size += num_bytes
    if self.must_compute_md5 and data:
        with self._hasher_lock:
            self.md5.update(data)
            if self.is_resumable:
                self._md5_cache[self._chunk_num - 1] = self.md5.hexdigest()
    return data, newoffset
[ "Read data from file\n :param Descriptor self: this\n :param Offsets offsets: offsets\n :rtype: tuple\n :return: (file data bytes, new Offsets if stdin)\n " ]
Please provide a description of the function:
def generate_metadata(self):
    # type: (Descriptor) -> dict
    genmeta = {}
    encmeta = {}
    # page align md5
    if (self.must_compute_md5 and
            self._ase.mode == blobxfer.models.azure.StorageModes.Page):
        aligned = blobxfer.util.page_align_content_length(self._offset)
        diff = aligned - self._offset
        if diff > 0:
            with self._hasher_lock:
                self.md5.update(b'\0' * diff)
    # generate encryption metadata
    if self._ase.is_encrypted:
        if self.must_compute_md5:
            md5digest = blobxfer.util.base64_encode_as_string(
                self.md5.digest())
        else:
            md5digest = None
        if self.hmac is not None:
            hmacdigest = blobxfer.util.base64_encode_as_string(
                self.hmac.digest())
        else:
            hmacdigest = None
        encmeta = self._ase.encryption_metadata.convert_to_json_with_mac(
            md5digest, hmacdigest)
    # generate file attribute metadata
    if self._store_file_attr and not self.local_path.use_stdin:
        merged = blobxfer.models.metadata.generate_fileattr_metadata(
            self.local_path, genmeta)
        if merged is not None:
            genmeta = merged
    # generate vectored io metadata
    if self.local_path.view.mode == VectoredIoDistributionMode.Stripe:
        merged = blobxfer.models.metadata.\
            generate_vectored_io_stripe_metadata(self.local_path, genmeta)
        if merged is not None:
            genmeta = merged
    if len(encmeta) > 0:
        metadata = encmeta
    else:
        metadata = {}
    if len(genmeta) > 0:
        metadata[blobxfer.models.metadata.JSON_KEY_BLOBXFER_METADATA] = \
            json.dumps(genmeta, ensure_ascii=False, sort_keys=True)
    if len(metadata) == 0:
        return None
    return metadata
[ "Generate metadata for descriptor\n :param Descriptor self: this\n :rtype: dict or None\n :return: kv metadata dict\n " ]
Please provide a description of the function:def add_cli_options(cli_options, action): # type: (dict, str) -> None cli_options['_action'] = action.name.lower() # if url is present, convert to constituent options if blobxfer.util.is_not_empty(cli_options.get('storage_url')): if (blobxfer.util.is_not_empty(cli_options.get('storage_account')) or blobxfer.util.is_not_empty(cli_options.get('mode')) or blobxfer.util.is_not_empty(cli_options.get('endpoint')) or blobxfer.util.is_not_empty(cli_options.get('remote_path'))): raise ValueError( 'Specified both --storage-url and --storage-account, ' '--mode, --endpoint, or --remote-path') cli_options['storage_account'], mode, \ cli_options['endpoint'], \ cli_options['remote_path'], \ sas = blobxfer.util.explode_azure_storage_url( cli_options['storage_url']) if blobxfer.util.is_not_empty(sas): if blobxfer.util.is_not_empty(cli_options['sas']): raise ValueError( 'Specified both --storage-url with a SAS token and --sas') cli_options['sas'] = sas if mode != 'blob' and mode != 'file': raise ValueError( 'Invalid derived mode from --storage-url: {}'.format(mode)) if mode == 'file': cli_options['mode'] = mode del mode del sas storage_account = cli_options.get('storage_account') azstorage = { 'endpoint': cli_options.get('endpoint') } if blobxfer.util.is_not_empty(storage_account): azstorage['accounts'] = { storage_account: ( cli_options.get('access_key') or cli_options.get('sas') ) } sa_rp = { storage_account: cli_options.get('remote_path') } local_resource = cli_options.get('local_resource') # construct "argument" from cli options if action == TransferAction.Download: arg = { 'source': [sa_rp] if sa_rp[storage_account] is not None else None, 'destination': local_resource if local_resource is not None else None, 'include': cli_options.get('include'), 'exclude': cli_options.get('exclude'), 'options': { 'check_file_md5': cli_options.get('file_md5'), 'chunk_size_bytes': cli_options.get('chunk_size_bytes'), 'delete_extraneous_destination': cli_options.get('delete'), 'max_single_object_concurrency': cli_options.get( 'max_single_object_concurrency'), 'mode': cli_options.get('mode'), 'overwrite': cli_options.get('overwrite'), 'recursive': cli_options.get('recursive'), 'rename': cli_options.get('rename'), 'rsa_private_key': cli_options.get('rsa_private_key'), 'rsa_private_key_passphrase': cli_options.get( 'rsa_private_key_passphrase'), 'restore_file_properties': { 'attributes': cli_options.get('file_attributes'), 'lmt': cli_options.get('restore_file_lmt'), 'md5': None, }, 'strip_components': cli_options.get('strip_components'), 'skip_on': { 'filesize_match': cli_options.get( 'skip_on_filesize_match'), 'lmt_ge': cli_options.get('skip_on_lmt_ge'), 'md5_match': cli_options.get('skip_on_md5_match'), }, }, } elif action == TransferAction.Synccopy: # if url is present, convert to constituent options if blobxfer.util.is_not_empty( cli_options.get('sync_copy_dest_storage_url')): if (blobxfer.util.is_not_empty( cli_options.get('sync_copy_dest_storage_account')) or blobxfer.util.is_not_empty( cli_options.get('sync_copy_dest_mode')) or blobxfer.util.is_not_empty( cli_options.get('sync_copy_dest_remote_path'))): raise ValueError( 'Specified both --sync-copy-dest-storage-url and ' '--sync-copy-dest-storage-account, ' '--sync-copy-dest-mode, or' '--sync-copy-dest-remote-path') cli_options['sync_copy_dest_storage_account'], mode, _, \ cli_options['sync_copy_dest_remote_path'], sas = \ blobxfer.util.explode_azure_storage_url( cli_options['sync_copy_dest_storage_url']) if 
blobxfer.util.is_not_empty(sas): if blobxfer.util.is_not_empty( cli_options['sync_copy_dest_sas']): raise ValueError( 'Specified both --sync-copy-dest-storage-url with ' 'a SAS token and --sync-copy-dest-sas') cli_options['sync_copy_dest_sas'] = sas if mode != 'blob' and mode != 'file': raise ValueError( 'Invalid derived destination mode from ' '--sync-copy-dest-storage-url: {}'.format(mode)) if mode == 'file': cli_options['dest_mode'] = mode del mode del sas sync_copy_dest_storage_account = cli_options.get( 'sync_copy_dest_storage_account') sync_copy_dest_remote_path = cli_options.get( 'sync_copy_dest_remote_path') if (sync_copy_dest_storage_account is not None and sync_copy_dest_remote_path is not None): sync_copy_dest = [ { sync_copy_dest_storage_account: sync_copy_dest_remote_path } ] azstorage['accounts'][sync_copy_dest_storage_account] = ( cli_options.get('sync_copy_dest_access_key') or cli_options.get('sync_copy_dest_sas') ) else: sync_copy_dest = None arg = { 'source': [sa_rp] if sa_rp[storage_account] is not None else None, 'destination': sync_copy_dest, 'include': cli_options.get('include'), 'exclude': cli_options.get('exclude'), 'options': { 'access_tier': cli_options.get('access_tier'), 'chunk_size_bytes': cli_options.get('chunk_size_bytes'), 'dest_mode': cli_options.get('sync_copy_dest_mode'), 'mode': cli_options.get('mode'), 'overwrite': cli_options.get('overwrite'), 'rename': cli_options.get('rename'), 'skip_on': { 'filesize_match': cli_options.get( 'skip_on_filesize_match'), 'lmt_ge': cli_options.get('skip_on_lmt_ge'), 'md5_match': cli_options.get('skip_on_md5_match'), }, }, } elif action == TransferAction.Upload: arg = { 'source': [local_resource] if local_resource is not None else None, 'destination': [sa_rp] if sa_rp[storage_account] is not None else None, 'include': cli_options.get('include'), 'exclude': cli_options.get('exclude'), 'options': { 'access_tier': cli_options.get('access_tier'), 'chunk_size_bytes': cli_options.get('chunk_size_bytes'), 'delete_extraneous_destination': cli_options.get('delete'), 'mode': cli_options.get('mode'), 'one_shot_bytes': cli_options.get('one_shot_bytes'), 'overwrite': cli_options.get('overwrite'), 'recursive': cli_options.get('recursive'), 'rename': cli_options.get('rename'), 'rsa_private_key': cli_options.get('rsa_private_key'), 'rsa_private_key_passphrase': cli_options.get( 'rsa_private_key_passphrase'), 'rsa_public_key': cli_options.get('rsa_public_key'), 'skip_on': { 'filesize_match': cli_options.get( 'skip_on_filesize_match'), 'lmt_ge': cli_options.get('skip_on_lmt_ge'), 'md5_match': cli_options.get('skip_on_md5_match'), }, 'stdin_as_page_blob_size': cli_options.get( 'stdin_as_page_blob_size'), 'store_file_properties': { 'attributes': cli_options.get('file_attributes'), 'cache_control': cli_options.get('file_cache_control'), 'lmt': None, 'md5': cli_options.get('file_md5'), }, 'strip_components': cli_options.get('strip_components'), 'vectored_io': { 'stripe_chunk_size_bytes': cli_options.get( 'stripe_chunk_size_bytes'), 'distribution_mode': cli_options.get('distribution_mode'), }, }, } count = 0 if arg['source'] is None: arg.pop('source') count += 1 if arg['destination'] is None: arg.pop('destination') count += 1 if count == 1: if action == TransferAction.Synccopy: raise ValueError( '--remote-path and --sync-copy-dest-remote-path must be ' 'specified together through the commandline') else: raise ValueError( '--local-path and --remote-path must be specified together ' 'through the commandline') if 'accounts' in azstorage: 
cli_options['azure_storage'] = azstorage cli_options[action.name.lower()] = arg
[ "Adds CLI options to the configuration object\n :param dict cli_options: CLI options dict\n :param TransferAction action: action\n " ]
Please provide a description of the function:
def _merge_setting(cli_options, conf, name, name_cli=None, default=None):
    # type: (dict, dict, str, str, Any) -> Any
    val = cli_options.get(name_cli or name)
    if val is None:
        val = conf.get(name, default)
    return val
[ "Merge a setting, preferring the CLI option if set\n :param dict cli_options: cli options\n :param dict conf: configuration sub-block\n :param str name: key name\n :param str name_cli: override key name for cli_options\n :param Any default: default value to set if missing\n :rtype: Any\n :return: merged setting value\n " ]
Please provide a description of the function:def merge_global_settings(config, cli_options): # type: (dict, dict) -> None # check for valid version from YAML if (not blobxfer.util.is_none_or_empty(config) and ('version' not in config or config['version'] not in _SUPPORTED_YAML_CONFIG_VERSIONS)): raise ValueError('"version" not specified in YAML config or invalid') # get action action = cli_options['_action'] if (action != TransferAction.Upload.name.lower() and action != TransferAction.Download.name.lower() and action != TransferAction.Synccopy.name.lower()): raise ValueError('invalid action: {}'.format(action)) # merge credentials if 'azure_storage' in cli_options: if 'azure_storage' not in config: config['azure_storage'] = {} config['azure_storage'] = blobxfer.util.merge_dict( config['azure_storage'], cli_options['azure_storage']) if ('azure_storage' not in config or blobxfer.util.is_none_or_empty(config['azure_storage'])): raise ValueError('azure storage settings not specified') # create action options if action not in config: config[action] = [] # append full specs, if they exist if action in cli_options: if 'source' in cli_options[action]: srcdst = { 'source': cli_options[action]['source'], 'destination': cli_options[action]['destination'], } cli_options[action].pop('source') cli_options[action].pop('destination') config[action].append(srcdst) # merge general and concurrency options if 'options' not in config: config['options'] = {} if 'concurrency' not in config['options']: config['options']['concurrency'] = {} if 'timeout' not in config['options']: config['options']['timeout'] = {} if 'proxy' not in config['options']: config['options']['proxy'] = {} options = { 'enable_azure_storage_logger': _merge_setting( cli_options, config['options'], 'enable_azure_storage_logger'), 'log_file': _merge_setting(cli_options, config['options'], 'log_file'), 'progress_bar': _merge_setting( cli_options, config['options'], 'progress_bar', default=True), 'resume_file': _merge_setting( cli_options, config['options'], 'resume_file'), 'timeout': { # TODO deprecated timeout setting 'timeout': _merge_setting( cli_options, config['options']['timeout'], 'timeout', name_cli='timeout'), 'connect': _merge_setting( cli_options, config['options']['timeout'], 'connect', name_cli='connect_timeout'), 'read': _merge_setting( cli_options, config['options']['timeout'], 'read', name_cli='read_timeout'), 'max_retries': _merge_setting( cli_options, config['options']['timeout'], 'max_retries', default=1000), }, 'verbose': _merge_setting( cli_options, config['options'], 'verbose', default=False), 'quiet': _merge_setting( cli_options, config['options'], 'quiet', default=False), 'dry_run': _merge_setting( cli_options, config['options'], 'dry_run', default=False), 'concurrency': { 'crypto_processes': _merge_setting( cli_options, config['options']['concurrency'], 'crypto_processes', default=0), 'disk_threads': _merge_setting( cli_options, config['options']['concurrency'], 'disk_threads', default=0), 'md5_processes': _merge_setting( cli_options, config['options']['concurrency'], 'md5_processes', default=0), 'transfer_threads': _merge_setting( cli_options, config['options']['concurrency'], 'transfer_threads', default=0), }, 'proxy': { 'host': _merge_setting( cli_options, config['options']['proxy'], 'host', name_cli='proxy_host'), 'username': _merge_setting( cli_options, config['options']['proxy'], 'username', name_cli='proxy_username'), 'password': _merge_setting( cli_options, config['options']['proxy'], 'password', 
name_cli='proxy_password'), } } config['options'] = options
[ "Merge \"global\" CLI options into main config\n :param dict config: config dict\n :param dict cli_options: cli options\n " ]
Please provide a description of the function:
def create_azure_storage_credentials(config, general_options):
    # type: (dict, blobxfer.models.options.General) ->
    #        blobxfer.operations.azure.StorageCredentials
    creds = blobxfer.operations.azure.StorageCredentials(general_options)
    endpoint = config['azure_storage'].get('endpoint') or 'core.windows.net'
    for name in config['azure_storage']['accounts']:
        key = config['azure_storage']['accounts'][name]
        creds.add_storage_account(name, key, endpoint)
    return creds
[ "Create an Azure StorageCredentials object from configuration\n :param dict config: config dict\n :param blobxfer.models.options.General: general options\n :rtype: blobxfer.operations.azure.StorageCredentials\n :return: credentials object\n " ]
Please provide a description of the function:
def create_general_options(config, action):
    # type: (dict, TransferAction) -> blobxfer.models.options.General
    conc = config['options']['concurrency']
    # split http proxy host into host:port
    proxy = None
    if blobxfer.util.is_not_empty(config['options']['proxy']['host']):
        tmp = config['options']['proxy']['host'].split(':')
        if len(tmp) != 2:
            raise ValueError('Proxy host is malformed: host should be ip:port')
        username = config['options']['proxy']['username']
        if blobxfer.util.is_none_or_empty(username):
            username = None
        password = config['options']['proxy']['password']
        if blobxfer.util.is_none_or_empty(password):
            password = None
        proxy = blobxfer.models.options.HttpProxy(
            host=tmp[0],
            port=int(tmp[1]),
            username=username,
            password=password,
        )
    return blobxfer.models.options.General(
        concurrency=blobxfer.models.options.Concurrency(
            crypto_processes=conc['crypto_processes'],
            disk_threads=conc['disk_threads'],
            md5_processes=conc['md5_processes'],
            transfer_threads=conc['transfer_threads'],
            action=action.value[0],
        ),
        log_file=config['options']['log_file'],
        progress_bar=config['options']['progress_bar'],
        resume_file=config['options']['resume_file'],
        # TODO deprecated timeout setting
        timeout=blobxfer.models.options.Timeout(
            connect=(
                config['options']['timeout']['connect'] or
                config['options']['timeout']['timeout']
            ),
            read=(
                config['options']['timeout']['read'] or
                config['options']['timeout']['timeout']
            ),
            max_retries=config['options']['timeout']['max_retries'],
        ),
        verbose=config['options']['verbose'],
        quiet=config['options']['quiet'],
        dry_run=config['options']['dry_run'],
        proxy=proxy,
    )
[ "Create a General Options object from configuration\n :param dict config: config dict\n :param TransferAction action: transfer action\n :rtype: blobxfer.models.options.General\n :return: general options object\n " ]
Please provide a description of the function:def create_download_specifications(ctx_cli_options, config): # type: (dict, dict) -> List[blobxfer.models.download.Specification] cli_conf = ctx_cli_options[ctx_cli_options['_action']] cli_options = cli_conf['options'] specs = [] for conf in config['download']: if 'options' in conf: conf_options = conf['options'] else: conf_options = {} # create download options mode = _merge_setting( cli_options, conf_options, 'mode', default='auto').lower() if mode == 'auto': mode = blobxfer.models.azure.StorageModes.Auto elif mode == 'append': mode = blobxfer.models.azure.StorageModes.Append elif mode == 'block': mode = blobxfer.models.azure.StorageModes.Block elif mode == 'file': mode = blobxfer.models.azure.StorageModes.File elif mode == 'page': mode = blobxfer.models.azure.StorageModes.Page else: raise ValueError('unknown mode: {}'.format(mode)) # load RSA private key PEM file if specified rpk = _merge_setting( cli_options, conf_options, 'rsa_private_key', default=None) if blobxfer.util.is_not_empty(rpk): rpkp = _merge_setting( cli_options, conf_options, 'rsa_private_key_passphrase', default=None) rpk = blobxfer.operations.crypto.load_rsa_private_key_file( rpk, rpkp) else: rpk = None # create specification conf_sod = conf_options.get('skip_on', {}) cli_sod = cli_options['skip_on'] conf_rfp = conf_options.get('restore_file_properties', {}) cli_rfp = cli_options['restore_file_properties'] ds = blobxfer.models.download.Specification( download_options=blobxfer.models.options.Download( check_file_md5=_merge_setting( cli_options, conf_options, 'check_file_md5', default=False), chunk_size_bytes=_merge_setting( cli_options, conf_options, 'chunk_size_bytes', default=0), delete_extraneous_destination=_merge_setting( cli_options, conf_options, 'delete_extraneous_destination', default=False), max_single_object_concurrency=_merge_setting( cli_options, conf_options, 'max_single_object_concurrency', default=8), mode=mode, overwrite=_merge_setting( cli_options, conf_options, 'overwrite', default=True), recursive=_merge_setting( cli_options, conf_options, 'recursive', default=True), rename=_merge_setting( cli_options, conf_options, 'rename', default=False), restore_file_properties=blobxfer.models.options.FileProperties( attributes=_merge_setting( cli_rfp, conf_rfp, 'attributes', default=False), cache_control=None, lmt=_merge_setting( cli_rfp, conf_rfp, 'lmt', default=False), md5=None, ), rsa_private_key=rpk, strip_components=_merge_setting( cli_options, conf_options, 'strip_components', default=0), ), skip_on_options=blobxfer.models.options.SkipOn( filesize_match=_merge_setting( cli_sod, conf_sod, 'filesize_match', default=False), lmt_ge=_merge_setting( cli_sod, conf_sod, 'lmt_ge', default=False), md5_match=_merge_setting( cli_sod, conf_sod, 'md5_match', default=False), ), local_destination_path=blobxfer.models.download. LocalDestinationPath( conf['destination'] ) ) # create remote source paths for src in conf['source']: if len(src) != 1: raise RuntimeError( 'invalid number of source pairs specified per entry') sa = next(iter(src)) asp = blobxfer.operations.azure.SourcePath() asp.add_path_with_storage_account(src[sa], sa) incl = _merge_setting(cli_conf, conf, 'include', default=None) if blobxfer.util.is_not_empty(incl): asp.add_includes(incl) excl = _merge_setting(cli_conf, conf, 'exclude', default=None) if blobxfer.util.is_not_empty(excl): asp.add_excludes(excl) ds.add_azure_source_path(asp) # append spec to list specs.append(ds) return specs
[ "Create a list of Download Specification objects from configuration\n :param dict ctx_cli_options: cli options\n :param dict config: config dict\n :rtype: list\n :return: list of Download Specification objects\n " ]
Please provide a description of the function:def create_synccopy_specifications(ctx_cli_options, config): # type: (dict, dict) -> List[blobxfer.models.synccopy.Specification] cli_conf = ctx_cli_options[ctx_cli_options['_action']] cli_options = cli_conf['options'] specs = [] for conf in config['synccopy']: if 'options' in conf: conf_options = conf['options'] else: conf_options = {} # get source mode mode = _merge_setting( cli_options, conf_options, 'mode', default='auto').lower() if mode == 'auto': mode = blobxfer.models.azure.StorageModes.Auto elif mode == 'append': mode = blobxfer.models.azure.StorageModes.Append elif mode == 'block': mode = blobxfer.models.azure.StorageModes.Block elif mode == 'file': mode = blobxfer.models.azure.StorageModes.File elif mode == 'page': mode = blobxfer.models.azure.StorageModes.Page else: raise ValueError('unknown source mode: {}'.format(mode)) # get destination mode destmode = _merge_setting( cli_options, conf_options, 'dest_mode', name_cli='dest_mode') if blobxfer.util.is_none_or_empty(destmode): destmode = mode else: destmode = destmode.lower() if destmode == 'auto': destmode = blobxfer.models.azure.StorageModes.Auto elif destmode == 'append': destmode = blobxfer.models.azure.StorageModes.Append elif destmode == 'block': destmode = blobxfer.models.azure.StorageModes.Block elif destmode == 'file': destmode = blobxfer.models.azure.StorageModes.File elif destmode == 'page': destmode = blobxfer.models.azure.StorageModes.Page else: raise ValueError('unknown dest mode: {}'.format(destmode)) # create specification conf_sod = conf_options.get('skip_on', {}) cli_sod = cli_options['skip_on'] scs = blobxfer.models.synccopy.Specification( synccopy_options=blobxfer.models.options.SyncCopy( access_tier=_merge_setting( cli_options, conf_options, 'access_tier', default=None), delete_extraneous_destination=_merge_setting( cli_options, conf_options, 'delete_extraneous_destination', default=False), dest_mode=destmode, mode=mode, overwrite=_merge_setting( cli_options, conf_options, 'overwrite', default=True), recursive=_merge_setting( cli_options, conf_options, 'recursive', default=True), rename=_merge_setting( cli_options, conf_options, 'rename', default=False), ), skip_on_options=blobxfer.models.options.SkipOn( filesize_match=_merge_setting( cli_sod, conf_sod, 'filesize_match', default=False), lmt_ge=_merge_setting( cli_sod, conf_sod, 'lmt_ge', default=False), md5_match=_merge_setting( cli_sod, conf_sod, 'md5_match', default=False), ), ) # create remote source paths for src in conf['source']: sa = next(iter(src)) asp = blobxfer.operations.azure.SourcePath() asp.add_path_with_storage_account(src[sa], sa) incl = _merge_setting(cli_conf, conf, 'include', default=None) if blobxfer.util.is_not_empty(incl): asp.add_includes(incl) excl = _merge_setting(cli_conf, conf, 'exclude', default=None) if blobxfer.util.is_not_empty(excl): asp.add_excludes(excl) scs.add_azure_source_path(asp) # create remote destination paths for dst in conf['destination']: if len(dst) != 1: raise RuntimeError( 'invalid number of destination pairs specified per entry') sa = next(iter(dst)) adp = blobxfer.operations.azure.DestinationPath() adp.add_path_with_storage_account(dst[sa], sa) scs.add_azure_destination_path(adp) # append spec to list specs.append(scs) return specs
[ "Create a list of SyncCopy Specification objects from configuration\n :param dict ctx_cli_options: cli options\n :param dict config: config dict\n :rtype: list\n :return: list of SyncCopy Specification objects\n " ]
Please provide a description of the function:def create_upload_specifications(ctx_cli_options, config): # type: (dict, dict) -> List[blobxfer.models.upload.Specification] cli_conf = ctx_cli_options[ctx_cli_options['_action']] cli_options = cli_conf['options'] specs = [] for conf in config['upload']: if 'options' in conf: conf_options = conf['options'] else: conf_options = {} # create upload options mode = _merge_setting( cli_options, conf_options, 'mode', default='auto').lower() if mode == 'auto': mode = blobxfer.models.azure.StorageModes.Auto elif mode == 'append': mode = blobxfer.models.azure.StorageModes.Append elif mode == 'block': mode = blobxfer.models.azure.StorageModes.Block elif mode == 'file': mode = blobxfer.models.azure.StorageModes.File elif mode == 'page': mode = blobxfer.models.azure.StorageModes.Page else: raise ValueError('unknown mode: {}'.format(mode)) # load RSA public key PEM if specified rpk = _merge_setting(cli_options, conf_options, 'rsa_public_key') if blobxfer.util.is_not_empty(rpk): rpk = blobxfer.operations.crypto.load_rsa_public_key_file(rpk) if rpk is None: # load RSA private key PEM file if specified rpk = _merge_setting( cli_options, conf_options, 'rsa_private_key') if blobxfer.util.is_not_empty(rpk): rpkp = _merge_setting( cli_options, conf_options, 'rsa_private_key_passphrase') rpk = blobxfer.operations.crypto.load_rsa_private_key_file( rpk, rpkp) rpk = rpk.public_key() else: rpk = None # create local source paths lsp = blobxfer.models.upload.LocalSourcePath() lsp.add_paths(conf['source']) incl = _merge_setting(cli_conf, conf, 'include', default=None) if blobxfer.util.is_not_empty(incl): lsp.add_includes(incl) excl = _merge_setting(cli_conf, conf, 'exclude', default=None) if blobxfer.util.is_not_empty(excl): lsp.add_excludes(excl) # create specification conf_sfp = conf_options.get('store_file_properties', {}) cli_sfp = cli_options['store_file_properties'] conf_vio = conf_options.get('vectored_io', {}) cli_vio = cli_options['vectored_io'] conf_sod = conf_options.get('skip_on', {}) cli_sod = cli_options['skip_on'] us = blobxfer.models.upload.Specification( upload_options=blobxfer.models.options.Upload( access_tier=_merge_setting( cli_options, conf_options, 'access_tier', default=None), chunk_size_bytes=_merge_setting( cli_options, conf_options, 'chunk_size_bytes', default=0), delete_extraneous_destination=_merge_setting( cli_options, conf_options, 'delete_extraneous_destination', default=False), mode=mode, one_shot_bytes=_merge_setting( cli_options, conf_options, 'one_shot_bytes', default=0), overwrite=_merge_setting( cli_options, conf_options, 'overwrite', default=True), recursive=_merge_setting( cli_options, conf_options, 'recursive', default=True), rename=_merge_setting( cli_options, conf_options, 'rename', default=False), rsa_public_key=rpk, store_file_properties=blobxfer.models.options.FileProperties( attributes=_merge_setting( cli_sfp, conf_sfp, 'attributes', default=False), cache_control=_merge_setting( cli_sfp, conf_sfp, 'cache_control', default=None), lmt=None, md5=_merge_setting( cli_sfp, conf_sfp, 'md5', default=False), ), stdin_as_page_blob_size=_merge_setting( cli_options, conf_options, 'stdin_as_page_blob_size', default=0), strip_components=_merge_setting( cli_options, conf_options, 'strip_components', default=0), vectored_io=blobxfer.models.options.VectoredIo( stripe_chunk_size_bytes=_merge_setting( cli_vio, conf_vio, 'stripe_chunk_size_bytes', default=1073741824), distribution_mode=blobxfer. 
models.upload.VectoredIoDistributionMode( _merge_setting( cli_vio, conf_vio, 'distribution_mode', default='disabled').lower()), ), ), skip_on_options=blobxfer.models.options.SkipOn( filesize_match=_merge_setting( cli_sod, conf_sod, 'filesize_match', default=False), lmt_ge=_merge_setting( cli_sod, conf_sod, 'lmt_ge', default=False), md5_match=_merge_setting( cli_sod, conf_sod, 'md5_match', default=False), ), local_source_path=lsp, ) # create remote destination paths for dst in conf['destination']: if len(dst) != 1: raise RuntimeError( 'invalid number of destination pairs specified per entry') sa = next(iter(dst)) adp = blobxfer.operations.azure.DestinationPath() adp.add_path_with_storage_account(dst[sa], sa) us.add_azure_destination_path(adp) # append spec to list specs.append(us) return specs
[ "Create a list of Upload Specification objects from configuration\n :param dict ctx_cli_options: cli options\n :param dict config: config dict\n :rtype: list\n :return: list of Upload Specification objects\n " ]
Please provide a description of the function:
def _initialize_processes(self, target, num_workers, description):
    # type: (_MultiprocessOffload, function, int, str) -> None
    if num_workers is None or num_workers < 1:
        raise ValueError('invalid num_workers: {}'.format(num_workers))
    logger.debug('initializing {}{} processes'.format(
        num_workers, ' ' + description if description is not None else ''))
    for _ in range(num_workers):
        proc = multiprocessing.Process(target=target)
        proc.start()
        self._procs.append(proc)
[ "Initialize processes\n :param _MultiprocessOffload self: this\n :param function target: target function for process\n :param int num_workers: number of worker processes\n :param str description: description\n " ]
Please provide a description of the function:
def finalize_processes(self):
    # type: (_MultiprocessOffload) -> None
    self._term_signal.value = 1
    if self._check_thread is not None:
        self._check_thread.join()
    for proc in self._procs:
        proc.join()
[ "Finalize processes\n :param _MultiprocessOffload self: this\n " ]
Please provide a description of the function:
def initialize_check_thread(self, check_func):
    # type: (_MultiprocessOffload, function) -> None
    self._check_thread = threading.Thread(target=check_func)
    self._check_thread.start()
[ "Initialize the multiprocess done queue check thread\n :param Downloader self: this\n :param function check_func: check function\n " ]
Please provide a description of the function:
def download(ctx):
    settings.add_cli_options(ctx.cli_options, settings.TransferAction.Download)
    ctx.initialize(settings.TransferAction.Download)
    specs = settings.create_download_specifications(
        ctx.cli_options, ctx.config)
    del ctx.cli_options
    for spec in specs:
        blobxfer.api.Downloader(
            ctx.general_options, ctx.credentials, spec
        ).start()
[ "Download blobs or files from Azure Storage" ]
Please provide a description of the function:
def synccopy(ctx):
    settings.add_cli_options(ctx.cli_options, settings.TransferAction.Synccopy)
    ctx.initialize(settings.TransferAction.Synccopy)
    specs = settings.create_synccopy_specifications(
        ctx.cli_options, ctx.config)
    del ctx.cli_options
    for spec in specs:
        blobxfer.api.SyncCopy(
            ctx.general_options, ctx.credentials, spec
        ).start()
[ "Synchronously copy blobs or files between Azure Storage accounts" ]
Please provide a description of the function:
def upload(ctx):
    settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload)
    ctx.initialize(settings.TransferAction.Upload)
    specs = settings.create_upload_specifications(
        ctx.cli_options, ctx.config)
    del ctx.cli_options
    for spec in specs:
        blobxfer.api.Uploader(
            ctx.general_options, ctx.credentials, spec
        ).start()
[ "Upload files to Azure Storage" ]
Please provide a description of the function:
def initialize(self, action):
    # type: (CliContext, settings.TransferAction) -> None
    self._init_config()
    self.general_options = settings.create_general_options(
        self.config, action)
    self.credentials = settings.create_azure_storage_credentials(
        self.config, self.general_options)
[ "Initialize context\n :param CliContext self: this\n :param settings.TransferAction action: transfer action\n " ]
Please provide a description of the function:
def _read_yaml_file(self, yaml_file):
    # type: (CliContext, pathlib.Path) -> None
    with yaml_file.open('r') as f:
        if self.config is None:
            self.config = ruamel.yaml.load(
                f, Loader=ruamel.yaml.RoundTripLoader)
        else:
            self.config = blobxfer.util.merge_dict(
                ruamel.yaml.load(f, Loader=ruamel.yaml.RoundTripLoader),
                self.config)
[ "Read a yaml file into self.config\n :param CliContext self: this\n :param pathlib.Path yaml_file: yaml file to load\n " ]
Please provide a description of the function:
def _init_config(self):
    # type: (CliContext) -> None
    # load yaml config file into memory
    if blobxfer.util.is_not_empty(self.cli_options['yaml_config']):
        yaml_config = pathlib.Path(self.cli_options['yaml_config'])
        self._read_yaml_file(yaml_config)
    if self.config is None:
        self.config = {}
    # merge "global" cli options with config
    settings.merge_global_settings(self.config, self.cli_options)
    # set log file if specified
    logfile = self.config['options'].get('log_file', None)
    blobxfer.util.setup_logger(logger, logfile)
    # set azure storage logging level
    azstorage_logger = logging.getLogger('azure.storage')
    if self.config['options'].get('enable_azure_storage_logger', False):
        blobxfer.util.setup_logger(azstorage_logger, logfile)
        azstorage_logger.setLevel(logging.INFO)
    else:
        # disable azure storage logging: setting logger level to CRITICAL
        # effectively disables logging from azure storage
        azstorage_logger.setLevel(logging.CRITICAL)
    # set verbose logging
    if self.config['options'].get('verbose', False):
        blobxfer.util.set_verbose_logger_handlers()
    # output mixed config
    if self.show_config:
        logger.debug('config: \n{}'.format(
            json.dumps(self.config, indent=4)))
        logger.debug('cli config: \n{}'.format(
            json.dumps(
                self.cli_options[self.cli_options['_action']],
                indent=4, sort_keys=True)))
    del self.show_config
[ "Initializes configuration of the context\n :param CliContext self: this\n " ]
Please provide a description of the function:
def send_messages(self, emails):
    '''
    Comments
    '''
    if not emails:
        return
    count = 0
    for email in emails:
        mail = self._build_sg_mail(email)
        try:
            self.sg.client.mail.send.post(request_body=mail)
            count += 1
        except HTTPError as e:
            if not self.fail_silently:
                raise
    return count
[]
Please provide a description of the function:
def get_idp_sso_supported_bindings(idp_entity_id=None, config=None):
    if config is None:
        # avoid circular import
        from djangosaml2.conf import get_config
        config = get_config()
    # load metadata store from config
    meta = getattr(config, 'metadata', {})
    # if idp is None, assume only one exists so just use that
    if idp_entity_id is None:
        # .keys() returns dict_keys in python3.5+
        try:
            idp_entity_id = list(available_idps(config).keys())[0]
        except IndexError:
            raise ImproperlyConfigured("No IdP configured!")
    try:
        return meta.service(idp_entity_id, 'idpsso_descriptor',
                            'single_sign_on_service').keys()
    except UnknownSystemEntity:
        return []
[ "Returns the list of bindings supported by an IDP\n This is not clear in the pysaml2 code, so wrapping it in a util" ]
Please provide a description of the function:
def fail_acs_response(request, *args, **kwargs):
    failure_function = import_string(get_custom_setting(
        'SAML_ACS_FAILURE_RESPONSE_FUNCTION',
        'djangosaml2.acs_failures.template_failure'))
    return failure_function(request, *args, **kwargs)
[ " Serves as a common mechanism for ending ACS in case of any SAML related failure.\n Handling can be configured by setting the SAML_ACS_FAILURE_RESPONSE_FUNCTION as\n suitable for the project.\n\n The default behavior uses SAML specific template that is rendered on any ACS error,\n but this can be simply changed so that PermissionDenied exception is raised instead.\n " ]
Please provide a description of the function:def login(request, config_loader_path=None, wayf_template='djangosaml2/wayf.html', authorization_error_template='djangosaml2/auth_error.html', post_binding_form_template='djangosaml2/post_binding_form.html'): logger.debug('Login process started') came_from = request.GET.get('next', settings.LOGIN_REDIRECT_URL) if not came_from: logger.warning('The next parameter exists but is empty') came_from = settings.LOGIN_REDIRECT_URL # Ensure the user-originating redirection url is safe. if not is_safe_url_compat(url=came_from, allowed_hosts={request.get_host()}): came_from = settings.LOGIN_REDIRECT_URL # if the user is already authenticated that maybe because of two reasons: # A) He has this URL in two browser windows and in the other one he # has already initiated the authenticated session. # B) He comes from a view that (incorrectly) send him here because # he does not have enough permissions. That view should have shown # an authorization error in the first place. # We can only make one thing here and that is configurable with the # SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN setting. If that setting # is True (default value) we will redirect him to the came_from view. # Otherwise, we will show an (configurable) authorization error. if callable_bool(request.user.is_authenticated): redirect_authenticated_user = getattr(settings, 'SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN', True) if redirect_authenticated_user: return HttpResponseRedirect(came_from) else: logger.debug('User is already logged in') return render(request, authorization_error_template, { 'came_from': came_from, }) selected_idp = request.GET.get('idp', None) conf = get_config(config_loader_path, request) # is a embedded wayf needed? idps = available_idps(conf) if selected_idp is None and len(idps) > 1: logger.debug('A discovery process is needed') return render(request, wayf_template, { 'available_idps': idps.items(), 'came_from': came_from, }) # choose a binding to try first sign_requests = getattr(conf, '_sp_authn_requests_signed', False) binding = BINDING_HTTP_POST if sign_requests else BINDING_HTTP_REDIRECT logger.debug('Trying binding %s for IDP %s', binding, selected_idp) # ensure our selected binding is supported by the IDP supported_bindings = get_idp_sso_supported_bindings(selected_idp, config=conf) if binding not in supported_bindings: logger.debug('Binding %s not in IDP %s supported bindings: %s', binding, selected_idp, supported_bindings) if binding == BINDING_HTTP_POST: logger.warning('IDP %s does not support %s, trying %s', selected_idp, binding, BINDING_HTTP_REDIRECT) binding = BINDING_HTTP_REDIRECT else: logger.warning('IDP %s does not support %s, trying %s', selected_idp, binding, BINDING_HTTP_POST) binding = BINDING_HTTP_POST # if switched binding still not supported, give up if binding not in supported_bindings: raise UnsupportedBinding('IDP %s does not support %s or %s', selected_idp, BINDING_HTTP_POST, BINDING_HTTP_REDIRECT) client = Saml2Client(conf) http_response = None logger.debug('Redirecting user to the IdP via %s binding.', binding) if binding == BINDING_HTTP_REDIRECT: try: # do not sign the xml itself, instead use the sigalg to # generate the signature as a URL param sig_alg_option_map = {'sha1': SIG_RSA_SHA1, 'sha256': SIG_RSA_SHA256} sig_alg_option = getattr(conf, '_sp_authn_requests_signed_alg', 'sha1') sigalg = sig_alg_option_map[sig_alg_option] if sign_requests else None nsprefix = get_namespace_prefixes() session_id, result = client.prepare_for_authenticate( 
entityid=selected_idp, relay_state=came_from, binding=binding, sign=False, sigalg=sigalg, nsprefix=nsprefix) except TypeError as e: logger.error('Unable to know which IdP to use') return HttpResponse(text_type(e)) else: http_response = HttpResponseRedirect(get_location(result)) elif binding == BINDING_HTTP_POST: if post_binding_form_template: # get request XML to build our own html based on the template try: location = client.sso_location(selected_idp, binding) except TypeError as e: logger.error('Unable to know which IdP to use') return HttpResponse(text_type(e)) session_id, request_xml = client.create_authn_request( location, binding=binding) try: if PY3: saml_request = base64.b64encode(binary_type(request_xml, 'UTF-8')) else: saml_request = base64.b64encode(binary_type(request_xml)) http_response = render(request, post_binding_form_template, { 'target_url': location, 'params': { 'SAMLRequest': saml_request, 'RelayState': came_from, }, }) except TemplateDoesNotExist: pass if not http_response: # use the html provided by pysaml2 if no template was specified or it didn't exist try: session_id, result = client.prepare_for_authenticate( entityid=selected_idp, relay_state=came_from, binding=binding) except TypeError as e: logger.error('Unable to know which IdP to use') return HttpResponse(text_type(e)) else: http_response = HttpResponse(result['data']) else: raise UnsupportedBinding('Unsupported binding: %s', binding) # success, so save the session ID and return our response logger.debug('Saving the session_id in the OutstandingQueries cache') oq_cache = OutstandingQueriesCache(request.session) oq_cache.set(session_id, came_from) return http_response
[ "SAML Authorization Request initiator\n\n This view initiates the SAML2 Authorization handshake\n using the pysaml2 library to create the AuthnRequest.\n It uses the SAML 2.0 Http Redirect protocol binding.\n\n * post_binding_form_template - path to a template containing HTML form with\n hidden input elements, used to send the SAML message data when HTTP POST\n binding is being used. You can customize this template to include custom\n branding and/or text explaining the automatic redirection process. Please\n see the example template in\n templates/djangosaml2/example_post_binding_form.html\n If set to None or nonexistent template, default form from the saml2 library\n will be rendered.\n " ]
Please provide a description of the function:
def assertion_consumer_service(request,
                               config_loader_path=None,
                               attribute_mapping=None,
                               create_unknown_user=None):
    attribute_mapping = attribute_mapping or get_custom_setting(
        'SAML_ATTRIBUTE_MAPPING', {'uid': ('username', )})
    create_unknown_user = create_unknown_user if create_unknown_user is not None else \
        get_custom_setting('SAML_CREATE_UNKNOWN_USER', True)
    conf = get_config(config_loader_path, request)
    try:
        xmlstr = request.POST['SAMLResponse']
    except KeyError:
        logger.warning('Missing "SAMLResponse" parameter in POST data.')
        raise SuspiciousOperation

    client = Saml2Client(conf, identity_cache=IdentityCache(request.session))

    oq_cache = OutstandingQueriesCache(request.session)
    outstanding_queries = oq_cache.outstanding_queries()

    try:
        response = client.parse_authn_request_response(xmlstr, BINDING_HTTP_POST, outstanding_queries)
    except (StatusError, ToEarly):
        logger.exception("Error processing SAML Assertion.")
        return fail_acs_response(request)
    except ResponseLifetimeExceed:
        logger.info("SAML Assertion is no longer valid. Possibly caused by network delay or replay attack.", exc_info=True)
        return fail_acs_response(request)
    except SignatureError:
        logger.info("Invalid or malformed SAML Assertion.", exc_info=True)
        return fail_acs_response(request)
    except StatusAuthnFailed:
        logger.info("Authentication denied for user by IdP.", exc_info=True)
        return fail_acs_response(request)
    except StatusRequestDenied:
        logger.warning("Authentication interrupted at IdP.", exc_info=True)
        return fail_acs_response(request)
    except StatusNoAuthnContext:
        logger.warning("Missing Authentication Context from IdP.", exc_info=True)
        return fail_acs_response(request)
    except MissingKey:
        logger.exception("SAML Identity Provider is not configured correctly: certificate key is missing!")
        return fail_acs_response(request)
    except UnsolicitedResponse:
        logger.exception("Received SAMLResponse when no request has been made.")
        return fail_acs_response(request)

    if response is None:
        logger.warning("Invalid SAML Assertion received (unknown error).")
        return fail_acs_response(request, status=400, exc_class=SuspiciousOperation)

    session_id = response.session_id()
    oq_cache.delete(session_id)

    # authenticate the remote user
    session_info = response.session_info()

    if callable(attribute_mapping):
        attribute_mapping = attribute_mapping()
    if callable(create_unknown_user):
        create_unknown_user = create_unknown_user()

    logger.debug('Trying to authenticate the user. Session info: %s', session_info)
    user = auth.authenticate(request=request,
                             session_info=session_info,
                             attribute_mapping=attribute_mapping,
                             create_unknown_user=create_unknown_user)
    if user is None:
        logger.warning("Could not authenticate user received in SAML Assertion. Session info: %s", session_info)
        raise PermissionDenied

    auth.login(request, user)
    _set_subject_id(request.session, session_info['name_id'])
    logger.debug("User %s authenticated via SSO.", user)

    logger.debug('Sending the post_authenticated signal')
    post_authenticated.send_robust(sender=user, session_info=session_info)

    # redirect the user to the view where he came from
    default_relay_state = get_custom_setting('ACS_DEFAULT_REDIRECT_URL',
                                             settings.LOGIN_REDIRECT_URL)
    relay_state = request.POST.get('RelayState', default_relay_state)
    if not relay_state:
        logger.warning('The RelayState parameter exists but is empty')
        relay_state = default_relay_state
    if not is_safe_url_compat(url=relay_state, allowed_hosts={request.get_host()}):
        relay_state = settings.LOGIN_REDIRECT_URL
    logger.debug('Redirecting to the RelayState: %s', relay_state)
    return HttpResponseRedirect(relay_state)
[ "SAML Authorization Response endpoint\n\n The IdP will send its response to this view, which\n will process it with pysaml2 help and log the user\n in using the custom Authorization backend\n djangosaml2.backends.Saml2Backend that should be\n enabled in the settings.py\n " ]
Please provide a description of the function:
def echo_attributes(request,
                    config_loader_path=None,
                    template='djangosaml2/echo_attributes.html'):
    state = StateCache(request.session)
    conf = get_config(config_loader_path, request)

    client = Saml2Client(conf, state_cache=state,
                         identity_cache=IdentityCache(request.session))
    subject_id = _get_subject_id(request.session)
    try:
        identity = client.users.get_identity(subject_id,
                                             check_not_on_or_after=False)
    except AttributeError:
        return HttpResponse("No active SAML identity found. Are you sure you have logged in via SAML?")

    return render(request, template, {'attributes': identity[0]})
[ "Example view that echo the SAML attributes of an user" ]
Please provide a description of the function:
def logout(request, config_loader_path=None):
    state = StateCache(request.session)
    conf = get_config(config_loader_path, request)

    client = Saml2Client(conf, state_cache=state,
                         identity_cache=IdentityCache(request.session))
    subject_id = _get_subject_id(request.session)
    if subject_id is None:
        logger.warning(
            'The session does not contain the subject id for user %s',
            request.user)

    result = client.global_logout(subject_id)

    state.sync()

    if not result:
        logger.error("Looks like the user %s is not logged in any IdP/AA", subject_id)
        return HttpResponseBadRequest("You are not logged in any IdP/AA")

    if len(result) > 1:
        logger.error('Sorry, I do not know how to logout from several sources. I will logout just from the first one')

    for entityid, logout_info in result.items():
        if isinstance(logout_info, tuple):
            binding, http_info = logout_info
            if binding == BINDING_HTTP_POST:
                logger.debug('Returning form to the IdP to continue the logout process')
                body = ''.join(http_info['data'])
                return HttpResponse(body)
            elif binding == BINDING_HTTP_REDIRECT:
                logger.debug('Redirecting to the IdP to continue the logout process')
                return HttpResponseRedirect(get_location(http_info))
            else:
                logger.error('Unknown binding: %s', binding)
                return HttpResponseServerError('Failed to log out')
        else:
            # We must have had a soap logout
            return finish_logout(request, logout_info)

    logger.error('Could not logout because there only the HTTP_REDIRECT is supported')
    return HttpResponseServerError('Logout Binding not supported')
[ "SAML Logout Request initiator\n\n This view initiates the SAML2 Logout request\n using the pysaml2 library to create the LogoutRequest.\n " ]
Please provide a description of the function:
def do_logout_service(request, data, binding, config_loader_path=None, next_page=None,
                      logout_error_template='djangosaml2/logout_error.html'):
    logger.debug('Logout service started')
    conf = get_config(config_loader_path, request)

    state = StateCache(request.session)
    client = Saml2Client(conf, state_cache=state,
                         identity_cache=IdentityCache(request.session))

    if 'SAMLResponse' in data:  # we started the logout
        logger.debug('Receiving a logout response from the IdP')
        response = client.parse_logout_request_response(data['SAMLResponse'], binding)
        state.sync()
        return finish_logout(request, response, next_page=next_page)

    elif 'SAMLRequest' in data:  # logout started by the IdP
        logger.debug('Receiving a logout request from the IdP')
        subject_id = _get_subject_id(request.session)
        if subject_id is None:
            logger.warning(
                'The session does not contain the subject id for user %s. Performing local logout',
                request.user)
            auth.logout(request)
            return render(request, logout_error_template, status=403)
        else:
            http_info = client.handle_logout_request(
                data['SAMLRequest'],
                subject_id,
                binding,
                relay_state=data.get('RelayState', ''))
            state.sync()
            auth.logout(request)
            return HttpResponseRedirect(get_location(http_info))
    else:
        logger.error('No SAMLResponse or SAMLRequest parameter found')
        raise Http404('No SAMLResponse or SAMLRequest parameter found')
[ "SAML Logout Response endpoint\n\n The IdP will send the logout response to this view,\n which will process it with pysaml2 help and log the user\n out.\n Note that the IdP can request a logout even when\n we didn't initiate the process as a single logout\n request started by another SP.\n " ]
Please provide a description of the function:
def metadata(request, config_loader_path=None, valid_for=None):
    conf = get_config(config_loader_path, request)
    metadata = entity_descriptor(conf)
    return HttpResponse(content=text_type(metadata).encode('utf-8'),
                        content_type="text/xml; charset=utf8")
[ "Returns an XML with the SAML 2.0 metadata for this\n SP as configured in the settings.py file.\n " ]
Please provide a description of the function:
def configure_user(self, user, attributes, attribute_mapping):
    user.set_unusable_password()
    return self.update_user(user, attributes, attribute_mapping,
                            force_save=True)
[ "Configures a user after creation and returns the updated user.\n\n By default, returns the user with his attributes updated.\n " ]
Please provide a description of the function:
def update_user(self, user, attributes, attribute_mapping,
                force_save=False):
    if not attribute_mapping:
        return user

    user_modified = False
    for saml_attr, django_attrs in attribute_mapping.items():
        attr_value_list = attributes.get(saml_attr)
        if not attr_value_list:
            logger.debug(
                'Could not find value for "%s", not updating fields "%s"',
                saml_attr, django_attrs)
            continue

        for attr in django_attrs:
            if hasattr(user, attr):
                user_attr = getattr(user, attr)
                if callable(user_attr):
                    modified = user_attr(attr_value_list)
                else:
                    modified = self._set_attribute(user, attr, attr_value_list[0])

                user_modified = user_modified or modified
            else:
                logger.debug(
                    'Could not find attribute "%s" on user "%s"', attr, user)

    logger.debug('Sending the pre_save signal')
    signal_modified = any(
        [response for receiver, response
         in pre_user_save.send_robust(sender=user.__class__,
                                      instance=user,
                                      attributes=attributes,
                                      user_modified=user_modified)]
    )

    if user_modified or signal_modified or force_save:
        user.save()

    return user
[ "Update a user with a set of attributes and returns the updated user.\n\n By default it uses a mapping defined in the settings constant\n SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has\n that field defined it will be set.\n " ]
Please provide a description of the function:
def _set_attribute(self, obj, attr, value):
    field = obj._meta.get_field(attr)
    if field.max_length is not None and len(value) > field.max_length:
        cleaned_value = value[:field.max_length]
        logger.warn('The attribute "%s" was trimmed from "%s" to "%s"',
                    attr, value, cleaned_value)
    else:
        cleaned_value = value

    old_value = getattr(obj, attr)
    if cleaned_value != old_value:
        setattr(obj, attr, cleaned_value)
        return True

    return False
[ "Set an attribute of an object to a specific value.\n\n Return True if the attribute was changed and False otherwise.\n " ]
Please provide a description of the function:
def config_settings_loader(request=None):
    conf = SPConfig()
    conf.load(copy.deepcopy(settings.SAML_CONFIG))
    return conf
[ "Utility function to load the pysaml2 configuration.\n\n This is also the default config loader.\n " ]
Please provide a description of the function:
def mkpath(*segments, **query):
    # Remove empty segments (e.g. no key specified)
    segments = [bytes_to_str(s) for s in segments if s is not None]
    # Join the segments into a path
    pathstring = '/'.join(segments)
    # Remove extra slashes
    pathstring = re.sub('/+', '/', pathstring)

    # Add the query string if it exists
    _query = {}
    for key in query:
        if query[key] in [False, True]:
            _query[key] = str(query[key]).lower()
        elif query[key] is not None:
            if PY2 and isinstance(query[key], unicode):  # noqa
                _query[key] = query[key].encode('utf-8')
            else:
                _query[key] = query[key]

    if len(_query) > 0:
        pathstring += "?" + urlencode(_query)

    if not pathstring.startswith('/'):
        pathstring = '/' + pathstring

    return pathstring
[ "\n Constructs the path & query portion of a URI from path segments\n and a dict.\n " ]
Please provide a description of the function:
def search_index_path(self, index=None, **options):
    if not self.yz_wm_index:
        raise RiakError("Yokozuna search is unsupported by this Riak node")
    if index:
        quote_plus(index)
    return mkpath(self.yz_wm_index, "index", index, **options)
[ "\n Builds a Yokozuna search index URL.\n\n :param index: optional name of a yz index\n :type index: string\n :param options: optional list of additional arguments\n :type index: dict\n :rtype URL string\n " ]
Please provide a description of the function:
def search_schema_path(self, index, **options):
    if not self.yz_wm_schema:
        raise RiakError("Yokozuna search is unsupported by this Riak node")
    return mkpath(self.yz_wm_schema, "schema", quote_plus(index),
                  **options)
[ "\n Builds a Yokozuna search Solr schema URL.\n\n :param index: a name of a yz solr schema\n :type index: string\n :param options: optional list of additional arguments\n :type index: dict\n :rtype URL string\n " ]
Please provide a description of the function:
def preflist_path(self, bucket, key, bucket_type=None, **options):
    if not self.riak_kv_wm_preflist:
        raise RiakError("Preflists are unsupported by this Riak node")
    if self.riak_kv_wm_bucket_type and bucket_type:
        return mkpath("/types", quote_plus(bucket_type),
                      "buckets", quote_plus(bucket),
                      "keys", quote_plus(key),
                      "preflist", **options)
    else:
        return mkpath("/buckets", quote_plus(bucket),
                      "keys", quote_plus(key),
                      "preflist", **options)
[ "\n Generate the URL for bucket/key preflist information\n\n :param bucket: Name of a Riak bucket\n :type bucket: string\n :param key: Name of a Key\n :type key: string\n :param bucket_type: Optional Riak Bucket Type\n :type bucket_type: None or string\n :rtype URL string\n " ]
Please provide a description of the function:
def deep_merge(a, b):
    assert quacks_like_dict(a), quacks_like_dict(b)
    dst = a.copy()

    stack = [(dst, b)]
    while stack:
        current_dst, current_src = stack.pop()
        for key in current_src:
            if key not in current_dst:
                current_dst[key] = current_src[key]
            else:
                if (quacks_like_dict(current_src[key]) and
                        quacks_like_dict(current_dst[key])):
                    stack.append((current_dst[key], current_src[key]))
                else:
                    current_dst[key] = current_src[key]
    return dst
[ "Merge two deep dicts non-destructively\n\n Uses a stack to avoid maximum recursion depth exceptions\n\n >>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}\n >>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}\n >>> c = deep_merge(a, b)\n >>> from pprint import pprint; pprint(c)\n {'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}\n " ]
Please provide a description of the function:
def to_op(self):
    if not self._adds:
        return None
    changes = {}
    if self._adds:
        changes['adds'] = list(self._adds)
    return changes
[ "\n Extracts the modification operation from the Hll.\n\n :rtype: dict, None\n " ]
Please provide a description of the function:
def add(self, element):
    if not isinstance(element, six.string_types):
        raise TypeError("Hll elements can only be strings")
    self._adds.add(element)
[ "\n Adds an element to the HyperLogLog. Datatype cardinality will\n be updated when the object is saved.\n\n :param element: the element to add\n :type element: str\n " ]
Please provide a description of the function:
def ping(self):
    status, _, body = self._request('GET', self.ping_path())
    return (status is not None) and (bytes_to_str(body) == 'OK')
[ "\n Check server is alive over HTTP\n " ]
Please provide a description of the function:def stats(self): status, _, body = self._request('GET', self.stats_path(), {'Accept': 'application/json'}) if status == 200: return json.loads(bytes_to_str(body)) else: return None
[ "\n Gets performance statistics and server information\n " ]
Please provide a description of the function:def get_resources(self): status, _, body = self._request('GET', '/', {'Accept': 'application/json'}) if status == 200: tmp, resources = json.loads(bytes_to_str(body)), {} for k in tmp: # The keys and values returned by json.loads() are unicode, # which will cause problems when passed into httplib later # (expecting bytes both in Python 2.x and 3.x). # We just encode the resource paths into bytes, with an # encoding consistent with what the resources module expects. resources[k] = tmp[k].encode('utf-8') return resources else: return {}
[ "\n Gets a JSON mapping of server-side resource names to paths\n :rtype dict\n " ]
Please provide a description of the function:def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, notfound_ok=None, head_only=False): # We could detect quorum_controls here but HTTP ignores # unknown flags/params. params = {'r': r, 'pr': pr, 'timeout': timeout, 'basic_quorum': basic_quorum, 'notfound_ok': notfound_ok} bucket_type = self._get_bucket_type(robj.bucket.bucket_type) url = self.object_path(robj.bucket.name, robj.key, bucket_type=bucket_type, **params) response = self._request('GET', url) return self._parse_body(robj, response, [200, 300, 404])
[ "\n Get a bucket/key from the server\n " ]
Please provide a description of the function:def put(self, robj, w=None, dw=None, pw=None, return_body=True, if_none_match=False, timeout=None): # We could detect quorum_controls here but HTTP ignores # unknown flags/params. params = {'returnbody': return_body, 'w': w, 'dw': dw, 'pw': pw, 'timeout': timeout} bucket_type = self._get_bucket_type(robj.bucket.bucket_type) url = self.object_path(robj.bucket.name, robj.key, bucket_type=bucket_type, **params) headers = self._build_put_headers(robj, if_none_match=if_none_match) if PY2: content = bytearray(robj.encoded_data) else: content = robj.encoded_data if robj.key is None: expect = [201] method = 'POST' else: expect = [204] method = 'PUT' response = self._request(method, url, headers, content) if return_body: return self._parse_body(robj, response, [200, 201, 204, 300]) else: self.check_http_code(response[0], expect) return None
[ "\n Puts a (possibly new) object.\n " ]
Please provide a description of the function:def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None): # We could detect quorum_controls here but HTTP ignores # unknown flags/params. params = {'rw': rw, 'r': r, 'w': w, 'dw': dw, 'pr': pr, 'pw': pw, 'timeout': timeout} headers = {} bucket_type = self._get_bucket_type(robj.bucket.bucket_type) url = self.object_path(robj.bucket.name, robj.key, bucket_type=bucket_type, **params) use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') and robj.vclock is not None) if use_vclocks: headers['X-Riak-Vclock'] = robj.vclock.encode('base64') response = self._request('DELETE', url, headers) self.check_http_code(response[0], [204, 404]) return self
[ "\n Delete an object.\n " ]
Please provide a description of the function:def get_keys(self, bucket, timeout=None): bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.key_list_path(bucket.name, bucket_type=bucket_type, timeout=timeout) status, _, body = self._request('GET', url) if status == 200: props = json.loads(bytes_to_str(body)) return props['keys'] else: raise RiakError('Error listing keys.')
[ "\n Fetch a list of keys for the bucket\n " ]
Please provide a description of the function:def get_buckets(self, bucket_type=None, timeout=None): bucket_type = self._get_bucket_type(bucket_type) url = self.bucket_list_path(bucket_type=bucket_type, timeout=timeout) status, headers, body = self._request('GET', url) if status == 200: props = json.loads(bytes_to_str(body)) return props['buckets'] else: raise RiakError('Error getting buckets.')
[ "\n Fetch a list of all buckets\n " ]
Please provide a description of the function:def stream_buckets(self, bucket_type=None, timeout=None): if not self.bucket_stream(): raise NotImplementedError('Streaming list-buckets is not ' "supported on %s" % self.server_version.vstring) bucket_type = self._get_bucket_type(bucket_type) url = self.bucket_list_path(bucket_type=bucket_type, buckets="stream", timeout=timeout) status, headers, response = self._request('GET', url, stream=True) if status == 200: return HttpBucketStream(response) else: raise RiakError('Error listing buckets.')
[ "\n Stream list of buckets through an iterator\n " ]
Please provide a description of the function:def get_bucket_props(self, bucket): bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.bucket_properties_path(bucket.name, bucket_type=bucket_type) status, headers, body = self._request('GET', url) if status == 200: props = json.loads(bytes_to_str(body)) return props['props'] else: raise RiakError('Error getting bucket properties.')
[ "\n Get properties for a bucket\n " ]
Please provide a description of the function:def set_bucket_props(self, bucket, props): bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.bucket_properties_path(bucket.name, bucket_type=bucket_type) headers = {'Content-Type': 'application/json'} content = json.dumps({'props': props}) # Run the request... status, _, body = self._request('PUT', url, headers, content) if status == 401: raise SecurityError('Not authorized to set bucket properties.') elif status != 204: raise RiakError('Error setting bucket properties.') return True
[ "\n Set the properties on the bucket object given\n " ]
Please provide a description of the function:def clear_bucket_props(self, bucket):
    bucket_type = self._get_bucket_type(bucket.bucket_type)
    url = self.bucket_properties_path(bucket.name,
                                      bucket_type=bucket_type)
    headers = {'Content-Type': 'application/json'}

    # Run the request...
    status, _, _ = self._request('DELETE', url,
                                 headers, None)

    if status == 204:
        return True
    elif status == 405:
        return False
    else:
        raise RiakError('Error %s clearing bucket properties.'
                        % status)
[ "\n reset the properties on the bucket object given\n " ]
Please provide a description of the function:def get_bucket_type_props(self, bucket_type): self._check_bucket_types(bucket_type) url = self.bucket_type_properties_path(bucket_type.name) status, headers, body = self._request('GET', url) if status == 200: props = json.loads(bytes_to_str(body)) return props['props'] else: raise RiakError('Error getting bucket-type properties.')
[ "\n Get properties for a bucket-type\n " ]
Please provide a description of the function:def set_bucket_type_props(self, bucket_type, props): self._check_bucket_types(bucket_type) url = self.bucket_type_properties_path(bucket_type.name) headers = {'Content-Type': 'application/json'} content = json.dumps({'props': props}) # Run the request... status, _, _ = self._request('PUT', url, headers, content) if status != 204: raise RiakError('Error setting bucket-type properties.') return True
[ "\n Set the properties on the bucket-type\n " ]
Please provide a description of the function:def mapred(self, inputs, query, timeout=None): # Construct the job, optionally set the timeout... content = self._construct_mapred_json(inputs, query, timeout) # Do the request... url = self.mapred_path() headers = {'Content-Type': 'application/json'} status, headers, body = self._request('POST', url, headers, content) # Make sure the expected status code came back... if status != 200: raise RiakError( 'Error running MapReduce operation. Headers: %s Body: %s' % (repr(headers), repr(body))) result = json.loads(bytes_to_str(body)) return result
[ "\n Run a MapReduce query.\n " ]
Please provide a description of the function:def get_index(self, bucket, index, startkey, endkey=None, return_terms=None, max_results=None, continuation=None, timeout=None, term_regex=None): if term_regex and not self.index_term_regex(): raise NotImplementedError("Secondary index term_regex is not " "supported on %s" % self.server_version.vstring) if timeout == 'infinity': timeout = 0 params = {'return_terms': return_terms, 'max_results': max_results, 'continuation': continuation, 'timeout': timeout, 'term_regex': term_regex} bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.index_path(bucket.name, index, startkey, endkey, bucket_type=bucket_type, **params) status, headers, body = self._request('GET', url) self.check_http_code(status, [200]) json_data = json.loads(bytes_to_str(body)) if return_terms and u'results' in json_data: results = [] for result in json_data[u'results'][:]: term, key = list(result.items())[0] results.append((decode_index_value(index, term), key),) else: results = json_data[u'keys'][:] if max_results and u'continuation' in json_data: return (results, json_data[u'continuation']) else: return (results, None)
[ "\n Performs a secondary index query.\n " ]
Please provide a description of the function:def stream_index(self, bucket, index, startkey, endkey=None, return_terms=None, max_results=None, continuation=None, timeout=None, term_regex=None): if not self.stream_indexes(): raise NotImplementedError("Secondary index streaming is not " "supported on %s" % self.server_version.vstring) if term_regex and not self.index_term_regex(): raise NotImplementedError("Secondary index term_regex is not " "supported on %s" % self.server_version.vstring) if timeout == 'infinity': timeout = 0 params = {'return_terms': return_terms, 'stream': True, 'max_results': max_results, 'continuation': continuation, 'timeout': timeout, 'term_regex': term_regex} bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.index_path(bucket.name, index, startkey, endkey, bucket_type=bucket_type, **params) status, headers, response = self._request('GET', url, stream=True) if status == 200: return HttpIndexStream(response, index, return_terms) else: raise RiakError('Error streaming secondary index.')
[ "\n Streams a secondary index query.\n " ]
Please provide a description of the function:def create_search_index(self, index, schema=None, n_val=None, timeout=None): if not self.yz_wm_index: raise NotImplementedError("Search 2.0 administration is not " "supported for this version") url = self.search_index_path(index) headers = {'Content-Type': 'application/json'} content_dict = dict() if schema: content_dict['schema'] = schema if n_val: content_dict['n_val'] = n_val if timeout: content_dict['timeout'] = timeout content = json.dumps(content_dict) # Run the request... status, _, _ = self._request('PUT', url, headers, content) if status != 204: raise RiakError('Error setting Search 2.0 index.') return True
[ "\n Create a Solr search index for Yokozuna.\n\n :param index: a name of a yz index\n :type index: string\n :param schema: XML of Solr schema\n :type schema: string\n :param n_val: N value of the write\n :type n_val: int\n :param timeout: optional timeout (in ms)\n :type timeout: integer, None\n\n :rtype boolean\n " ]
Please provide a description of the function:def get_search_index(self, index): if not self.yz_wm_index: raise NotImplementedError("Search 2.0 administration is not " "supported for this version") url = self.search_index_path(index) # Run the request... status, headers, body = self._request('GET', url) if status == 200: return json.loads(bytes_to_str(body)) else: raise RiakError('Error getting Search 2.0 index.')
[ "\n Fetch the specified Solr search index for Yokozuna.\n\n :param index: a name of a yz index\n :type index: string\n\n :rtype string\n " ]
Please provide a description of the function:def list_search_indexes(self): if not self.yz_wm_index: raise NotImplementedError("Search 2.0 administration is not " "supported for this version") url = self.search_index_path() # Run the request... status, headers, body = self._request('GET', url) if status == 200: json_data = json.loads(bytes_to_str(body)) # Return a list of dictionaries return json_data else: raise RiakError('Error getting Search 2.0 index.')
[ "\n Return a list of Solr search indexes from Yokozuna.\n\n :rtype list of dicts\n " ]
Please provide a description of the function:def delete_search_index(self, index):
    if not self.yz_wm_index:
        raise NotImplementedError("Search 2.0 administration is not "
                                  "supported for this version")
    url = self.search_index_path(index)

    # Run the request...
    status, _, _ = self._request('DELETE', url)

    if status != 204:
        raise RiakError('Error deleting Search 2.0 index.')

    return True
[ "\n Fetch the specified Solr search index for Yokozuna.\n\n :param index: a name of a yz index\n :type index: string\n\n :rtype boolean\n " ]
Please provide a description of the function:def create_search_schema(self, schema, content): if not self.yz_wm_schema: raise NotImplementedError("Search 2.0 administration is not " "supported for this version") url = self.search_schema_path(schema) headers = {'Content-Type': 'application/xml'} # Run the request... status, header, body = self._request('PUT', url, headers, content) if status != 204: raise RiakError('Error creating Search 2.0 schema.') return True
[ "\n Create a new Solr schema for Yokozuna.\n\n :param schema: name of Solr schema\n :type schema: string\n :param content: actual defintion of schema (XML)\n :type content: string\n\n :rtype boolean\n " ]
Please provide a description of the function:def get_search_schema(self, schema): if not self.yz_wm_schema: raise NotImplementedError("Search 2.0 administration is not " "supported for this version") url = self.search_schema_path(schema) # Run the request... status, _, body = self._request('GET', url) if status == 200: result = {} result['name'] = schema result['content'] = bytes_to_str(body) return result else: raise RiakError('Error getting Search 2.0 schema.')
[ "\n Fetch a Solr schema from Yokozuna.\n\n :param schema: name of Solr schema\n :type schema: string\n\n :rtype dict\n " ]
Please provide a description of the function:def search(self, index, query, **params): if index is None: index = 'search' options = {} if 'op' in params: op = params.pop('op') options['q.op'] = op options.update(params) url = self.solr_select_path(index, query, **options) status, headers, data = self._request('GET', url) self.check_http_code(status, [200]) if 'json' in headers['content-type']: results = json.loads(bytes_to_str(data)) return self._normalize_json_search_response(results) elif 'xml' in headers['content-type']: return self._normalize_xml_search_response(data) else: raise ValueError("Could not decode search response")
[ "\n Performs a search query.\n " ]
Please provide a description of the function:def fulltext_add(self, index, docs): xml = Document() root = xml.createElement('add') for doc in docs: doc_element = xml.createElement('doc') for key in doc: value = doc[key] field = xml.createElement('field') field.setAttribute("name", key) text = xml.createTextNode(value) field.appendChild(text) doc_element.appendChild(field) root.appendChild(doc_element) xml.appendChild(root) self._request('POST', self.solr_update_path(index), {'Content-Type': 'text/xml'}, xml.toxml().encode('utf-8'))
[ "\n Adds documents to the search index.\n " ]
Please provide a description of the function:def fulltext_delete(self, index, docs=None, queries=None): xml = Document() root = xml.createElement('delete') if docs: for doc in docs: doc_element = xml.createElement('id') text = xml.createTextNode(doc) doc_element.appendChild(text) root.appendChild(doc_element) if queries: for query in queries: query_element = xml.createElement('query') text = xml.createTextNode(query) query_element.appendChild(text) root.appendChild(query_element) xml.appendChild(root) self._request('POST', self.solr_update_path(index), {'Content-Type': 'text/xml'}, xml.toxml().encode('utf-8'))
[ "\n Removes documents from the full-text index.\n " ]
Please provide a description of the function:def get_preflist(self, bucket, key): if not self.preflists(): raise NotImplementedError("fetching preflists is not supported.") bucket_type = self._get_bucket_type(bucket.bucket_type) url = self.preflist_path(bucket.name, key, bucket_type=bucket_type) status, headers, body = self._request('GET', url) if status == 200: preflist = json.loads(bytes_to_str(body)) return preflist['preflist'] else: raise RiakError('Error getting bucket/key preflist.')
[ "\n Get the preflist for a bucket/key\n\n :param bucket: Riak Bucket\n :type bucket: :class:`~riak.bucket.RiakBucket`\n :param key: Riak Key\n :type key: string\n :rtype: list of dicts\n " ]
Please provide a description of the function:def release(self): if self.errored: self.pool.delete_resource(self) else: self.pool.release(self)
[ "\n Releases this resource back to the pool it came from.\n " ]
Please provide a description of the function:def acquire(self, _filter=None, default=None): if not _filter: def _filter(obj): return True elif not callable(_filter): raise TypeError("_filter is not a callable") resource = None with self.lock: for e in self.resources: if not e.claimed and _filter(e.object): resource = e break if resource is None: if default is not None: resource = Resource(default, self) else: resource = Resource(self.create_resource(), self) self.resources.append(resource) resource.claimed = True return resource
[ "\n acquire(_filter=None, default=None)\n\n Claims a resource from the pool for manual use. Resources are\n created as needed when all members of the pool are claimed or\n the pool is empty. Most of the time you will want to use\n :meth:`transaction`.\n\n :param _filter: a filter that can be used to select a member\n of the pool\n :type _filter: callable\n :param default: a value that will be used instead of calling\n :meth:`create_resource` if a new resource needs to be created\n :rtype: Resource\n " ]
Please provide a description of the function:def release(self, resource): with self.releaser: resource.claimed = False self.releaser.notify_all()
[ "release(resource)\n\n Returns a resource to the pool. Most of the time you will want\n to use :meth:`transaction`, but if you use :meth:`acquire`,\n you must release the acquired resource back to the pool when\n finished. Failure to do so could result in deadlock.\n\n :param resource: Resource\n " ]
Please provide a description of the function:def transaction(self, _filter=None, default=None, yield_resource=False): resource = self.acquire(_filter=_filter, default=default) try: if yield_resource: yield resource else: yield resource.object if resource.errored: self.delete_resource(resource) except BadResource: self.delete_resource(resource) raise finally: self.release(resource)
[ "\n transaction(_filter=None, default=None)\n\n Claims a resource from the pool for use in a thread-safe,\n reentrant manner (as part of a with statement). Resources are\n created as needed when all members of the pool are claimed or\n the pool is empty.\n\n :param _filter: a filter that can be used to select a member\n of the pool\n :type _filter: callable\n :param default: a value that will be used instead of calling\n :meth:`create_resource` if a new resource needs to be created\n :param yield_resource: set to True to yield the Resource object\n itself\n :type yield_resource: boolean\n " ]
Please provide a description of the function:def delete_resource(self, resource): with self.lock: self.resources.remove(resource) self.destroy_resource(resource.object) del resource
[ "\n Deletes the resource from the pool and destroys the associated\n resource. Not usually needed by users of the pool, but called\n internally when BadResource is raised.\n\n :param resource: the resource to remove\n :type resource: Resource\n " ]
Please provide a description of the function:def encode_timeseries_put(self, tsobj):
    if tsobj.columns:
        raise NotImplementedError('columns are not used')

    if tsobj.rows and isinstance(tsobj.rows, list):
        req_rows = []
        for row in tsobj.rows:
            req_r = []
            for cell in row:
                req_r.append(self.encode_to_ts_cell(cell))
            req_rows.append(tuple(req_r))
        req = tsputreq_a, tsobj.table.name, [], req_rows
        mc = MSG_CODE_TS_TTB_MSG
        rc = MSG_CODE_TS_TTB_MSG
        return Msg(mc, encode(req), rc)
    else:
        raise RiakError("TsObject requires a list of rows")
[ "\n Returns an Erlang-TTB encoded tuple with the appropriate data and\n metadata from a TsObject.\n\n :param tsobj: a TsObject\n :type tsobj: TsObject\n :rtype: term-to-binary encoded object\n " ]
Please provide a description of the function:def decode_timeseries(self, resp_ttb, tsobj, convert_timestamp=False): if resp_ttb is None: return tsobj self.maybe_err_ttb(resp_ttb) # NB: some queries return a BARE 'tsqueryresp' atom # catch that here: if resp_ttb == tsqueryresp_a: return tsobj # The response atom is the first element in the response tuple resp_a = resp_ttb[0] if resp_a == tsputresp_a: return elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a: resp_data = resp_ttb[1] if len(resp_data) == 0: return elif len(resp_data) == 3: resp_colnames = resp_data[0] resp_coltypes = resp_data[1] tsobj.columns = self.decode_timeseries_cols( resp_colnames, resp_coltypes) resp_rows = resp_data[2] tsobj.rows = [] for resp_row in resp_rows: tsobj.rows.append( self.decode_timeseries_row(resp_row, resp_coltypes, convert_timestamp)) else: raise RiakError( "Expected 3-tuple in response, got: {}".format(resp_data)) else: raise RiakError("Unknown TTB response type: {}".format(resp_a))
[ "\n Fills an TsObject with the appropriate data and\n metadata from a TTB-encoded TsGetResp / TsQueryResp.\n\n :param resp_ttb: the decoded TTB data\n :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp\n :param tsobj: a TsObject\n :type tsobj: TsObject\n :param convert_timestamp: Convert timestamps to datetime objects\n :type tsobj: boolean\n " ]
Please provide a description of the function:def decode_timeseries_row(self, tsrow, tsct, convert_timestamp=False): row = [] for i, cell in enumerate(tsrow): if cell is None: row.append(None) elif isinstance(cell, list) and len(cell) == 0: row.append(None) else: if convert_timestamp and tsct[i] == timestamp_a: row.append(datetime_from_unix_time_millis(cell)) else: row.append(cell) return row
[ "\n Decodes a TTB-encoded TsRow into a list\n\n :param tsrow: the TTB decoded TsRow to decode.\n :type tsrow: TTB dncoded row\n :param tsct: the TTB decoded column types (atoms).\n :type tsct: list\n :param convert_timestamp: Convert timestamps to datetime objects\n :type tsobj: boolean\n :rtype list\n " ]
Please provide a description of the function:def to_op(self): if not self._adds and not self._removes: return None changes = {} if self._adds: changes['adds'] = list(self._adds) if self._removes: changes['removes'] = list(self._removes) return changes
[ "\n Extracts the modification operation from the set.\n\n :rtype: dict, None\n " ]
Please provide a description of the function:def discard(self, element): _check_element(element) self._require_context() self._removes.add(element)
[ "\n Removes an element from the set.\n\n .. note: You may remove elements from the set that are not\n present, but a context from the server is required.\n\n :param element: the element to remove\n :type element: str\n " ]
Please provide a description of the function:def getall(self, key): result = [] for k, v in self._items: if key == k: result.append(v) return result
[ "\n Return a list of all values matching the key (may be an empty list)\n " ]
Please provide a description of the function:def getone(self, key): v = self.getall(key) if not v: raise KeyError('Key not found: %r' % key) if len(v) > 1: raise KeyError('Multiple values match %r: %r' % (key, v)) return v[0]
[ "\n Get one value matching the key, raising a KeyError if multiple\n values were found.\n " ]
Please provide a description of the function:def mixed(self): result = {} multi = {} for key, value in self._items: if key in result: # We do this to not clobber any lists that are # *actual* values in this dictionary: if key in multi: result[key].append(value) else: result[key] = [result[key], value] multi[key] = None else: result[key] = value return result
[ "\n Returns a dictionary where the values are either single\n values, or a list of values when a key/value appears more than\n once in this dictionary. This is similar to the kind of\n dictionary often used to represent the variables in a web\n request.\n " ]
Please provide a description of the function:def dict_of_lists(self): result = {} for key, value in self._items: if key in result: result[key].append(value) else: result[key] = [value] return result
[ "\n Returns a dictionary where each key is associated with a\n list of values.\n " ]
Please provide a description of the function:def multiget(client, keys, **options): transient_pool = False outq = Queue() if 'pool' in options: pool = options['pool'] del options['pool'] else: pool = MultiGetPool() transient_pool = True try: pool.start() for bucket_type, bucket, key in keys: task = Task(client, outq, bucket_type, bucket, key, None, options) pool.enq(task) results = [] for _ in range(len(keys)): if pool.stopped(): raise RuntimeError( 'Multi-get operation interrupted by pool ' 'stopping!') results.append(outq.get()) outq.task_done() finally: if transient_pool: pool.stop() return results
[ "Executes a parallel-fetch across multiple threads. Returns a list\n containing :class:`~riak.riak_object.RiakObject` or\n :class:`~riak.datatypes.Datatype` instances, or 4-tuples of\n bucket-type, bucket, key, and the exception raised.\n\n If a ``pool`` option is included, the request will use the given worker\n pool and not a transient :class:`~riak.client.multi.MultiGetPool`. This\n option will be passed by the client if the ``multiget_pool_size``\n option was set on client initialization.\n\n :param client: the client to use\n :type client: :class:`~riak.client.RiakClient`\n :param keys: the keys to fetch in parallel\n :type keys: list of three-tuples -- bucket_type/bucket/key\n :param options: request options to\n :meth:`RiakBucket.get <riak.bucket.RiakBucket.get>`\n :type options: dict\n :rtype: list\n\n " ]
Please provide a description of the function:def multiput(client, objs, **options): transient_pool = False outq = Queue() if 'pool' in options: pool = options['pool'] del options['pool'] else: pool = MultiPutPool() transient_pool = True try: pool.start() for obj in objs: task = PutTask(client, outq, obj, options) pool.enq(task) results = [] for _ in range(len(objs)): if pool.stopped(): raise RuntimeError( 'Multi-put operation interrupted by pool ' 'stopping!') results.append(outq.get()) outq.task_done() finally: if transient_pool: pool.stop() return results
[ "Executes a parallel-store across multiple threads. Returns a list\n containing booleans or :class:`~riak.riak_object.RiakObject`\n\n If a ``pool`` option is included, the request will use the given worker\n pool and not a transient :class:`~riak.client.multi.MultiPutPool`. This\n option will be passed by the client if the ``multiput_pool_size``\n option was set on client initialization.\n\n :param client: the client to use\n :type client: :class:`RiakClient <riak.client.RiakClient>`\n :param objs: the objects to store in parallel\n :type objs: list of `RiakObject <riak.riak_object.RiakObject>` or\n `TsObject <riak.ts_object.TsObject>`\n :param options: request options to\n :meth:`RiakClient.put <riak.client.RiakClient.put>`\n :type options: dict\n :rtype: list\n " ]
Please provide a description of the function:def enq(self, task): if not self._stop.is_set(): self._inq.put(task) else: raise RuntimeError("Attempted to enqueue an operation while " "multi pool was shutdown!")
[ "\n Enqueues a fetch task to the pool of workers. This will raise\n a RuntimeError if the pool is stopped or in the process of\n stopping.\n\n :param task: the Task object\n :type task: Task or PutTask\n " ]
Please provide a description of the function:def start(self): # Check whether we are already started, skip if we are. if not self._started.is_set(): # If we are not started, try to capture the lock. if self._lock.acquire(False): # If we got the lock, go ahead and start the worker # threads, set the started flag, and release the lock. for i in range(self._size): name = "riak.client.multi-worker-{0}-{1}".format( self._name, i) worker = Thread(target=self._worker_method, name=name) worker.daemon = False worker.start() self._workers.append(worker) self._started.set() self._lock.release() else: # We didn't get the lock, so someone else is already # starting the worker threads. Wait until they have # signaled that the threads are started. self._started.wait()
[ "\n Starts the worker threads if they are not already started.\n This method is thread-safe and will be called automatically\n when executing an operation.\n " ]
Please provide a description of the function:def stop(self): if not self.stopped(): self._stop.set() for worker in self._workers: worker.join()
[ "\n Signals the worker threads to exit and waits on them.\n " ]
Please provide a description of the function:def _worker_method(self): while not self._should_quit(): try: task = self._inq.get(block=True, timeout=0.25) except TypeError: if self._should_quit(): break else: raise except Empty: continue try: btype = task.client.bucket_type(task.bucket_type) obj = btype.bucket(task.bucket).get(task.key, **task.options) task.outq.put(obj) except KeyboardInterrupt: raise except Exception as err: errdata = (task.bucket_type, task.bucket, task.key, err) task.outq.put(errdata) finally: self._inq.task_done()
[ "\n The body of the multi-get worker. Loops until\n :meth:`_should_quit` returns ``True``, taking tasks off the\n input queue, fetching the object, and putting them on the\n output queue.\n " ]
Please provide a description of the function:def _worker_method(self):
    while not self._should_quit():
        try:
            task = self._inq.get(block=True, timeout=0.25)
        except TypeError:
            if self._should_quit():
                break
            else:
                raise
        except Empty:
            continue

        try:
            obj = task.object
            if isinstance(obj, RiakObject):
                rv = task.client.put(obj, **task.options)
            elif isinstance(obj, TsObject):
                rv = task.client.ts_put(obj, **task.options)
            else:
                raise ValueError(
                    'unknown obj type: {}'.format(type(obj)))
            task.outq.put(rv)
        except KeyboardInterrupt:
            raise
        except Exception as err:
            errdata = (task.object, err)
            task.outq.put(errdata)
        finally:
            self._inq.task_done()
[ "\n The body of the multi-put worker. Loops until\n :meth:`_should_quit` returns ``True``, taking tasks off the\n input queue, storing the object, and putting the result on\n the output queue.\n " ]
Please provide a description of the function:def _check_key(self, key): if not len(key) == 2: raise TypeError('invalid key: %r' % key) elif key[1] not in TYPES: raise TypeError('invalid datatype: %s' % key[1])
[ "\n Ensures well-formedness of a key.\n " ]
Please provide a description of the function:def value(self): pvalue = {} for key in self._value: pvalue[key] = self._value[key].value return pvalue
[ "\n Returns a copy of the original map's value. Nested values are\n pure Python values as returned by :attr:`Datatype.value` from\n the nested types.\n\n :rtype: dict\n " ]
Please provide a description of the function:def modified(self): if self._removes: return True for v in self._value: if self._value[v].modified: return True for v in self._updates: if self._updates[v].modified: return True return False
[ "\n Whether the map has staged local modifications.\n " ]
Please provide a description of the function:def to_op(self): removes = [('remove', r) for r in self._removes] value_updates = list(self._extract_updates(self._value)) new_updates = list(self._extract_updates(self._updates)) all_updates = removes + value_updates + new_updates if all_updates: return all_updates else: return None
[ "\n Extracts the modification operation(s) from the map.\n\n :rtype: list, None\n " ]
Please provide a description of the function:def _format_python2_or_3(self): pb_files = set() with open(self.source, 'r', buffering=1) as csvfile: reader = csv.reader(csvfile) for row in reader: _, _, proto = row pb_files.add('riak/pb/{0}_pb2.py'.format(proto)) for im in sorted(pb_files): with open(im, 'r', buffering=1) as pbfile: contents = 'from six import *\n' + pbfile.read() contents = re.sub(r'riak_pb2', r'riak.pb.riak_pb2', contents) # Look for this pattern in the protoc-generated file: # # class RpbCounterGetResp(_message.Message): # __metaclass__ = _reflection.GeneratedProtocolMessageType # # and convert it to: # # @add_metaclass(_reflection.GeneratedProtocolMessageType) # class RpbCounterGetResp(_message.Message): contents = re.sub( r'class\s+(\S+)\((\S+)\):\s*\n' '\s+__metaclass__\s+=\s+(\S+)\s*\n', r'@add_metaclass(\3)\nclass \1(\2):\n', contents) with open(im, 'w', buffering=1) as pbfile: pbfile.write(contents)
[ "\n Change the PB files to use full pathnames for Python 3.x\n and modify the metaclasses to be version agnostic\n " ]
Please provide a description of the function:def reload(self, **params): if not self.bucket: raise ValueError('bucket property not assigned') if not self.key: raise ValueError('key property not assigned') dtype, value, context = self.bucket._client._fetch_datatype( self.bucket, self.key, **params) if not dtype == self.type_name: raise TypeError("Expected datatype {} but " "got datatype {}".format(self.__class__, TYPES[dtype])) self.clear() self._context = context self._set_value(value) return self
[ "\n Reloads the datatype from Riak.\n\n .. warning: This clears any local modifications you might have\n made.\n\n :param r: the read quorum\n :type r: integer, string, None\n :param pr: the primary read quorum\n :type pr: integer, string, None\n :param basic_quorum: whether to use the \"basic quorum\" policy\n for not-founds\n :type basic_quorum: bool\n :param notfound_ok: whether to treat not-found responses as successful\n :type notfound_ok: bool\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :param include_context: whether to return the opaque context\n as well as the value, which is useful for removal operations\n on sets and maps\n :type include_context: bool\n :rtype: :class:`Datatype`\n " ]
Please provide a description of the function:def delete(self, **params): self.clear() self._context = None self._set_value(self._default_value()) self.bucket._client.delete(self, **params) return self
[ "\n Deletes the datatype from Riak. See :meth:`RiakClient.delete()\n <riak.client.RiakClient.delete>` for options.\n " ]
Please provide a description of the function:def update(self, **params): if not self.modified: raise ValueError("No operation to perform") params.setdefault('return_body', True) self.bucket._client.update_datatype(self, **params) self.clear() return self
[ "\n Sends locally staged mutations to Riak.\n\n :param w: W-value, wait for this many partitions to respond\n before returning to client.\n :type w: integer\n :param dw: DW-value, wait for this many partitions to\n confirm the write before returning to client.\n :type dw: integer\n :param pw: PW-value, require this many primary partitions to\n be available before performing the put\n :type pw: integer\n :param return_body: if the newly stored object should be\n retrieved, defaults to True\n :type return_body: bool\n :param include_context: whether to return the new opaque\n context when `return_body` is `True`\n :type include_context: bool\n :param timeout: a timeout value in milliseconds\n :type timeout: int\n :rtype: a subclass of :class:`~riak.datatypes.Datatype`\n " ]
Please provide a description of the function:def encode_quorum(self, rw): if rw in QUORUM_TO_PB: return QUORUM_TO_PB[rw] elif type(rw) is int and rw >= 0: return rw else: return None
[ "\n Converts a symbolic quorum value into its on-the-wire\n equivalent.\n\n :param rw: the quorum\n :type rw: string, integer\n :rtype: integer\n " ]
Please provide a description of the function:def decode_contents(self, contents, obj): obj.siblings = [self.decode_content(c, RiakContent(obj)) for c in contents] # Invoke sibling-resolution logic if len(obj.siblings) > 1 and obj.resolver is not None: obj.resolver(obj) return obj
[ "\n Decodes the list of siblings from the protobuf representation\n into the object.\n\n :param contents: a list of RpbContent messages\n :type contents: list\n :param obj: a RiakObject\n :type obj: RiakObject\n :rtype RiakObject\n " ]
Please provide a description of the function:def decode_content(self, rpb_content, sibling): if rpb_content.HasField("deleted") and rpb_content.deleted: sibling.exists = False else: sibling.exists = True if rpb_content.HasField("content_type"): sibling.content_type = bytes_to_str(rpb_content.content_type) if rpb_content.HasField("charset"): sibling.charset = bytes_to_str(rpb_content.charset) if rpb_content.HasField("content_encoding"): sibling.content_encoding = \ bytes_to_str(rpb_content.content_encoding) if rpb_content.HasField("vtag"): sibling.etag = bytes_to_str(rpb_content.vtag) sibling.links = [self.decode_link(link) for link in rpb_content.links] if rpb_content.HasField("last_mod"): sibling.last_modified = float(rpb_content.last_mod) if rpb_content.HasField("last_mod_usecs"): sibling.last_modified += rpb_content.last_mod_usecs / 1000000.0 sibling.usermeta = dict([(bytes_to_str(usermd.key), bytes_to_str(usermd.value)) for usermd in rpb_content.usermeta]) sibling.indexes = set([(bytes_to_str(index.key), decode_index_value(index.key, index.value)) for index in rpb_content.indexes]) sibling.encoded_data = rpb_content.value return sibling
[ "\n Decodes a single sibling from the protobuf representation into\n a RiakObject.\n\n :param rpb_content: a single RpbContent message\n :type rpb_content: riak.pb.riak_pb2.RpbContent\n :param sibling: a RiakContent sibling container\n :type sibling: RiakContent\n :rtype: RiakContent\n " ]
Please provide a description of the function:def encode_content(self, robj, rpb_content): if robj.content_type: rpb_content.content_type = str_to_bytes(robj.content_type) if robj.charset: rpb_content.charset = str_to_bytes(robj.charset) if robj.content_encoding: rpb_content.content_encoding = str_to_bytes(robj.content_encoding) for uk in robj.usermeta: pair = rpb_content.usermeta.add() pair.key = str_to_bytes(uk) pair.value = str_to_bytes(robj.usermeta[uk]) for link in robj.links: pb_link = rpb_content.links.add() try: bucket, key, tag = link except ValueError: raise RiakError("Invalid link tuple %s" % link) pb_link.bucket = str_to_bytes(bucket) pb_link.key = str_to_bytes(key) if tag: pb_link.tag = str_to_bytes(tag) else: pb_link.tag = str_to_bytes('') for field, value in robj.indexes: pair = rpb_content.indexes.add() pair.key = str_to_bytes(field) pair.value = str_to_bytes(str(value)) # Python 2.x data is stored in a string if six.PY2: rpb_content.value = str(robj.encoded_data) else: rpb_content.value = robj.encoded_data
[ "\n Fills an RpbContent message with the appropriate data and\n metadata from a RiakObject.\n\n :param robj: a RiakObject\n :type robj: RiakObject\n :param rpb_content: the protobuf message to fill\n :type rpb_content: riak.pb.riak_pb2.RpbContent\n " ]
Please provide a description of the function:def decode_link(self, link): if link.HasField("bucket"): bucket = bytes_to_str(link.bucket) else: bucket = None if link.HasField("key"): key = bytes_to_str(link.key) else: key = None if link.HasField("tag"): tag = bytes_to_str(link.tag) else: tag = None return (bucket, key, tag)
[ "\n Decodes an RpbLink message into a tuple\n\n :param link: an RpbLink message\n :type link: riak.pb.riak_pb2.RpbLink\n :rtype tuple\n " ]