Dataset columns: Code (function source, string lengths 103–85.9k) and Summary (docstring description, list lengths 0–94).
Please provide a description of the function:
def add_localfile_for_md5_check(
        self, key, lpath, fpath, remote_md5, mode, lpview):
    # type: (LocalFileMd5Offload, str, str, str, str,
    #        blobxfer.models.azure.StorageModes, object) -> None
    if blobxfer.util.is_none_or_empty(remote_md5):
        raise ValueError('comparison MD5 is empty for file {}'.format(
            lpath))
    if mode == blobxfer.models.azure.StorageModes.Page:
        pagealign = True
    else:
        pagealign = False
    self._task_queue.put(
        (key, lpath, fpath, remote_md5, pagealign, lpview)
    )
[ "Add a local file to MD5 check queue\n :param LocalFileMd5Offload self: this\n :param str key: md5 map key\n :param str lpath: \"local\" path for descriptor\n :param str fpath: \"final\" path for/where file\n :param str remote_md5: remote MD5 to compare against\n :param blobxfer.models.azure.StorageModes mode: mode\n :param object lpview: local path view\n " ]
Please provide a description of the function:def populate_from_blob(self, sa, blob, vio=None, store_raw_metadata=False): # type: (StorageEntity, blobxfer.operations.azure.StorageAccount, # azure.storage.blob.models.Blob) -> None if store_raw_metadata: self._raw_metadata = blob.metadata else: self._fileattr = blobxfer.models.metadata.fileattr_from_metadata( blob.metadata) self._vio = vio self._can_create_containers = sa.can_create_containers self._name = blob.name self._snapshot = blob.snapshot self._lmt = blob.properties.last_modified self._size = blob.properties.content_length self._md5 = blob.properties.content_settings.content_md5 self._cache_control = blob.properties.content_settings.cache_control if blob.properties.blob_type == BlobTypes.AppendBlob: self._mode = StorageModes.Append self._client = sa.append_blob_client elif blob.properties.blob_type == BlobTypes.BlockBlob: self._access_tier = blob.properties.blob_tier self._mode = StorageModes.Block self._client = sa.block_blob_client elif blob.properties.blob_type == BlobTypes.PageBlob: self._mode = StorageModes.Page self._client = sa.page_blob_client
[ "Populate properties from Blob\n :param StorageEntity self: this\n :param blobxfer.operations.azure.StorageAccount sa: storage account\n :param azure.storage.blob.models.Blob blob: blob to populate from\n :param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe\n :param bool store_raw_metadata: store raw metadata\n " ]
Please provide a description of the function:def populate_from_file( self, sa, file, path, vio=None, store_raw_metadata=False, snapshot=None): # type: (StorageEntity, blobxfer.operations.azure.StorageAccount, # azure.storage.file.models.File, str, # blobxfer.models.metadata.VectoredStripe, bool, str) -> None if store_raw_metadata: self._raw_metadata = file.metadata else: self._fileattr = blobxfer.models.metadata.fileattr_from_metadata( file.metadata) self._vio = vio self._can_create_containers = sa.can_create_containers if path is not None: self._name = str(pathlib.Path(path) / file.name) else: self._name = file.name self._snapshot = snapshot self._lmt = file.properties.last_modified self._size = file.properties.content_length self._md5 = file.properties.content_settings.content_md5 self._cache_control = file.properties.content_settings.cache_control self._mode = StorageModes.File self._client = sa.file_client
[ "Populate properties from File\n :param StorageEntity self: this\n :param blobxfer.operations.azure.StorageAccount sa: storage account\n :param azure.storage.file.models.File file: file to populate from\n :param str path: full path to file\n :param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe\n :param bool store_raw_metadata: store raw metadata\n :param str snapshot: snapshot\n " ]
Please provide a description of the function:def populate_from_local(self, sa, container, path, mode, cache_control): # type: (StorageEntity, blobxfer.operations.azure.StorageAccount # str, str, blobxfer.models.azure.StorageModes, str) -> None self._can_create_containers = sa.can_create_containers self._container = container self._name = path self._mode = mode self._cache_control = cache_control self._from_local = True if mode == StorageModes.Append: self._client = sa.append_blob_client elif mode == StorageModes.Block: self._client = sa.block_blob_client elif mode == StorageModes.File: self._client = sa.file_client elif mode == StorageModes.Page: self._client = sa.page_blob_client elif mode == StorageModes.Auto: name = self.name.lower() if name.endswith('.vhd') or name.endswith('.vhdx'): self._client = sa.page_blob_client self._mode = StorageModes.Page else: self._client = sa.block_blob_client self._mode = StorageModes.Block
[ "Populate properties from local\n :param StorageEntity self: this\n :param blobxfer.operations.azure.StorageAccount sa: storage account\n :param str container: container\n :param str path: full path to file\n :param blobxfer.models.azure.StorageModes mode: storage mode\n :param str cache_control: cache control\n " ]
Please provide a description of the function:def add_includes(self, includes): # type: (_BaseSourcePaths, list) -> None if not isinstance(includes, list): if isinstance(includes, tuple): includes = list(includes) else: includes = [includes] # remove any starting rglob spec incl = [] for inc in includes: tmp = pathlib.Path(inc).parts if tmp[0] == '**': if len(tmp) == 1: continue else: incl.append(str(pathlib.Path(*tmp[1:]))) else: incl.append(inc) # check for any remaining rglob specs if any(['**' in x for x in incl]): raise ValueError('invalid include specification containing "**"') if self._include is None: self._include = incl else: self._include.extend(incl)
[ "Add a list of includes\n :param _BaseSourcePaths self: this\n :param list includes: list of includes\n " ]
Please provide a description of the function:def add_excludes(self, excludes): # type: (_BaseSourcePaths, list) -> None if not isinstance(excludes, list): if isinstance(excludes, tuple): excludes = list(excludes) else: excludes = [excludes] # remove any starting rglob spec excl = [] for exc in excludes: tmp = pathlib.Path(exc).parts if tmp[0] == '**': if len(tmp) == 1: continue else: excl.append(str(pathlib.Path(*tmp[1:]))) else: excl.append(exc) # check for any remaining rglob specs if any(['**' in x for x in excl]): raise ValueError('invalid exclude specification containing "**"') if self._exclude is None: self._exclude = excl else: self._exclude.extend(excl)
[ "Add a list of excludes\n :param _BaseSourcePaths self: this\n :param list excludes: list of excludes\n " ]
Please provide a description of the function:
def add_path(self, path):
    # type: (_BaseSourcePaths, str) -> None
    if isinstance(path, pathlib.Path):
        self._paths.append(path)
    else:
        self._paths.append(pathlib.Path(path))
[ "Add a local path\n :param _BaseSourcePaths self: this\n :param str path: path to add\n " ]
Please provide a description of the function:
def _inclusion_check(self, path):
    # type: (_BaseSourcePaths, pathlib.Path) -> bool
    _spath = str(path)
    inc = True
    if self._include is not None:
        inc = any([fnmatch.fnmatch(_spath, x) for x in self._include])
    if inc and self._exclude is not None:
        inc = not any([fnmatch.fnmatch(_spath, x) for x in self._exclude])
    return inc
[ "Check file for inclusion against filters\n :param _BaseSourcePaths self: this\n :param pathlib.Path path: path to check\n :rtype: bool\n :return: if file should be included\n " ]
Please provide a description of the function:def add_storage_account(self, name, key, endpoint): # type: (StorageCredentials, str, str, str) -> None if name in self._storage_accounts: raise ValueError( '{} already exists in storage accounts'.format(name)) self._storage_accounts[name] = StorageAccount( name, key, endpoint, self._general_options.concurrency.transfer_threads, self._general_options.timeout, self._general_options.proxy, )
[ "Add a storage account\n :param StorageCredentials self: this\n :param str name: name of storage account to store\n :param str key: storage key or sas\n :param str endpoint: endpoint\n " ]
Please provide a description of the function:
def endpoint(self, value):
    # type: (StorageAccount, str) -> None
    tmp = value.split('.')
    if (len(tmp) <= 1 or not tmp[0].isalnum()):
        raise ValueError('endpoint is invalid: {}'.format(value))
    self._endpoint = value
[ "Set endpoint\n :param StorageAccount self: this\n :param str value: endpoint\n " ]
Please provide a description of the function:
def _key_is_sas(key):
    # type: (str) -> bool
    # keys starting with ? are sas keys as ? is not in the base-64
    # character range
    if key.startswith('?'):
        return True
    else:
        # & is not in the base-64 character range, so technically
        # the presence of this character means the key is a sas. however,
        # perform a stronger check for the sig= parameter.
        tmp = key.split('&')
        if len(tmp) == 1:
            return False
        elif any(x.startswith('sig=') for x in tmp):
            return True
    return False
[ "Determine if key is a sas\n :param str key: key to parse\n :rtype: bool\n :return: if key is a sas\n " ]
Please provide a description of the function:
def _container_manipulation_allowed(self):
    # type: (StorageAccount) -> bool
    if self.is_sas:
        # search for account sas "c" resource
        sasparts = self.key.split('&')
        for part in sasparts:
            tmp = part.split('=')
            if tmp[0] == 'srt':
                return 'c' in tmp[1]
        # this is a sas without the srt parameter, so this must be
        # a service-level sas which doesn't allow container manipulation
        return False
    else:
        # storage account key always allows container manipulation
        return True
[ "Check if container manipulation is allowed\n :param StorageAccount self: this\n :rtype: bool\n :return: if container manipulation is allowed\n " ]
Please provide a description of the function:
def _ensure_object_manipulation_allowed(self):
    # type: (StorageAccount) -> bool
    if self.is_sas:
        # search for account sas "o" resource
        sasparts = self.key.split('&')
        for part in sasparts:
            tmp = part.split('=')
            if tmp[0] == 'srt':
                return 'o' in tmp[1]
        # this is a sas without the srt parameter, so this must be
        # a service-level sas which always allows object manipulation
        return True
    else:
        # storage account key always allows object manipulation
        return True
[ "Check if object manipulation is allowed\n :param StorageAccount self: this\n :rtype: bool\n :return: if object manipulation is allowed\n " ]
Please provide a description of the function:def _credential_allows_container_list(self): # type: (StorageAccount) -> bool if self.is_sas: sasparts = self.key.split('&') caccess = self.can_create_containers # search for container signed resource for service level sas if not caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sr': caccess = 'c' in tmp[1] or 's' in tmp[1] break elif tmp[0] == 'si': # assume sas policies allow container list return True # search for list permission if caccess: for part in sasparts: tmp = part.split('=') if tmp[0] == 'sp': return 'l' in tmp[1] # sas doesn't allow container level list return False else: # storage account key always allows container list return True
[ "Check if container list is allowed\n :param StorageAccount self: this\n :rtype: bool\n :return: if container list is allowed\n " ]
Please provide a description of the function:def _create_clients(self, timeout, proxy): # type: (StorageAccount, blobxfer.models.options.Timeout, # blobxfer.models.options.HttpProxy) -> None self._append_blob_client = \ blobxfer.operations.azure.blob.append.create_client( self, timeout, proxy) self._block_blob_client = \ blobxfer.operations.azure.blob.block.create_client( self, timeout, proxy) self._file_client = blobxfer.operations.azure.file.create_client( self, timeout, proxy) self._page_blob_client = \ blobxfer.operations.azure.blob.page.create_client( self, timeout, proxy)
[ "Create Azure Storage clients\n :param StorageAccount self: this\n :param blobxfer.models.options.Timeout timeout: timeout\n :param blobxfer.models.options.HttpProxy proxy: proxy\n " ]
Please provide a description of the function:
def add_path_with_storage_account(self, remote_path, storage_account):
    # type: (SourcePath, str, str) -> None
    if len(self._path_map) >= 1:
        raise RuntimeError(
            'cannot add multiple remote paths to SourcePath objects')
    rpath = blobxfer.util.normalize_azure_path(remote_path)
    self.add_path(rpath)
    self._path_map[rpath] = storage_account
[ "Add a path with an associated storage account\n :param SourcePath self: this\n :param str remote_path: remote path\n :param str storage_account: storage account to associate with path\n " ]
Please provide a description of the function:
def files(self, creds, options, dry_run):
    # type: (SourcePath, StorageCredentials,
    #        blobxfer.models.options.Download, bool) -> StorageEntity
    if options.mode == blobxfer.models.azure.StorageModes.File:
        for file in self._populate_from_list_files(
                creds, options, dry_run):
            yield file
    else:
        for blob in self._populate_from_list_blobs(
                creds, options, dry_run):
            yield blob
[ "Generator of Azure remote files or blobs\n :param SourcePath self: this\n :param StorageCredentials creds: storage creds\n :param blobxfer.models.options.Download options: download options\n :param bool dry_run: dry run\n :rtype: StorageEntity\n :return: Azure storage entity object\n " ]
Please provide a description of the function:def _convert_to_storage_entity_with_encryption_metadata( self, options, store_raw_metadata, sa, entity, vio, is_file, container, dir, file_snapshot): # type: (SourcePath, StorageCredentials, any, bool, StorageAccount, # any, blobxfer.models.metadata.VectoredStripe, bool, str, # str) -> StorageEntity if (not store_raw_metadata and blobxfer.models.crypto.EncryptionMetadata. encryption_metadata_exists(entity.metadata)): ed = blobxfer.models.crypto.EncryptionMetadata() ed.convert_from_json( entity.metadata, entity.name, options.rsa_private_key) else: ed = None ase = blobxfer.models.azure.StorageEntity(container, ed) if is_file: ase.populate_from_file( sa, entity, dir, vio=vio, store_raw_metadata=store_raw_metadata, snapshot=file_snapshot) else: ase.populate_from_blob( sa, entity, vio=vio, store_raw_metadata=store_raw_metadata) return ase
[ "Convert entity into StorageEntity with encryption metadata if avail\n :param SourcePath self: this\n :param StorageCredentials creds: storage creds\n :param object options: download or synccopy options\n :param bool store_raw_metadata: store raw metadata\n :param StorageAccount sa: storage account\n :param object entity: Storage File or Blob object\n :param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe\n :param bool is_file: is a file object\n :param str container: container\n :param str dir: Azure File directory structure\n :rtype: StorageEntity\n :return: Azure storage entity object\n " ]
Please provide a description of the function:def _handle_vectored_io_stripe( self, creds, options, store_raw_metadata, sa, entity, is_file, container, dir=None, file_snapshot=None): # type: (SourcePath, StorageCredentials, any, bool, StorageAccount, # any, bool, str, str) -> StorageEntity vio = blobxfer.models.metadata.vectored_io_from_metadata( entity.metadata) if not isinstance(vio, blobxfer.models.metadata.VectoredStripe): ase = self._convert_to_storage_entity_with_encryption_metadata( options, store_raw_metadata, sa, entity, None, is_file, container, dir, file_snapshot) yield ase return # if this slice is not the first, ignore. the reason for this is # 1. Ensures direct get on a slice does nothing unless the # zero-th blob is retrieved/accessed (eliminates partial data # download), which will reconstruct all of the stripes via next # pointers # 2. Data is not retrieved multiple times for the same slice without # having to maintain a fetched map if vio.slice_id != 0: yield None return # yield this entity ase = self._convert_to_storage_entity_with_encryption_metadata( options, store_raw_metadata, sa, entity, vio, is_file, container, dir, file_snapshot) yield ase # iterate all slices while vio.next is not None: # follow next pointer sa = creds.get_storage_account(vio.next.storage_account_name) if is_file: entity = blobxfer.operations.azure.file.get_file_properties( sa.file_client, vio.next.container, vio.next.name, snapshot=file_snapshot) _, dir = blobxfer.util.explode_azure_path(vio.next.name) else: entity = blobxfer.operations.azure.blob.get_blob_properties( sa.block_blob_client, vio.next.container, vio.next.name, ase.mode) vio = blobxfer.models.metadata.vectored_io_from_metadata( entity.metadata) # yield next ase = self._convert_to_storage_entity_with_encryption_metadata( options, store_raw_metadata, sa, entity, vio, is_file, container, dir, file_snapshot) yield ase
[ "Handle Vectored IO stripe entries\n :param SourcePath self: this\n :param StorageCredentials creds: storage creds\n :param object options: download or synccopy options\n :param bool store_raw_metadata: store raw metadata\n :param StorageAccount sa: storage account\n :param object entity: Storage File or Blob object\n :param bool is_file: is a file object\n :param str container: container\n :param str dir: Azure File directory structure\n :rtype: StorageEntity\n :return: Azure storage entity object\n " ]
Please provide a description of the function:def _populate_from_list_files(self, creds, options, dry_run): # type: (SourcePath, StorageCredentials, Any, bool) -> StorageEntity store_raw_metadata = isinstance( options, blobxfer.models.options.SyncCopy) for _path in self._paths: rpath = str(_path) sa = creds.get_storage_account(self.lookup_storage_account(rpath)) # ensure at least read permissions if not sa.can_read_object: raise RuntimeError( 'unable to populate sources for remote path {} as ' 'credential for storage account {} does not permit read ' 'access'.format(rpath, sa.name)) cont, dir = blobxfer.util.explode_azure_path(rpath) snapshot = None if dir is not None: # parse out snapshot if part of url a, b, snapshot = \ blobxfer.operations.azure.file.parse_file_path(dir) # check if "dir" is a file or a vdir chk = blobxfer.operations.azure.file.check_if_single_file( sa.file_client, cont, dir) if chk[0]: dir = a else: if blobxfer.util.is_not_empty(a): dir = '/'.join((a, b)) else: dir = b if snapshot is None: _, cont, snapshot = \ blobxfer.operations.azure.file.parse_file_path(cont) if sa.can_list_container_objects: for file in blobxfer.operations.azure.file.list_files( sa.file_client, cont, dir, options.recursive, snapshot=snapshot): if not self._inclusion_check(file.name): if dry_run: logger.info( '[DRY RUN] skipping due to filters: ' '{}/{}'.format(cont, file.name)) continue for ase in self._handle_vectored_io_stripe( creds, options, store_raw_metadata, sa, file, True, cont, dir=None, file_snapshot=snapshot): if ase is None: continue yield ase else: file = blobxfer.operations.azure.file.get_file_properties( sa.file_client, cont, dir, snapshot=snapshot) if file is None: logger.error( 'file {} not found in storage account {}'.format( rpath, sa.name)) return if not self._inclusion_check(file.name): if dry_run: logger.info( '[DRY RUN] skipping due to filters: {}/{}'.format( cont, file.name)) return for ase in self._handle_vectored_io_stripe( creds, options, store_raw_metadata, sa, file, True, cont, dir=None, file_snapshot=snapshot): if ase is None: continue yield ase
[ "Internal generator for Azure remote files\n :param SourcePath self: this\n :param StorageCredentials creds: storage creds\n :param object options: download or synccopy options\n :param bool dry_run: dry run\n :rtype: StorageEntity\n :return: Azure storage entity object\n " ]
Please provide a description of the function:def _populate_from_list_blobs(self, creds, options, dry_run): # type: (SourcePath, StorageCredentials, Any, bool) -> StorageEntity is_synccopy = isinstance(options, blobxfer.models.options.SyncCopy) for _path in self._paths: rpath = str(_path) sa = creds.get_storage_account(self.lookup_storage_account(rpath)) # ensure at least read permissions if not sa.can_read_object: raise RuntimeError( 'unable to populate sources for remote path {} as ' 'credential for storage account {} does not permit read ' 'access'.format(rpath, sa.name)) cont, dir = blobxfer.util.explode_azure_path(rpath) if sa.can_list_container_objects: for blob in blobxfer.operations.azure.blob.list_blobs( sa.block_blob_client, cont, dir, options.mode, options.recursive): # check for virtual directory placeholder if not is_synccopy: try: if (blob.metadata[ _METADATA_VIRTUAL_DIRECTORY] == 'true'): continue except KeyError: pass if not self._inclusion_check(blob.name): if dry_run: logger.info( '[DRY RUN] skipping due to filters: ' '{}/{}'.format(cont, blob.name)) continue for ase in self._handle_vectored_io_stripe( creds, options, is_synccopy, sa, blob, False, cont): if ase is None: continue yield ase else: blob = blobxfer.operations.azure.blob.get_blob_properties( sa.block_blob_client, cont, dir, options.mode) if blob is None: logger.error( 'blob {} not found in storage account {}'.format( rpath, sa.name)) return if not self._inclusion_check(blob.name): if dry_run: logger.info( '[DRY RUN] skipping due to filters: {}/{}'.format( cont, blob.name)) return for ase in self._handle_vectored_io_stripe( creds, options, is_synccopy, sa, blob, False, cont): if ase is None: continue yield ase
[ "Internal generator for Azure remote blobs\n :param SourcePath self: this\n :param StorageCredentials creds: storage creds\n :param object options: download or synccopy options\n :param bool dry_run: dry run\n :rtype: StorageEntity\n :return: Azure storage entity object\n " ]
Please provide a description of the function:def create_client(storage_account, timeout, proxy): # type: (blobxfer.operations.azure.StorageAccount, # blobxfer.models.options.Timeout, # blobxfer.models.options.HttpProxy) -> FileService if storage_account.is_sas: client = azure.storage.file.FileService( account_name=storage_account.name, sas_token=storage_account.key, endpoint_suffix=storage_account.endpoint, request_session=storage_account.session, socket_timeout=timeout.timeout) else: client = azure.storage.file.FileService( account_name=storage_account.name, account_key=storage_account.key, endpoint_suffix=storage_account.endpoint, request_session=storage_account.session, socket_timeout=timeout.timeout) # set proxy if proxy is not None: client.set_proxy( proxy.host, proxy.port, proxy.username, proxy.password) # set retry policy client.retry = blobxfer.retry.ExponentialRetryWithMaxWait( max_retries=timeout.max_retries).retry return client
[ "Create file client\n :param blobxfer.operations.azure.StorageAccount storage_account:\n storage account\n :param blobxfer.models.options.Timeout timeout: timeout\n :param blobxfer.models.options.HttpProxy proxy: proxy\n :rtype: FileService\n :return: file service client\n " ]
Please provide a description of the function:
def parse_file_path(filepath):
    # type: (pathlib.Path) -> Tuple[str, str, str]
    if not isinstance(filepath, pathlib.Path):
        filepath = pathlib.Path(filepath)
    dirname = '/'.join(filepath.parts[:len(filepath.parts) - 1])
    if len(dirname) == 0:
        dirname = None
    if len(filepath.parts) > 0:
        fname = filepath.parts[-1]
    else:
        fname = None
    fname, snapshot = blobxfer.util.parse_fileshare_or_file_snapshot_parameter(
        fname)
    return (dirname, fname, snapshot)
[ "Parse file path from file path\n :param str filepath: file path\n :rtype: tuple\n :return: (dirname, rest of path, snapshot)\n " ]
Please provide a description of the function:def get_file_properties( client, fileshare, prefix, timeout=None, snapshot=None): # type: (azure.storage.file.FileService, str, str, int, str) -> # azure.storage.file.models.File dirname, fname, ss = parse_file_path(prefix) if ss is not None: if snapshot is not None: raise RuntimeError( 'snapshot specified as {} but parsed {} from prefix {}'.format( snapshot, ss, prefix)) else: snapshot = ss try: return client.get_file_properties( share_name=fileshare, directory_name=dirname, file_name=fname, timeout=timeout, snapshot=snapshot, ) except azure.common.AzureMissingResourceHttpError: return None
[ "Get file properties\n :param FileService client: blob client\n :param str fileshare: file share name\n :param str prefix: path prefix\n :param int timeout: timeout\n :param str snapshot: snapshot\n :rtype: azure.storage.file.models.File\n :return: file properties\n " ]
Please provide a description of the function:
def check_if_single_file(client, fileshare, prefix, timeout=None):
    # type: (azure.storage.file.FileService, str, str, int) ->
    #        Tuple[bool, azure.storage.file.models.File]
    if blobxfer.util.is_none_or_empty(prefix):
        return (False, None)
    file = get_file_properties(client, fileshare, prefix, timeout)
    if file is None:
        return (False, file)
    else:
        return (True, file)
[ "Check if prefix is a single file or multiple files\n :param FileService client: blob client\n :param str fileshare: file share name\n :param str prefix: path prefix\n :param int timeout: timeout\n :rtype: tuple\n :return: (if prefix in fileshare is a single file, file)\n " ]
Please provide a description of the function:def list_files( client, fileshare, prefix, recursive, timeout=None, snapshot=None): # type: (azure.storage.file.FileService, str, str, bool, int, str) -> # azure.storage.file.models.File # if single file, then yield file and return _check = check_if_single_file(client, fileshare, prefix, timeout) if _check[0]: yield _check[1] return # get snapshot from fileshare if snapshot is None: fileshare, snapshot = \ blobxfer.util.parse_fileshare_or_file_snapshot_parameter(fileshare) # get snapshot from prefix if snapshot is None: prefix, snapshot = \ blobxfer.util.parse_fileshare_or_file_snapshot_parameter( prefix) # else recursively list from prefix path dirs = [prefix] while len(dirs) > 0: dir = dirs.pop() files = client.list_directories_and_files( share_name=fileshare, directory_name=dir, timeout=timeout, snapshot=snapshot, ) for file in files: fspath = str( pathlib.Path(dir if dir is not None else '') / file.name) if type(file) == azure.storage.file.models.File: fsprop = client.get_file_properties( share_name=fileshare, directory_name=None, file_name=fspath, timeout=timeout, snapshot=snapshot, ) yield fsprop else: if recursive: dirs.append(fspath)
[ "List files in path\n :param azure.storage.file.FileService client: file client\n :param str fileshare: file share\n :param str prefix: path prefix\n :param bool recursive: recursive\n :param int timeout: timeout\n :param str snapshot: snapshot\n :rtype: azure.storage.file.models.File\n :return: generator of files\n " ]
Please provide a description of the function:def list_all_files(client, fileshare, timeout=None): # type: (azure.storage.file.FileService, str, int) -> str dirs = [None] while len(dirs) > 0: dir = dirs.pop() files = client.list_directories_and_files( share_name=fileshare, directory_name=dir, timeout=timeout, ) for file in files: fspath = str( pathlib.Path(dir if dir is not None else '') / file.name) if type(file) == azure.storage.file.models.File: yield fspath else: dirs.append(fspath)
[ "List all files in share\n :param azure.storage.file.FileService client: file client\n :param str fileshare: file share\n :param int timeout: timeout\n :rtype: str\n :return: file name\n " ]
Please provide a description of the function:
def delete_file(client, fileshare, name, timeout=None):
    # type: (azure.storage.file.FileService, str, str, int) -> None
    dir, fpath, snapshot = parse_file_path(name)
    if blobxfer.util.is_not_empty(snapshot):
        raise RuntimeError(
            'attempting to delete single file snapshot: {}/{}'.format(
                fileshare, name))
    client.delete_file(
        share_name=fileshare,
        directory_name=dir,
        file_name=fpath,
        timeout=timeout,
    )
[ "Delete file from share\n :param azure.storage.file.FileService client: file client\n :param str fileshare: file share\n :param str name: file name\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def get_file_range(ase, offsets, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.download.Offsets, int) -> bytes
    dir, fpath, _ = parse_file_path(ase.name)
    return ase.client._get_file(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        start_range=offsets.range_start,
        end_range=offsets.range_end,
        validate_content=False,  # HTTPS takes care of integrity during xfer
        timeout=timeout,
        snapshot=ase.snapshot,
    ).content
[ "Retrieve file range\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param blobxfer.models.download.Offsets offsets: download offsets\n :param int timeout: timeout\n :rtype: bytes\n :return: content for file range\n " ]
Please provide a description of the function:
def create_share(ase, containers_created, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, dict, int) -> None
    # check if auth allows create container
    if not ase.can_create_containers:
        return
    key = ase.client.account_name + ':file=' + ase.container
    if key in containers_created:
        return
    if ase.client.create_share(
            share_name=ase.container,
            fail_on_exist=False,
            timeout=timeout):
        logger.info('created file share {} on storage account {}'.format(
            ase.container, ase.client.account_name))
    # always add to set (as it could be pre-existing)
    containers_created.add(key)
[ "Create file share\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param dict containers_created: containers already created map\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def create_all_parent_directories(ase, dirs_created, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, dict, int) -> None
    dirs = pathlib.Path(ase.name).parts
    if len(dirs) <= 1:
        return
    # remove last part (which is the file)
    dirs = dirs[:-1]
    dk = ase.client.account_name + ':' + ase.container
    for i in range(0, len(dirs)):
        dir = str(pathlib.Path(*(dirs[0:i + 1])))
        if dk not in dirs_created or dir not in dirs_created[dk]:
            ase.client.create_directory(
                share_name=ase.container,
                directory_name=dir,
                fail_on_exist=False,
                timeout=timeout)
            if dk not in dirs_created:
                dirs_created[dk] = set()
            dirs_created[dk].add(dir)
[ "Create all parent directories for a file\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param dict dirs_created: directories already created map\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def create_file(ase, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, int) -> None
    dir, fpath, _ = parse_file_path(ase.name)
    ase.client.create_file(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        content_length=ase.size,
        content_settings=azure.storage.file.models.ContentSettings(
            content_type=blobxfer.util.get_mime_type(fpath)
        ),
        timeout=timeout)
[ "Create file remotely\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def put_file_range(ase, offsets, data, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.upload.Offsets, bytes, int) -> None
    dir, fpath, _ = parse_file_path(ase.name)
    ase.client.update_range(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        data=data,
        start_range=offsets.range_start,
        end_range=offsets.range_end,
        validate_content=False,  # integrity is enforced with HTTPS
        timeout=timeout)
[ "Puts a range of bytes into the remote file\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param blobxfer.models.upload.Offsets offsets: upload offsets\n :param bytes data: data\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def set_file_properties(ase, md5, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, str, int) -> None
    dir, fpath, _ = parse_file_path(ase.name)
    ase.client.set_file_properties(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        content_settings=azure.storage.file.models.ContentSettings(
            content_type=blobxfer.util.get_mime_type(fpath),
            content_md5=md5,
            cache_control=ase.cache_control,
        ),
        timeout=timeout)
[ "Set file properties\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param str md5: md5 as base64\n :param int timeout: timeout\n " ]
Please provide a description of the function:
def set_file_metadata(ase, metadata, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, dict, int) -> None
    dir, fpath, _ = parse_file_path(ase.name)
    ase.client.set_file_metadata(
        share_name=ase.container,
        directory_name=dir,
        file_name=fpath,
        metadata=metadata,
        timeout=timeout)
[ "Set file metadata\n :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity\n :param dict metadata: metadata kv pairs\n :param int timeout: timeout\n " ]
Please provide a description of the function:def get_md5_from_metadata(ase): # type: (blobxfer.models.azure.StorageEntity) -> str # if encryption metadata is present, check for pre-encryption # md5 in blobxfer extensions md5 = None if ase.is_encrypted: try: md5 = ase.encryption_metadata.blobxfer_extensions.\ pre_encrypted_content_md5 except AttributeError: # this can happen if partial metadata is present md5 = None if blobxfer.util.is_none_or_empty(md5): md5 = ase.md5 return md5
[ "Get MD5 from properties or metadata\n :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity\n :rtype: str or None\n :return: md5\n " ]
Please provide a description of the function:def generate_fileattr_metadata(local_path, metadata): # type: (blobxfer.models.upload.LocalPath, dict) -> dict if blobxfer.util.on_windows(): global _FILEATTR_WARNED_ON_WINDOWS if not _FILEATTR_WARNED_ON_WINDOWS: _FILEATTR_WARNED_ON_WINDOWS = True logger.warning( 'file attributes store/restore on Windows is not ' 'supported yet') return None else: md = { _JSON_KEY_FILE_ATTRIBUTES: { _JSON_KEY_FILE_ATTRIBUTES_POSIX: { _JSON_KEY_FILE_ATTRIBUTES_MODE: local_path.mode, _JSON_KEY_FILE_ATTRIBUTES_UID: local_path.uid, _JSON_KEY_FILE_ATTRIBUTES_GID: local_path.gid, } } } return blobxfer.util.merge_dict(metadata, md)
[ "Generate file attribute metadata dict\n :param blobxfer.models.upload.LocalPath local_path: local path\n :param dict metadata: existing metadata dict\n :rtype: dict\n :return: merged metadata dictionary\n " ]
Please provide a description of the function:def fileattr_from_metadata(md): # type: (dict) -> collections.namedtuple try: mdattr = json.loads( md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_FILE_ATTRIBUTES] except (KeyError, TypeError): return None else: if blobxfer.util.on_windows(): global _FILEATTR_WARNED_ON_WINDOWS if not _FILEATTR_WARNED_ON_WINDOWS: _FILEATTR_WARNED_ON_WINDOWS = True logger.warning( 'file attributes store/restore on Windows is not ' 'supported yet') fileattr = None else: try: fileattr = PosixFileAttr( mode=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][ _JSON_KEY_FILE_ATTRIBUTES_MODE], uid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][ _JSON_KEY_FILE_ATTRIBUTES_UID], gid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][ _JSON_KEY_FILE_ATTRIBUTES_GID], ) except KeyError: fileattr = None return fileattr
[ "Convert fileattr metadata in json metadata\n :param dict md: metadata dictionary\n :rtype: PosixFileAttr or WindowsFileAttr or None\n :return: fileattr metadata\n " ]
Please provide a description of the function:
def create_vectored_io_next_entry(ase):
    # type: (blobxfer.models.azure.StorageEntity) -> str
    return ';'.join(
        (ase.client.primary_endpoint, ase.container, ase.name)
    )
[ "Create Vectored IO next entry id\n :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity\n :rtype: str\n :return: vectored io next entry\n " ]
Please provide a description of the function:
def explode_vectored_io_next_entry(entry):
    # type: (str, int) -> str
    tmp = entry.split(';')
    _sa = tmp[0].split('.')
    return VectoredNextEntry(
        storage_account_name=_sa[0],
        endpoint='.'.join(_sa[2:]),
        container=tmp[1],
        name=tmp[2],
    )
[ "Explode next vectored io entry\n :param str entry: next entry\n :rtype: VectoredNextEntry\n :return: vectored next entry\n " ]
Please provide a description of the function:
def remove_vectored_io_slice_suffix_from_name(name, slice):
    # type: (str, int) -> str
    suffix = '.bxslice-{}'.format(slice)
    if name.endswith(suffix):
        return name[:-len(suffix)]
    else:
        return name
[ "Remove vectored io (stripe) slice suffix from a given name\n :param str name: entity name\n :param int slice: slice num\n :rtype: str\n :return: name without suffix\n " ]
Please provide a description of the function:def generate_vectored_io_stripe_metadata(local_path, metadata): # type: (blobxfer.models.upload.LocalPath, dict) -> dict md = { _JSON_KEY_VECTORED_IO: { _JSON_KEY_VECTORED_IO_MODE: _JSON_KEY_VECTORED_IO_STRIPE, _JSON_KEY_VECTORED_IO_STRIPE: { _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE: local_path.total_size, _JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START: local_path.view.fd_start, _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES: local_path.view.total_slices, _JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID: local_path.view.slice_num, _JSON_KEY_VECTORED_IO_STRIPE_NEXT: local_path.view.next, } } } return blobxfer.util.merge_dict(metadata, md)
[ "Generate vectored io stripe metadata dict\n :param blobxfer.models.upload.LocalPath local_path: local path\n :param dict metadata: existing metadata dict\n :rtype: dict\n :return: merged metadata dictionary\n " ]
Please provide a description of the function:def vectored_io_from_metadata(md): # type: (dict) -> collections.namedtuple try: mdattr = json.loads( md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_VECTORED_IO] except (KeyError, TypeError): pass else: if mdattr[_JSON_KEY_VECTORED_IO_MODE] == _JSON_KEY_VECTORED_IO_STRIPE: mdstripe = mdattr[_JSON_KEY_VECTORED_IO_STRIPE] try: nextptr = explode_vectored_io_next_entry( mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_NEXT]) except (KeyError, AttributeError): nextptr = None vio = VectoredStripe( total_size=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE], offset_start=mdstripe[ _JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START], total_slices=mdstripe[ _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES], slice_id=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID], next=nextptr, ) return vio else: raise RuntimeError('Cannot handle Vectored IO mode: {}'.format( mdattr[_JSON_KEY_VECTORED_IO_MODE])) return None
[ "Convert vectored io metadata in json metadata\n :param dict md: metadata dictionary\n :rtype: VectoredStripe or None\n :return: vectored io metadata\n " ]
Please provide a description of the function:
def load_rsa_private_key_file(rsakeyfile, passphrase):
    # type: (str, str) ->
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey
    keypath = os.path.expandvars(os.path.expanduser(rsakeyfile))
    with open(keypath, 'rb') as keyfile:
        return cryptography.hazmat.primitives.serialization.\
            load_pem_private_key(
                keyfile.read(),
                passphrase.encode('utf8') if passphrase is not None else None,
                backend=cryptography.hazmat.backends.default_backend()
            )
[ "Load an RSA Private key PEM file with passphrase if specified\n :param str rsakeyfile: RSA private key PEM file to load\n :param str passphrase: optional passphrase\n :rtype: cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey\n :return: RSAPrivateKey\n " ]
Please provide a description of the function:
def load_rsa_public_key_file(rsakeyfile):
    # type: (str, str) ->
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey
    keypath = os.path.expandvars(os.path.expanduser(rsakeyfile))
    with open(keypath, 'rb') as keyfile:
        return cryptography.hazmat.primitives.serialization.\
            load_pem_public_key(
                keyfile.read(),
                backend=cryptography.hazmat.backends.default_backend()
            )
[ "Load an RSA Public key PEM file\n :param str rsakeyfile: RSA public key PEM file to load\n :rtype: cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey\n :return: RSAPublicKey\n " ]
Please provide a description of the function:
def rsa_decrypt_base64_encoded_key(rsaprivatekey, enckey):
    # type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey,
    #        str) -> bytes
    return rsaprivatekey.decrypt(
        base64.b64decode(enckey),
        cryptography.hazmat.primitives.asymmetric.padding.OAEP(
            mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
                algorithm=cryptography.hazmat.primitives.hashes.SHA1()
            ),
            algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
            label=None,
        )
    )
[ "Decrypt an RSA encrypted key encoded as base64\n :param rsaprivatekey: RSA private key\n :type rsaprivatekey:\n cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey\n :param str enckey: base64-encoded key\n :rtype: bytes\n :return: decrypted key\n " ]
Please provide a description of the function:
def rsa_encrypt_key_base64_encoded(rsaprivatekey, rsapublickey, plainkey):
    # type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey,
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey,
    #        bytes) -> str
    if rsapublickey is None:
        rsapublickey = rsaprivatekey.public_key()
    enckey = rsapublickey.encrypt(
        plainkey,
        cryptography.hazmat.primitives.asymmetric.padding.OAEP(
            mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
                algorithm=cryptography.hazmat.primitives.hashes.SHA1()),
            algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
            label=None))
    return blobxfer.util.base64_encode_as_string(enckey)
[ "Encrypt a plaintext key using RSA and PKCS1_OAEP padding\n :param rsaprivatekey: RSA private key\n :type rsaprivatekey:\n cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey\n :param rsapublickey: RSA public key\n :type rsapublickey:\n cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey\n :param bytes plainkey: plain key\n :rtype: str\n :return: encrypted key\n " ]
Please provide a description of the function:
def pkcs7_pad(buf):
    # type: (bytes) -> bytes
    padder = cryptography.hazmat.primitives.padding.PKCS7(
        cryptography.hazmat.primitives.ciphers.
        algorithms.AES.block_size).padder()
    return padder.update(buf) + padder.finalize()
[ "Appends PKCS7 padding to an input buffer\n :param bytes buf: buffer to add padding\n :rtype: bytes\n :return: buffer with PKCS7_PADDING\n " ]
Please provide a description of the function:
def pkcs7_unpad(buf):
    # type: (bytes) -> bytes
    unpadder = cryptography.hazmat.primitives.padding.PKCS7(
        cryptography.hazmat.primitives.ciphers.
        algorithms.AES.block_size).unpadder()
    return unpadder.update(buf) + unpadder.finalize()
[ "Removes PKCS7 padding a decrypted object\n :param bytes buf: buffer to remove padding\n :rtype: bytes\n :return: buffer without PKCS7_PADDING\n " ]
Please provide a description of the function:
def aes_cbc_decrypt_data(symkey, iv, encdata, unpad):
    # type: (bytes, bytes, bytes, bool) -> bytes
    cipher = cryptography.hazmat.primitives.ciphers.Cipher(
        cryptography.hazmat.primitives.ciphers.algorithms.AES(symkey),
        cryptography.hazmat.primitives.ciphers.modes.CBC(iv),
        backend=cryptography.hazmat.backends.default_backend()).decryptor()
    decrypted = cipher.update(encdata) + cipher.finalize()
    if unpad:
        return pkcs7_unpad(decrypted)
    else:
        return decrypted
[ "Decrypt data using AES CBC\n :param bytes symkey: symmetric key\n :param bytes iv: initialization vector\n :param bytes encdata: data to decrypt\n :param bool unpad: unpad data\n :rtype: bytes\n :return: decrypted data\n " ]
Please provide a description of the function:
def aes_cbc_encrypt_data(symkey, iv, data, pad):
    # type: (bytes, bytes, bytes, bool) -> bytes
    cipher = cryptography.hazmat.primitives.ciphers.Cipher(
        cryptography.hazmat.primitives.ciphers.algorithms.AES(symkey),
        cryptography.hazmat.primitives.ciphers.modes.CBC(iv),
        backend=cryptography.hazmat.backends.default_backend()).encryptor()
    if pad:
        return cipher.update(pkcs7_pad(data)) + cipher.finalize()
    else:
        return cipher.update(data) + cipher.finalize()
[ "Encrypt data using AES CBC\n :param bytes symkey: symmetric key\n :param bytes iv: initialization vector\n :param bytes data: data to encrypt\n :param bool pad: pad data\n :rtype: bytes\n :return: encrypted data\n " ]
Please provide a description of the function:def _worker_process(self): # type: (CryptoOffload) -> None while not self.terminated: try: inst = self._task_queue.get(True, 0.1) except queue.Empty: continue # UNUSED due to AES256-CBC FullBlob mode if inst[0] == CryptoAction.Encrypt: # noqa local_file, offsets, symkey, iv = \ inst[1], inst[2], inst[3], inst[4] with open(local_file, 'rb') as fd: data = fd.read() encdata = blobxfer.operations.crypto.aes_cbc_encrypt_data( symkey, iv, data, offsets.pad) with tempfile.NamedTemporaryFile( mode='wb', delete=False) as fd: fpath = fd.name fd.write(encdata) self._done_cv.acquire() self._done_queue.put(fpath) elif inst[0] == CryptoAction.Decrypt: final_path, internal_fdstart, offsets, symkey, iv, \ hmac_datafile = \ inst[1], inst[2], inst[3], inst[4], inst[5], inst[6] # read encrypted data from disk with open(hmac_datafile, 'rb') as fd: encdata = fd.read() data = blobxfer.operations.crypto.aes_cbc_decrypt_data( symkey, iv, encdata, offsets.unpad) # write decrypted data to disk if len(data) > 0: with open(final_path, 'r+b') as fd: fd.seek(internal_fdstart + offsets.fd_start, 0) fd.write(data) self._done_cv.acquire() self._done_queue.put((final_path, offsets)) # notify and release condition var self._done_cv.notify() self._done_cv.release()
[ "Crypto worker\n :param CryptoOffload self: this\n " ]
Please provide a description of the function:
def add_decrypt_chunk(
        self, final_path, internal_fdstart, offsets, symkey, iv,
        hmac_datafile):
    # type: (CryptoOffload, str, int, blobxfer.models.download.Offsets,
    #        bytes, bytes, str) -> None
    self._task_queue.put(
        (CryptoAction.Decrypt, final_path, internal_fdstart, offsets,
         symkey, iv, hmac_datafile)
    )
[ "Add a chunk to decrypt\n :param CryptoOffload self: this\n :param str final_path: final path\n :param int internal_fdstart: internal fd offset start\n :param blobxfer.models.download.Offsets offsets: offsets\n :param bytes symkey: symmetric key\n :param bytes iv: initialization vector\n :param str hmac_datafile: encrypted data file\n " ]
Please provide a description of the function:
def add_encrypt_chunk(self, local_file, offsets, symkey, iv):  # noqa
    # type: (CryptoOffload, pathlib.Path, blobxfer.models.upload.Offsets,
    #        bytes, bytes) -> None
    self._task_queue.put(
        (CryptoAction.Encrypt, str(local_file), offsets, symkey, iv)
    )
[ "Add a chunk to encrypt\n :param CryptoOffload self: this\n :param pathlib.Path local_file: local file\n :param blobxfer.models.upload.Offsets offsets: offsets\n :param bytes symkey: symmetric key\n :param bytes iv: initialization vector\n " ]
Please provide a description of the function:
def termination_check(self):
    # type: (Downloader) -> bool
    with self._transfer_lock:
        with self._disk_operation_lock:
            return (self._download_terminate or
                    len(self._exceptions) > 0 or
                    (self._all_remote_files_processed and
                     len(self._transfer_set) == 0 and
                     len(self._disk_set) == 0))
[ "Check if terminated\n :param Downloader self: this\n :rtype: bool\n :return: if terminated\n " ]
Please provide a description of the function:
def termination_check_md5(self):
    # type: (Downloader) -> bool
    with self._md5_meta_lock:
        with self._transfer_lock:
            return (self._download_terminate or
                    (self._all_remote_files_processed and
                     len(self._md5_map) == 0 and
                     len(self._transfer_set) == 0))
[ "Check if terminated from MD5 context\n :param Downloader self: this\n :rtype: bool\n :return: if terminated from MD5 context\n " ]
Please provide a description of the function:def ensure_local_destination(creds, spec, dry_run): # type: (blobxfer.operations.azure.StorageCredentials, # blobxfer.models.download.Specification, bool) -> None # ensure destination path is writable given the source if len(spec.sources) < 1: raise RuntimeError('no sources to download from specified') # set is_dir for destination spec.destination.is_dir = True if len(spec.sources) == 1: # we need to query the source to see if this is a directory rpath = str(spec.sources[0].paths[0]) cont, dir = blobxfer.util.explode_azure_path(rpath) if not blobxfer.util.is_none_or_empty(dir): sa = creds.get_storage_account( spec.sources[0].lookup_storage_account(rpath)) # ensure at least read permissions if not sa.can_read_object: raise RuntimeError( 'unable to prepare for remote path {} as credential ' 'for storage account {} does not permit read ' 'access'.format(rpath, sa.name)) if (spec.options.mode == blobxfer.models.azure.StorageModes.File): if (blobxfer.operations.azure.file.check_if_single_file( sa.file_client, cont, dir)[0] and spec.options.rename): spec.destination.is_dir = False else: if (blobxfer.operations.azure.blob.check_if_single_blob( sa.block_blob_client, cont, dir) and spec.options.rename): spec.destination.is_dir = False logger.debug('dest is_dir={} for {} specs'.format( spec.destination.is_dir, len(spec.sources))) # ensure destination path if not dry_run: spec.destination.ensure_path_exists()
[ "Ensure a local destination path given a download spec\n :param blobxfer.operations.azure.StorageCredentials creds: creds\n :param blobxfer.models.download.Specification spec: download spec\n :param bool dry_run: dry run\n " ]
Please provide a description of the function:
def create_unique_transfer_operation_id(ase):
    # type: (blobxfer.models.azure.StorageEntity) -> str
    return ';'.join(
        (ase._client.primary_endpoint, ase.path, str(ase.vectored_io))
    )
[ "Create a unique transfer operation id\n :param blobxfer.models.azure.StorageEntity ase: storage entity\n :rtype: str\n :return: unique transfer id\n " ]
Please provide a description of the function:
def create_unique_disk_operation_id(dd, offsets):
    # type: (blobxfer.models.download.Descriptor,
    #        blobxfer.models.download.Offsets) -> str
    return ';'.join(
        (str(dd.final_path), dd.entity._client.primary_endpoint,
         dd.entity.path, str(offsets.range_start))
    )
[ "Create a unique disk operation id\n :param blobxfer.models.download.Descriptor dd: download descriptor\n :param blobxfer.models.download.Offsets offsets: download offsets\n :rtype: str\n :return: unique disk id\n " ]
Please provide a description of the function:
def _update_progress_bar(self):
    # type: (Downloader) -> None
    blobxfer.operations.progress.update_progress_bar(
        self._general_options,
        'download',
        self._download_start_time,
        self._download_total,
        self._download_sofar,
        self._download_bytes_total,
        self._download_bytes_sofar,
    )
[ "Update progress bar\n :param Downloader self: this\n " ]
Please provide a description of the function:def _check_download_conditions(self, lpath, rfile): # type: (Downloader, pathlib.Path, # blobxfer.models.azure.StorageEntity) -> DownloadAction if not lpath.exists(): if rfile.vectored_io is not None: fpath = blobxfer.models.download.Descriptor.\ convert_vectored_io_slice_to_final_path_name(lpath, rfile) if not fpath.exists(): return DownloadAction.Download else: return DownloadAction.Download if not self._spec.options.overwrite: logger.info( 'not overwriting local file: {} (remote: {})'.format( lpath, rfile.path)) return DownloadAction.Skip # check skip on options, MD5 match takes priority md5 = blobxfer.models.metadata.get_md5_from_metadata(rfile) if self._spec.skip_on.md5_match and blobxfer.util.is_not_empty(md5): return DownloadAction.CheckMd5 # if neither of the remaining skip on actions are activated, download if (not self._spec.skip_on.filesize_match and not self._spec.skip_on.lmt_ge): return DownloadAction.Download # check skip on file size match dl_fs = None if self._spec.skip_on.filesize_match: lsize = lpath.stat().st_size if rfile.mode == blobxfer.models.azure.StorageModes.Page: lsize = blobxfer.util.page_align_content_length(lsize) if rfile.size == lsize: dl_fs = False if self._general_options.verbose: logger.debug('filesize match: {} == {} size={}'.format( lpath, rfile.path, lsize)) else: dl_fs = True # check skip on lmt ge dl_lmt = None if self._spec.skip_on.lmt_ge: mtime = blobxfer.util.datetime_from_timestamp( lpath.stat().st_mtime, as_utc=True) if mtime >= rfile.lmt: dl_lmt = False if self._general_options.verbose: logger.debug('lmt ge match: {} lmt={} >= {} lmt={}'.format( lpath, mtime, rfile.path, rfile.lmt)) else: dl_lmt = True # download if either skip on mismatch is True if dl_fs or dl_lmt: return DownloadAction.Download else: return DownloadAction.Skip
[ "Check for download conditions\n :param Downloader self: this\n :param pathlib.Path lpath: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n :rtype: DownloadAction\n :return: download action\n " ]
Please provide a description of the function:def _pre_md5_skip_on_check(self, lpath, rfile): # type: (Downloader, pathlib.Path, # blobxfer.models.azure.StorageEntity) -> None md5 = blobxfer.models.metadata.get_md5_from_metadata(rfile) key = blobxfer.operations.download.Downloader.\ create_unique_transfer_operation_id(rfile) with self._md5_meta_lock: self._md5_map[key] = rfile slpath = str(lpath) # temporarily create a download descriptor view for vectored io if rfile.vectored_io is not None: view, _ = blobxfer.models.download.Descriptor.generate_view(rfile) fpath = str( blobxfer.models.download.Descriptor. convert_vectored_io_slice_to_final_path_name(lpath, rfile) ) else: view = None fpath = slpath self._md5_offload.add_localfile_for_md5_check( key, slpath, fpath, md5, rfile.mode, view)
[ "Perform pre MD5 skip on check\n :param Downloader self: this\n :param pathlib.Path lpath: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n " ]
Please provide a description of the function:def _post_md5_skip_on_check(self, key, filename, size, md5_match): # type: (Downloader, str, str, int, bool) -> None with self._md5_meta_lock: rfile = self._md5_map.pop(key) lpath = pathlib.Path(filename) if md5_match: if size is None: size = lpath.stat().st_size with self._transfer_lock: self._transfer_set.remove( blobxfer.operations.download.Downloader. create_unique_transfer_operation_id(rfile)) self._download_total -= 1 self._download_bytes_total -= size if self._general_options.dry_run: logger.info('[DRY RUN] MD5 match, skipping: {} -> {}'.format( rfile.path, lpath)) else: if self._general_options.dry_run: with self._transfer_lock: self._transfer_set.remove( blobxfer.operations.download.Downloader. create_unique_transfer_operation_id(rfile)) self._download_total -= 1 self._download_bytes_total -= size logger.info( '[DRY RUN] MD5 mismatch, download: {} -> {}'.format( rfile.path, lpath)) else: self._add_to_download_queue(lpath, rfile)
[ "Perform post MD5 skip on check\n :param Downloader self: this\n :param str key: md5 map key\n :param str filename: local filename\n :param int size: size of checked data\n :param bool md5_match: if MD5 matches\n " ]
Please provide a description of the function:def _check_for_crypto_done(self): # type: (Downloader) -> None cv = self._crypto_offload.done_cv while not self.termination_check: result = None cv.acquire() while True: result = self._crypto_offload.pop_done_queue() if result is None: # use cv timeout due to possible non-wake while running cv.wait(0.1) # check for terminating conditions if self.termination_check: break else: break cv.release() if result is not None: try: final_path, offsets = result with self._transfer_lock: dd = self._dd_map[final_path] self._finalize_chunk(dd, offsets) except KeyError: # this can happen if all of the last integrity # chunks are processed at once pass
[ "Check queue for crypto done\n :param Downloader self: this\n " ]
Please provide a description of the function:def _add_to_download_queue(self, lpath, rfile): # type: (Downloader, pathlib.Path, # blobxfer.models.azure.StorageEntity) -> None # prepare remote file for download dd = blobxfer.models.download.Descriptor( lpath, rfile, self._spec.options, self._general_options, self._resume) with self._transfer_lock: self._transfer_cc[dd.entity.path] = 0 if dd.entity.is_encrypted: self._dd_map[str(dd.final_path)] = dd # add download descriptor to queue self._transfer_queue.put(dd) if self._download_start_time is None: with self._transfer_lock: if self._download_start_time is None: self._download_start_time = blobxfer.util.datetime_now()
[ "Add remote file to download queue\n :param Downloader self: this\n :param pathlib.Path lpath: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n " ]
Please provide a description of the function:
def _initialize_transfer_threads(self):
    # type: (Downloader) -> None
    logger.debug('spawning {} transfer threads'.format(
        self._general_options.concurrency.transfer_threads))
    for _ in range(self._general_options.concurrency.transfer_threads):
        thr = threading.Thread(target=self._worker_thread_transfer)
        self._transfer_threads.append(thr)
        thr.start()
[ "Initialize transfer threads\n :param Downloader self: this\n " ]
Please provide a description of the function:
def _wait_for_disk_threads(self, terminate):
    # type: (Downloader, bool) -> None
    if terminate:
        self._download_terminate = terminate
    for thr in self._disk_threads:
        blobxfer.util.join_thread(thr)
[ "Wait for disk threads\n :param Downloader self: this\n :param bool terminate: terminate threads\n " ]
Please provide a description of the function:
def _worker_thread_transfer(self):
    # type: (Downloader) -> None
    max_set_len = self._general_options.concurrency.disk_threads << 2
    while not self.termination_check:
        try:
            if len(self._disk_set) > max_set_len:
                time.sleep(0.1)
                continue
            else:
                dd = self._transfer_queue.get(block=False, timeout=0.1)
        except queue.Empty:
            continue
        try:
            self._process_download_descriptor(dd)
        except Exception as e:
            with self._transfer_lock:
                self._exceptions.append(e)
[ "Worker thread download\n :param Downloader self: this\n " ]
Please provide a description of the function:def _worker_thread_disk(self): # type: (Downloader) -> None while not self.termination_check: try: dd, offsets, data = self._disk_queue.get( block=False, timeout=0.1) except queue.Empty: continue try: self._process_data(dd, offsets, data) except Exception as e: with self._transfer_lock: self._exceptions.append(e)
[ "Worker thread for disk\n :param Downloader self: this\n " ]
Please provide a description of the function:def _process_download_descriptor(self, dd): # type: (Downloader, blobxfer.models.download.Descriptor) -> None # update progress bar self._update_progress_bar() # get download offsets offsets, resume_bytes = dd.next_offsets() # add resume bytes to counter if resume_bytes is not None: with self._disk_operation_lock: self._download_bytes_sofar += resume_bytes logger.debug('adding {} sofar {} from {}'.format( resume_bytes, self._download_bytes_sofar, dd.entity.name)) del resume_bytes # check if all operations completed if offsets is None and dd.all_operations_completed: finalize = True sfpath = str(dd.final_path) # finalize integrity dd.finalize_integrity() # vectored io checks if dd.entity.vectored_io is not None: with self._transfer_lock: if sfpath not in self._vio_map: self._vio_map[sfpath] = 1 else: self._vio_map[sfpath] += 1 if (self._vio_map[sfpath] == dd.entity.vectored_io.total_slices): self._vio_map.pop(sfpath) else: finalize = False # finalize file if finalize: dd.finalize_file() # accounting with self._transfer_lock: self._download_sofar += 1 if dd.entity.is_encrypted: self._dd_map.pop(sfpath) self._transfer_set.remove( blobxfer.operations.download.Downloader. create_unique_transfer_operation_id(dd.entity)) self._transfer_cc.pop(dd.entity.path, None) return # re-enqueue for other threads to download if offsets is None: self._transfer_queue.put(dd) return # ensure forthcoming disk operation is accounted for with self._disk_operation_lock: self._disk_set.add( blobxfer.operations.download.Downloader. create_unique_disk_operation_id(dd, offsets)) # check if there are too many concurrent connections with self._transfer_lock: self._transfer_cc[dd.entity.path] += 1 cc_xfer = self._transfer_cc[dd.entity.path] if cc_xfer <= self._spec.options.max_single_object_concurrency: self._transfer_queue.put(dd) # issue get range if dd.entity.mode == blobxfer.models.azure.StorageModes.File: data = blobxfer.operations.azure.file.get_file_range( dd.entity, offsets) else: data = blobxfer.operations.azure.blob.get_blob_range( dd.entity, offsets) with self._transfer_lock: self._transfer_cc[dd.entity.path] -= 1 if cc_xfer > self._spec.options.max_single_object_concurrency: self._transfer_queue.put(dd) # enqueue data for processing self._disk_queue.put((dd, offsets, data))
[ "Process download descriptor\n :param Downloader self: this\n :param blobxfer.models.download.Descriptor dd: download descriptor\n " ]
Please provide a description of the function:def _process_data(self, dd, offsets, data): # type: (Downloader, blobxfer.models.download.Descriptor, # blobxfer.models.download.Offsets, bytes) -> None # decrypt if necessary if dd.entity.is_encrypted: # slice data to proper bounds and get iv for chunk if offsets.chunk_num == 0: # set iv iv = dd.entity.encryption_metadata.content_encryption_iv # set data to decrypt encdata = data # send iv through hmac dd.hmac_iv(iv) else: # set iv iv = data[:blobxfer.models.crypto.AES256_BLOCKSIZE_BYTES] # set data to decrypt encdata = data[blobxfer.models.crypto.AES256_BLOCKSIZE_BYTES:] # write encdata to disk for hmac later _hmac_datafile = dd.write_unchecked_hmac_data( offsets, encdata) # decrypt data if self._crypto_offload is not None: self._crypto_offload.add_decrypt_chunk( str(dd.final_path), dd.view.fd_start, offsets, dd.entity.encryption_metadata.symmetric_key, iv, _hmac_datafile) # data will be integrity checked and written once # retrieved from crypto queue return else: data = blobxfer.operations.crypto.aes_cbc_decrypt_data( dd.entity.encryption_metadata.symmetric_key, iv, encdata, offsets.unpad) dd.write_data(offsets, data) else: # write data to disk dd.write_unchecked_data(offsets, data) # finalize chunk self._finalize_chunk(dd, offsets)
[ "Process downloaded data for disk\n :param Downloader self: this\n :param blobxfer.models.download.Descriptor dd: download descriptor\n :param blobxfer.models.download.Offsets offsets: offsets\n :param bytes data: data to process\n " ]
Please provide a description of the function:def _finalize_chunk(self, dd, offsets): # type: (Downloader, blobxfer.models.download.Descriptor, # blobxfer.models.download.Offsets) -> None if dd.entity.is_encrypted: dd.mark_unchecked_chunk_decrypted(offsets.chunk_num) # integrity check data and write to disk (this is called # regardless of md5/hmac enablement for resume purposes) dd.perform_chunked_integrity_check() # remove from disk set and add bytes to counter with self._disk_operation_lock: self._disk_set.remove( blobxfer.operations.download.Downloader. create_unique_disk_operation_id(dd, offsets)) self._download_bytes_sofar += offsets.num_bytes
[ "Finalize written chunk\n :param Downloader self: this\n :param blobxfer.models.download.Descriptor dd: download descriptor\n :param blobxfer.models.download.Offsets offsets: offsets\n " ]
Please provide a description of the function:def _cleanup_temporary_files(self): # type: (Downloader) -> None # iterate through dd map and cleanup files for key in self._dd_map: dd = self._dd_map[key] try: dd.cleanup_all_temporary_files() except Exception as e: logger.exception(e)
[ "Cleanup temporary files in case of an exception or interrupt.\n This function is not thread-safe.\n :param Downloader self: this\n " ]
Please provide a description of the function:def _catalog_local_files_for_deletion(self): # type: (Downloader) -> None if not (self._spec.options.delete_extraneous_destination and self._spec.destination.is_dir): return dst = str(self._spec.destination.path) for file in blobxfer.util.scantree(dst): self._delete_after.add(pathlib.Path(file.path))
[ "Catalog all local files if delete extraneous enabled\n :param Downloader self: this\n " ]
Please provide a description of the function:def _delete_extraneous_files(self): # type: (Downloader) -> None logger.info('attempting to delete {} extraneous files'.format( len(self._delete_after))) for file in self._delete_after: if self._general_options.dry_run: logger.info('[DRY RUN] deleting local file: {}'.format( file)) else: if self._general_options.verbose: logger.debug('deleting local file: {}'.format(file)) try: file.unlink() except OSError as e: logger.error('error deleting local file: {}'.format( str(e)))
[ "Delete extraneous files cataloged\n :param Downloader self: this\n " ]
Please provide a description of the function:def _run(self): # type: (Downloader) -> None # mark start self._start_time = blobxfer.util.datetime_now() logger.info('blobxfer start time: {0}'.format(self._start_time)) # ensure destination path blobxfer.operations.download.Downloader.ensure_local_destination( self._creds, self._spec, self._general_options.dry_run) logger.info('downloading blobs/files to local path: {}'.format( self._spec.destination.path)) self._catalog_local_files_for_deletion() # initialize resume db if specified if self._general_options.resume_file is not None: self._resume = blobxfer.operations.resume.DownloadResumeManager( self._general_options.resume_file) # initialize MD5 processes if (self._spec.options.check_file_md5 and self._general_options.concurrency.md5_processes > 0): self._md5_offload = blobxfer.operations.md5.LocalFileMd5Offload( num_workers=self._general_options.concurrency.md5_processes) self._md5_offload.initialize_check_thread( self._check_for_downloads_from_md5) # initialize crypto processes if self._general_options.concurrency.crypto_processes > 0: self._crypto_offload = blobxfer.operations.crypto.CryptoOffload( num_workers=self._general_options.concurrency.crypto_processes) self._crypto_offload.initialize_check_thread( self._check_for_crypto_done) # initialize download threads self._initialize_transfer_threads() self._initialize_disk_threads() # initialize local counters files_processed = 0 skipped_files = 0 skipped_size = 0 # iterate through source paths to download for src in self._spec.sources: for rfile in src.files( self._creds, self._spec.options, self._general_options.dry_run): # form local path for remote file if (not self._spec.destination.is_dir and self._spec.options.rename): lpath = pathlib.Path(self._spec.destination.path) else: lpath = None if self._spec.options.strip_components > 0: _lparts = pathlib.Path(rfile.name).parts _strip = min( (len(_lparts) - 1, self._spec.options.strip_components) ) if _strip > 0: lpath = pathlib.Path(*_lparts[_strip:]) if lpath is None: lpath = pathlib.Path(rfile.name) lpath = pathlib.Path(self._spec.destination.path) / lpath files_processed += 1 # check on download conditions action = self._check_download_conditions(lpath, rfile) # remove from delete after set try: self._delete_after.remove(lpath) except KeyError: pass if action == DownloadAction.Skip: skipped_files += 1 skipped_size += rfile.size if self._general_options.dry_run: logger.info('[DRY RUN] skipping: {} -> {}'.format( lpath, rfile.path)) continue # add potential download to set dlid = ( blobxfer.operations.download.Downloader. 
create_unique_transfer_operation_id(rfile) ) with self._transfer_lock: self._transfer_set.add(dlid) self._download_total += 1 self._download_bytes_total += rfile.size # either MD5 check or download now if action == DownloadAction.CheckMd5: self._pre_md5_skip_on_check(lpath, rfile) elif action == DownloadAction.Download: if self._general_options.dry_run: logger.info( '[DRY RUN] download: {} -> {}'.format( rfile.path, lpath)) with self._transfer_lock: self._transfer_set.remove(dlid) self._download_total -= 1 self._download_bytes_total -= rfile.size else: self._add_to_download_queue(lpath, rfile) # set remote files processed with self._md5_meta_lock: self._all_remote_files_processed = True with self._transfer_lock: download_size_mib = ( self._download_bytes_total / blobxfer.util.MEGABYTE ) logger.debug( ('{0} files {1:.4f} MiB filesize and/or lmt_ge ' 'skipped').format( skipped_files, skipped_size / blobxfer.util.MEGABYTE)) logger.debug( ('{0} remote files processed, waiting for download ' 'completion of approx. {1:.4f} MiB').format( files_processed, download_size_mib)) del files_processed del skipped_files del skipped_size # wait for downloads to complete self._wait_for_transfer_threads(terminate=False) self._wait_for_disk_threads(terminate=False) end_time = blobxfer.util.datetime_now() # update progress bar self._update_progress_bar() # check for exceptions if len(self._exceptions) > 0: logger.error('exceptions encountered while downloading') # raise the first one raise self._exceptions[0] # check for mismatches if (self._download_sofar != self._download_total or self._download_bytes_sofar != self._download_bytes_total): raise RuntimeError( 'download mismatch: [count={}/{} bytes={}/{}]'.format( self._download_sofar, self._download_total, self._download_bytes_sofar, self._download_bytes_total)) # delete all remaining local files not accounted for if # delete extraneous enabled self._delete_extraneous_files() # delete resume file if we've gotten this far if self._resume is not None: self._resume.delete() # output throughput if self._download_start_time is not None: dltime = (end_time - self._download_start_time).total_seconds() if dltime == 0: # noqa dltime = 1e-9 download_size_mib = ( self._download_bytes_total / blobxfer.util.MEGABYTE ) dlmibspeed = download_size_mib / dltime logger.info( ('elapsed download + verify time and throughput of {0:.4f} ' 'GiB: {1:.3f} sec, {2:.4f} Mbps ({3:.3f} MiB/sec)').format( download_size_mib / 1024, dltime, dlmibspeed * 8, dlmibspeed)) end_time = blobxfer.util.datetime_now() logger.info('blobxfer end time: {0} (elapsed: {1:.3f} sec)'.format( end_time, (end_time - self._start_time).total_seconds()))
[ "Execute Downloader\n :param Downloader self: this\n " ]
Please provide a description of the function:def encryption_metadata_exists(md): # type: (dict) -> bool try: if blobxfer.util.is_not_empty( md[EncryptionMetadata._METADATA_KEY_NAME]): return True except (KeyError, TypeError): pass return False
[ "Check if encryption metadata exists in json metadata\n :param dict md: metadata dictionary\n :rtype: bool\n :return: if encryption metadata exists\n " ]
Please provide a description of the function:def create_new_metadata(self, rsa_public_key): # type: (EncryptionMetadata, # cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey) # -> None self._rsa_public_key = rsa_public_key self._symkey = os.urandom( blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES) self._signkey = os.urandom( blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES) self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES) self.encryption_agent = EncryptionAgent( encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM, protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION, ) self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE
[ "Create new metadata entries for encryption (upload)\n :param EncryptionMetadata self: this\n :param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:\n rsa public key\n " ]
Please provide a description of the function:def convert_from_json(self, md, entityname, rsaprivatekey): # type: (EncryptionMetadata, dict, str, # cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey) # -> None # populate from encryption data ed = json.loads(md[EncryptionMetadata._METADATA_KEY_NAME]) try: self.blobxfer_extensions = EncryptionBlobxferExtensions( pre_encrypted_content_md5=ed[ EncryptionMetadata._JSON_KEY_BLOBXFER_EXTENSIONS][ EncryptionMetadata._JSON_KEY_PREENCRYPTED_MD5], ) except KeyError: pass self.content_encryption_iv = base64.b64decode( ed[EncryptionMetadata._JSON_KEY_CONTENT_IV]) self.encryption_agent = EncryptionAgent( encryption_algorithm=ed[ EncryptionMetadata._JSON_KEY_ENCRYPTION_AGENT][ EncryptionMetadata._JSON_KEY_ENCRYPTION_ALGORITHM], protocol=ed[ EncryptionMetadata._JSON_KEY_ENCRYPTION_AGENT][ EncryptionMetadata._JSON_KEY_PROTOCOL], ) if (self.encryption_agent.encryption_algorithm != EncryptionMetadata._ENCRYPTION_ALGORITHM): raise RuntimeError('{}: unknown block cipher: {}'.format( entityname, self.encryption_agent.encryption_algorithm)) if (self.encryption_agent.protocol != EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION): raise RuntimeError('{}: unknown encryption protocol: {}'.format( entityname, self.encryption_agent.protocol)) self.encryption_authentication = EncryptionAuthentication( algorithm=ed[ EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH][ EncryptionMetadata._JSON_KEY_ALGORITHM], message_authentication_code=ed[ EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH][ EncryptionMetadata._JSON_KEY_MAC], ) if (self.encryption_authentication.algorithm != EncryptionMetadata._AUTH_ALGORITHM): raise RuntimeError( '{}: unknown integrity/auth method: {}'.format( entityname, self.encryption_authentication.algorithm)) self.encryption_mode = ed[ EncryptionMetadata._JSON_KEY_ENCRYPTION_MODE] if self.encryption_mode != EncryptionMetadata._ENCRYPTION_MODE: raise RuntimeError( '{}: unknown encryption mode: {}'.format( entityname, self.encryption_mode)) try: _eak = ed[EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][ EncryptionMetadata._JSON_KEY_ENCRYPTED_AUTHKEY] except KeyError: _eak = None self.wrapped_content_key = EncryptionWrappedContentKey( algorithm=ed[ EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][ EncryptionMetadata._JSON_KEY_ALGORITHM], encrypted_authentication_key=_eak, encrypted_key=ed[ EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][ EncryptionMetadata._JSON_KEY_ENCRYPTED_KEY], key_id=ed[ EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY][ EncryptionMetadata._JSON_KEY_KEYID], ) if (self.wrapped_content_key.algorithm != EncryptionMetadata._ENCRYPTED_KEY_SCHEME): raise RuntimeError('{}: unknown key encryption scheme: {}'.format( entityname, self.wrapped_content_key.algorithm)) # if RSA key is a public key, stop here as keys cannot be decrypted if rsaprivatekey is None: return # decrypt symmetric key self._symkey = blobxfer.operations.crypto.\ rsa_decrypt_base64_encoded_key( rsaprivatekey, self.wrapped_content_key.encrypted_key) # decrypt signing key, if it exists if blobxfer.util.is_not_empty( self.wrapped_content_key.encrypted_authentication_key): self._signkey = blobxfer.operations.crypto.\ rsa_decrypt_base64_encoded_key( rsaprivatekey, self.wrapped_content_key.encrypted_authentication_key) else: self._signkey = None # populate from encryption data authentication try: eda = json.loads(md[EncryptionMetadata._METADATA_KEY_AUTH_NAME]) except KeyError: pass else: self.encryption_metadata_authentication = \ EncryptionMetadataAuthentication( algorithm=eda[ 
EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][ EncryptionMetadata._JSON_KEY_ALGORITHM], encoding=eda[ EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][ EncryptionMetadata._JSON_KEY_AUTH_ENCODING], message_authentication_code=eda[ EncryptionMetadata._JSON_KEY_AUTH_METAAUTH][ EncryptionMetadata._JSON_KEY_MAC], ) if (self.encryption_metadata_authentication.algorithm != EncryptionMetadata._AUTH_ALGORITHM): raise RuntimeError( '{}: unknown integrity/auth method: {}'.format( entityname, self.encryption_metadata_authentication.algorithm)) # verify hmac authhmac = base64.b64decode( self.encryption_metadata_authentication. message_authentication_code) bmeta = md[EncryptionMetadata._METADATA_KEY_NAME].encode( self.encryption_metadata_authentication.encoding) hmacsha256 = hmac.new(self._signkey, digestmod=hashlib.sha256) hmacsha256.update(bmeta) if hmacsha256.digest() != authhmac: raise RuntimeError( '{}: encryption metadata authentication failed'.format( entityname))
[ "Read metadata json into objects\n :param EncryptionMetadata self: this\n :param dict md: metadata dictionary\n :param str entityname: entity name\n :param rsaprivatekey: RSA private key\n :type rsaprivatekey:\n cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey\n " ]
Please provide a description of the function:def convert_to_json_with_mac(self, md5digest, hmacdigest): # type: (EncryptionMetadata, str, str) -> dict # encrypt keys enc_content_key = blobxfer.operations.crypto.\ rsa_encrypt_key_base64_encoded( None, self._rsa_public_key, self.symmetric_key) enc_sign_key = blobxfer.operations.crypto.\ rsa_encrypt_key_base64_encoded( None, self._rsa_public_key, self.signing_key) # generate json encjson = { EncryptionMetadata._JSON_KEY_ENCRYPTION_MODE: EncryptionMetadata._ENCRYPTION_MODE, EncryptionMetadata._JSON_KEY_CONTENT_IV: blobxfer.util.base64_encode_as_string(self.content_encryption_iv), EncryptionMetadata._JSON_KEY_WRAPPEDCONTENTKEY: { EncryptionMetadata._JSON_KEY_KEYID: 'private:pem', EncryptionMetadata._JSON_KEY_ENCRYPTED_KEY: enc_content_key, EncryptionMetadata._JSON_KEY_ENCRYPTED_AUTHKEY: enc_sign_key, EncryptionMetadata._JSON_KEY_ALGORITHM: EncryptionMetadata._ENCRYPTED_KEY_SCHEME, }, EncryptionMetadata._JSON_KEY_ENCRYPTION_AGENT: { EncryptionMetadata._JSON_KEY_PROTOCOL: EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION, EncryptionMetadata._JSON_KEY_ENCRYPTION_ALGORITHM: EncryptionMetadata._ENCRYPTION_ALGORITHM, }, EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH: { EncryptionMetadata._JSON_KEY_ALGORITHM: EncryptionMetadata._AUTH_ALGORITHM, }, EncryptionMetadata._JSON_KEY_KEY_WRAPPING_METADATA: {}, } if md5digest is not None: encjson[EncryptionMetadata._JSON_KEY_BLOBXFER_EXTENSIONS] = { EncryptionMetadata._JSON_KEY_PREENCRYPTED_MD5: md5digest } if hmacdigest is not None: encjson[EncryptionMetadata._JSON_KEY_INTEGRITY_AUTH][ EncryptionMetadata._JSON_KEY_MAC] = hmacdigest bencjson = json.dumps( encjson, sort_keys=True, ensure_ascii=False).encode( EncryptionMetadata._AUTH_ENCODING_TYPE) encjson = { EncryptionMetadata._METADATA_KEY_NAME: json.dumps(encjson, sort_keys=True) } # compute MAC over encjson hmacsha256 = hmac.new(self._signkey, digestmod=hashlib.sha256) hmacsha256.update(bencjson) authjson = { EncryptionMetadata._JSON_KEY_AUTH_METAAUTH: { EncryptionMetadata._JSON_KEY_ALGORITHM: EncryptionMetadata._AUTH_ALGORITHM, EncryptionMetadata._JSON_KEY_AUTH_ENCODING: EncryptionMetadata._AUTH_ENCODING_TYPE, EncryptionMetadata._JSON_KEY_MAC: blobxfer.util.base64_encode_as_string(hmacsha256.digest()), } } encjson[EncryptionMetadata._METADATA_KEY_AUTH_NAME] = json.dumps( authjson, sort_keys=True) return encjson
[ "Constructs metadata for encryption\n :param EncryptionMetadata self: this\n :param str md5digest: md5 digest\n :param str hmacdigest: hmac-sha256 digest (data)\n :rtype: dict\n :return: encryption metadata\n " ]
Please provide a description of the function:def initialize_hmac(self): # type: (EncryptionMetadata) -> hmac.HMAC if self._signkey is not None: return hmac.new(self._signkey, digestmod=hashlib.sha256) else: return None
[ "Initialize an hmac from a signing key if it exists\n :param EncryptionMetadata self: this\n :rtype: hmac.HMAC or None\n :return: hmac\n " ]
Please provide a description of the function:def remote_is_file(self): # type: (Descriptor) -> bool return self.dst_entity.mode == blobxfer.models.azure.StorageModes.File
[ "Remote destination is an Azure File\n :param Descriptor self: this\n :rtype: bool\n :return: remote is an Azure File\n " ]
Please provide a description of the function:def remote_is_page_blob(self): # type: (Descriptor) -> bool return self.dst_entity.mode == blobxfer.models.azure.StorageModes.Page
[ "Remote destination is an Azure Page Blob\n :param Descriptor self: this\n :rtype: bool\n :return: remote is an Azure Page Blob\n " ]
Please provide a description of the function:def remote_is_block_blob(self): # type: (Descriptor) -> bool return self.dst_entity.mode == blobxfer.models.azure.StorageModes.Block
[ "Remote destination is an Azure Block Blob\n :param Descriptor self: this\n :rtype: bool\n :return: remote is an Azure Block Blob\n " ]
Please provide a description of the function:def complete_offset_upload(self, chunk_num): # type: (Descriptor, int) -> None with self._meta_lock: self._outstanding_ops -= 1 # save resume state if self.is_resumable: # only set resumable completed if all replicas for this # chunk are complete if blobxfer.util.is_not_empty(self._dst_ase.replica_targets): if chunk_num not in self._replica_counters: # start counter at -1 since we need 1 "extra" for the # primary in addition to the replica targets self._replica_counters[chunk_num] = -1 self._replica_counters[chunk_num] += 1 if (self._replica_counters[chunk_num] != len(self._dst_ase.replica_targets)): return else: self._replica_counters.pop(chunk_num) self._completed_chunks.set(True, chunk_num) completed = self._outstanding_ops == 0 self._resume_mgr.add_or_update_record( self._dst_ase, self._src_block_list, self._offset, self._chunk_size, self._total_chunks, self._completed_chunks.int, completed, )
[ "Complete the upload for the offset\n :param Descriptor self: this\n :param int chunk_num: chunk num completed\n " ]
Please provide a description of the function:def _compute_chunk_size(self): # type: (Descriptor) -> int if self._src_block_list is not None: blen = len(self._src_block_list) if blen == 0: # this is a one-shot block blob return self._src_ase.size elif blen == 1: return self._src_block_list[0].size else: return -1 else: return _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES
[ "Compute chunk size given block list\n :param Descriptor self: this\n :rtype: int\n :return: chunk size bytes\n " ]
Please provide a description of the function:def _compute_total_chunks(self, chunk_size): # type: (Descriptor, int) -> int try: if self._src_block_list is not None: blen = len(self._src_block_list) if blen > 0: return blen else: return 1 else: return int(math.ceil(self._src_ase.size / chunk_size)) except ZeroDivisionError: return 1
[ "Compute total number of chunks for entity\n :param Descriptor self: this\n :param int chunk_size: chunk size\n :rtype: int\n :return: num chunks\n " ]
Please provide a description of the function:def _resume(self): # type: (Descriptor) -> int if self._resume_mgr is None or self._offset > 0: return None # check if path exists in resume db rr = self._resume_mgr.get_record(self._dst_ase) if rr is None: logger.debug('no resume record for {}'.format(self._dst_ase.path)) return None # ensure lengths are the same if rr.length != self._src_ase.size: logger.warning('resume length mismatch {} -> {}'.format( rr.length, self._src_ase.size)) return None # compute replica factor if blobxfer.util.is_not_empty(self._dst_ase.replica_targets): replica_factor = 1 + len(self._dst_ase.replica_targets) else: replica_factor = 1 # set offsets if completed if rr.completed: with self._meta_lock: logger.debug('{} upload already completed'.format( self._dst_ase.path)) self._offset = rr.offset self._src_block_list = rr.src_block_list self._chunk_num = rr.total_chunks self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks.int = rr.completed_chunks self._outstanding_ops = 0 return self._src_ase.size * replica_factor # re-hash from 0 to offset if needed _cc = bitstring.BitArray(length=rr.total_chunks) _cc.int = rr.completed_chunks curr_chunk = _cc.find('0b0')[0] del _cc # set values from resume with self._meta_lock: self._offset = rr.offset self._src_block_list = rr.src_block_list self._chunk_num = curr_chunk self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks = bitstring.BitArray(length=rr.total_chunks) self._completed_chunks.set(True, range(0, curr_chunk + 1)) self._outstanding_ops = ( (rr.total_chunks - curr_chunk) * replica_factor ) logger.debug( ('resuming file {} from byte={} chunk={} chunk_size={} ' 'total_chunks={} outstanding_ops={}').format( self._src_ase.path, self._offset, self._chunk_num, self._chunk_size, self._total_chunks, self._outstanding_ops)) return rr.offset * replica_factor
[ "Resume a download, if possible\n :param Descriptor self: this\n :rtype: int or None\n :return: verified download offset\n " ]
Please provide a description of the function:def next_offsets(self): # type: (Descriptor) -> Offsets resume_bytes = self._resume() with self._meta_lock: if self._chunk_num >= self._total_chunks: return None, resume_bytes if self._chunk_size == -1 and self._src_block_list is not None: num_bytes = self._src_block_list[self._chunk_num].size else: if self._offset + self._chunk_size > self._src_ase.size: num_bytes = self._src_ase.size - self._offset else: num_bytes = self._chunk_size chunk_num = self._chunk_num range_start = self._offset range_end = self._offset + num_bytes - 1 self._offset += num_bytes self._chunk_num += 1 return Offsets( chunk_num=chunk_num, num_bytes=num_bytes, range_start=range_start, range_end=range_end, ), resume_bytes
[ "Retrieve the next offsets\n :param Descriptor self: this\n :rtype: Offsets\n :return: download offsets\n " ]
Please provide a description of the function:def can_rename(self): # type: (LocalSourcePaths) -> bool return len(self._paths) == 1 and ( self._paths[0].is_file() or blobxfer.models.upload.LocalSourcePath.is_stdin( str(self._paths[0])) )
[ "Check if source can be renamed\n :param LocalSourcePath self: this\n :rtype: bool\n :return: if rename possible\n " ]
Please provide a description of the function:def files(self, dry_run): # type: (LocalSourcePaths, bool) -> LocalPath for _path in self._paths: _ppath = os.path.expandvars(os.path.expanduser(str(_path))) # check of path is stdin if blobxfer.models.upload.LocalSourcePath.is_stdin(_ppath): yield LocalPath( parent_path=pathlib.Path(), relative_path=pathlib.Path('stdin'), use_stdin=True, ) continue # resolve path _expath = pathlib.Path(_ppath).resolve() # check if path is a single file tmp = pathlib.Path(_ppath) if tmp.is_file(): if self._inclusion_check(tmp.name): yield LocalPath( parent_path=tmp.parent, relative_path=pathlib.Path(tmp.name), use_stdin=False, ) elif dry_run: logger.info( '[DRY RUN] skipping due to filters: {}'.format(tmp)) else: del tmp for entry in blobxfer.util.scantree(_ppath): _rpath = pathlib.Path(entry.path).relative_to(_ppath) if not self._inclusion_check(_rpath): if dry_run: logger.info( '[DRY RUN] skipping due to filters: {}'.format( _rpath)) continue yield LocalPath( parent_path=_expath, relative_path=_rpath, use_stdin=False, )
[ "Generator for files in paths\n :param LocalSourcePath self: this\n :param bool dry_run: dry run\n :rtype: LocalPath\n :return: LocalPath\n " ]
Please provide a description of the function:def remote_is_append_blob(self): # type: (Descriptor) -> bool return self.entity.mode == blobxfer.models.azure.StorageModes.Append
[ "Remote destination is an Azure Append Blob\n :param Descriptor self: this\n :rtype: bool\n :return: remote is an Azure Append Blob\n " ]
Please provide a description of the function:def complete_offset_upload(self, chunk_num): # type: (Descriptor, int) -> None with self._meta_lock: self._outstanding_ops -= 1 # save resume state if self.is_resumable: # only set resumable completed if all replicas for this # chunk are complete if blobxfer.util.is_not_empty(self._ase.replica_targets): if chunk_num not in self._replica_counters: # start counter at -1 since we need 1 "extra" for the # primary in addition to the replica targets self._replica_counters[chunk_num] = -1 self._replica_counters[chunk_num] += 1 if (self._replica_counters[chunk_num] != len(self._ase.replica_targets)): return else: self._replica_counters.pop(chunk_num) self._completed_chunks.set(True, chunk_num) completed = self._outstanding_ops == 0 if not completed and self.must_compute_md5: last_consecutive = ( self._completed_chunks.find('0b0')[0] - 1 ) md5digest = self._md5_cache[last_consecutive] else: md5digest = None self._resume_mgr.add_or_update_record( self.local_path.absolute_path, self._ase, self._chunk_size, self._total_chunks, self._completed_chunks.int, completed, md5digest, ) # prune md5 cache if self.must_compute_md5: if completed: self._md5_cache.clear() elif (len(self._md5_cache) > _MD5_CACHE_RESUME_ENTRIES_GC_THRESHOLD): mkeys = sorted(list(self._md5_cache.keys())) for key in mkeys: if key >= last_consecutive: break self._md5_cache.pop(key)
[ "Complete the upload for the offset\n :param Descriptor self: this\n :param int chunk_num: chunk num completed\n " ]
Please provide a description of the function:def _initialize_encryption(self, options): # type: (Descriptor, blobxfer.models.options.Upload) -> None if (options.rsa_public_key is not None and self.local_path.size > 0 and (self._ase.mode == blobxfer.models.azure.StorageModes.Block or self._ase.mode == blobxfer.models.azure.StorageModes.File)): em = blobxfer.models.crypto.EncryptionMetadata() em.create_new_metadata(options.rsa_public_key) self.current_iv = em.content_encryption_iv self._ase.encryption_metadata = em
[ "Download is resume capable\n :param Descriptor self: this\n :param blobxfer.models.options.Upload options: upload options\n " ]
Please provide a description of the function:def _compute_remote_size(self, options): # type: (Descriptor, blobxfer.models.options.Upload) -> None size = self.local_path.size if (self._ase.mode == blobxfer.models.azure.StorageModes.Page and self.local_path.use_stdin): if options.stdin_as_page_blob_size == 0: allocatesize = _MAX_PAGE_BLOB_SIZE self._needs_resize = True else: allocatesize = options.stdin_as_page_blob_size elif size > 0: if self._ase.is_encrypted: # cipher_len_without_iv = (clear_len / aes_bs + 1) * aes_bs allocatesize = (size // self._AES_BLOCKSIZE + 1) * \ self._AES_BLOCKSIZE else: allocatesize = size else: allocatesize = 0 self._ase.size = allocatesize if blobxfer.util.is_not_empty(self._ase.replica_targets): for rt in self._ase.replica_targets: rt.size = allocatesize if self._verbose: logger.debug('remote size for {} is {} bytes'.format( self._ase.path, self._ase.size))
[ "Compute total remote file size\n :param Descriptor self: this\n :param blobxfer.models.options.Upload options: upload options\n :rtype: int\n :return: remote file size\n " ]
Please provide a description of the function:def _adjust_chunk_size(self, options): # type: (Descriptor, blobxfer.models.options.Upload) -> None chunk_size = options.chunk_size_bytes # auto-select chunk size if chunk_size == 0: if self._ase.mode != blobxfer.models.azure.StorageModes.Block: chunk_size = _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES else: if self._ase.size == 0: chunk_size = _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES else: chunk_size = _DEFAULT_AUTO_CHUNKSIZE_BYTES while chunk_size < _MAX_BLOCK_BLOB_CHUNKSIZE_BYTES: chunks = int(math.ceil(self._ase.size / chunk_size)) if chunks <= _MAX_NUM_CHUNKS: break chunk_size = chunk_size << 1 if self._verbose: logger.debug( 'auto-selected chunk size of {} for {}'.format( chunk_size, self.local_path.absolute_path)) if self.local_path.use_stdin: self._chunk_size = max( (chunk_size, _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES) ) else: self._chunk_size = min((chunk_size, self._ase.size)) # ensure chunk sizes are compatible with mode if self._ase.mode == blobxfer.models.azure.StorageModes.Append: if self._chunk_size > _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES: self._chunk_size = _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES if self._verbose: logger.debug( ('adjusting chunk size to {} for append blob ' 'from {}').format( self._chunk_size, self.local_path.absolute_path)) elif self._ase.mode == blobxfer.models.azure.StorageModes.Block: if (not self.local_path.use_stdin and self._ase.size <= options.one_shot_bytes): self._chunk_size = min( (self._ase.size, options.one_shot_bytes) ) else: if self._chunk_size > _MAX_BLOCK_BLOB_CHUNKSIZE_BYTES: self._chunk_size = _MAX_BLOCK_BLOB_CHUNKSIZE_BYTES if self._verbose: logger.debug( ('adjusting chunk size to {} for block blob ' 'from {}').format( self._chunk_size, self.local_path.absolute_path)) elif self._ase.mode == blobxfer.models.azure.StorageModes.File: if self._chunk_size > _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES: self._chunk_size = _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES if self._verbose: logger.debug( 'adjusting chunk size to {} for file from {}'.format( self._chunk_size, self.local_path.absolute_path)) elif self._ase.mode == blobxfer.models.azure.StorageModes.Page: if self._ase.size > _MAX_PAGE_BLOB_SIZE: raise RuntimeError( '{} size {} exceeds maximum page blob size of {}'.format( self.local_path.absolute_path, self._ase.size, _MAX_PAGE_BLOB_SIZE)) if self._chunk_size > _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES: self._chunk_size = _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES if self._verbose: logger.debug( ('adjusting chunk size to {} for page blob ' 'from {}').format( self._chunk_size, self.local_path.absolute_path))
[ "Adjust chunk size for entity mode\n :param Descriptor self: this\n :param blobxfer.models.options.Upload options: upload options\n " ]
Please provide a description of the function:def _compute_total_chunks(self, chunk_size): # type: (Descriptor, int) -> int try: chunks = int(math.ceil(self._ase.size / chunk_size)) except ZeroDivisionError: chunks = 1 # for stdin, override and use 1 chunk to start, this will change # dynamically as data as read if self.local_path.use_stdin: chunks = 1 if (self._ase.mode != blobxfer.models.azure.StorageModes.Page and chunks > 50000): max_vector = False if self._ase.mode == blobxfer.models.azure.StorageModes.Block: if self._chunk_size == _MAX_BLOCK_BLOB_CHUNKSIZE_BYTES: max_vector = True elif self._chunk_size == _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES: max_vector = True if max_vector: raise RuntimeError( ('number of chunks {} exceeds maximum permissible ' 'limit and chunk size is set at the maximum value ' 'for {}. Please try using stripe mode ' 'vectorization to overcome this limitation').format( chunks, self.local_path.absolute_path)) else: raise RuntimeError( ('number of chunks {} exceeds maximum permissible ' 'limit for {}, please adjust chunk size higher or ' 'set to -1 for automatic chunk size selection').format( chunks, self.local_path.absolute_path)) return chunks
[ "Compute total number of chunks for entity\n :param Descriptor self: this\n :param int chunk_size: chunk size\n :rtype: int\n :return: num chunks\n " ]
Please provide a description of the function:def _initialize_integrity_checkers(self, options): # type: (Descriptor, blobxfer.models.options.Upload) -> None if self._ase.is_encrypted: # ensure symmetric key exists if blobxfer.util.is_none_or_empty( self._ase.encryption_metadata.symmetric_key): raise RuntimeError( ('symmetric key is invalid: provide RSA private key ' 'or metadata corrupt for {}').format( self.local_path.absolute_path)) self.hmac = self._ase.encryption_metadata.initialize_hmac() # both hmac and md5 can be enabled if (options.store_file_properties.md5 and not self.remote_is_append_blob): self.md5 = blobxfer.util.new_md5_hasher()
[ "Initialize file integrity checkers\n :param Descriptor self: this\n :param blobxfer.models.options.Upload options: upload options\n " ]
Please provide a description of the function:def _resume(self): # type: (Descriptor) -> int if self._resume_mgr is None or self._offset > 0: return None # check if path exists in resume db rr = self._resume_mgr.get_record(self._ase) if rr is None: logger.debug('no resume record for {}'.format(self._ase.path)) return None # ensure lengths are the same if rr.length != self._ase.size: logger.warning('resume length mismatch {} -> {}'.format( rr.length, self._ase.size)) return None # compute replica factor if blobxfer.util.is_not_empty(self._ase.replica_targets): replica_factor = 1 + len(self._ase.replica_targets) else: replica_factor = 1 # set offsets if completed if rr.completed: with self._meta_lock: logger.debug('{} upload already completed'.format( self._ase.path)) self._offset = rr.total_chunks * rr.chunk_size self._chunk_num = rr.total_chunks self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks.int = rr.completed_chunks self._outstanding_ops = 0 return self._ase.size * replica_factor # encrypted files are not resumable due to hmac requirement if self._ase.is_encrypted: logger.debug('cannot resume encrypted entity {}'.format( self._ase.path)) return None # check if path exists if not pathlib.Path(rr.local_path).exists(): logger.warning('resume from local path {} does not exist'.format( rr.local_path)) return None # re-hash from 0 to offset if needed _cc = bitstring.BitArray(length=rr.total_chunks) _cc.int = rr.completed_chunks curr_chunk = _cc.find('0b0')[0] del _cc _fd_offset = 0 _end_offset = min((curr_chunk * rr.chunk_size, rr.length)) if self.md5 is not None and curr_chunk > 0: _blocksize = blobxfer.util.MEGABYTE << 2 logger.debug( 'integrity checking existing file {} offset {} -> {}'.format( self._ase.path, self.local_path.view.fd_start, self.local_path.view.fd_start + _end_offset) ) with self._hasher_lock: with self.local_path.absolute_path.open('rb') as filedesc: filedesc.seek(self.local_path.view.fd_start, 0) while _fd_offset < _end_offset: if (_fd_offset + _blocksize) > _end_offset: _blocksize = _end_offset - _fd_offset _buf = filedesc.read(_blocksize) self.md5.update(_buf) _fd_offset += _blocksize del _blocksize # compare hashes hexdigest = self.md5.hexdigest() if rr.md5hexdigest != hexdigest: logger.warning( 'MD5 mismatch resume={} computed={} for {}'.format( rr.md5hexdigest, hexdigest, self._ase.path)) # reset hasher self.md5 = blobxfer.util.new_md5_hasher() return None # set values from resume with self._meta_lock: self._offset = _end_offset self._chunk_num = curr_chunk self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks = bitstring.BitArray(length=rr.total_chunks) self._completed_chunks.set(True, range(0, curr_chunk + 1)) self._outstanding_ops = ( (rr.total_chunks - curr_chunk) * replica_factor ) logger.debug( ('resuming file {} from byte={} chunk={} chunk_size={} ' 'total_chunks={} outstanding_ops={}').format( self._ase.path, self._offset, self._chunk_num, self._chunk_size, self._total_chunks, self._outstanding_ops)) return _end_offset * replica_factor
[ "Resume upload\n :param Descriptor self: this\n :rtype: int\n :return: resume bytes\n " ]
Please provide a description of the function:def next_offsets(self): # type: (Descriptor) -> Offsets resume_bytes = self._resume() with self._meta_lock: if self._chunk_num >= self._total_chunks: return None, resume_bytes if self._offset + self._chunk_size > self._ase.size: num_bytes = self._ase.size - self._offset else: num_bytes = self._chunk_size chunk_num = self._chunk_num range_start = self._offset range_end = self._offset + num_bytes - 1 self._offset += num_bytes self._chunk_num += 1 if self._ase.is_encrypted and self._offset >= self._ase.size: pad = True else: pad = False return Offsets( chunk_num=chunk_num, num_bytes=num_bytes, range_start=range_start, range_end=range_end, pad=pad, ), resume_bytes
[ "Retrieve the next offsets\n :param Descriptor self: this\n :rtype: Offsets\n :return: upload offsets\n " ]