Code | Summary |
---|---|
Please provide a description of the function:def base64_encode_as_string(obj): # noqa
# type: (any) -> str
if on_python2():
return base64.b64encode(obj)
else:
return str(base64.b64encode(obj), 'ascii') | [
"Encode object to base64\n :param any obj: object to encode\n :rtype: str\n :return: base64 encoded string\n "
]
|
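A minimal usage sketch of the helper above, assuming Python 3 (where `base64.b64encode` returns bytes and must be decoded to yield a `str`), e.g. for a Content-MD5 style value:

```python
import base64
import hashlib

digest = hashlib.md5(b'some file content').digest()  # 16 raw bytes
encoded = str(base64.b64encode(digest), 'ascii')
print(len(encoded), encoded.endswith('=='))          # 24 True
```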
Please provide a description of the function:def page_align_content_length(length):
# type: (int) -> int
mod = length % _PAGEBLOB_BOUNDARY
if mod != 0:
return length + (_PAGEBLOB_BOUNDARY - mod)
return length | [
"Compute page boundary alignment\n :param int length: content length\n :rtype: int\n :return: aligned byte boundary\n "
]
|
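A short worked example of the alignment math, assuming `_PAGEBLOB_BOUNDARY` is 512 bytes (the Azure page blob page size):

```python
# round content lengths up to the next 512-byte page boundary
boundary = 512
for length in (0, 1, 512, 1000):
    mod = length % boundary
    aligned = length if mod == 0 else length + (boundary - mod)
    print(length, '->', aligned)   # 0 -> 0, 1 -> 512, 512 -> 512, 1000 -> 1024
```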
Please provide a description of the function:def normalize_azure_path(path):
# type: (str) -> str
if is_none_or_empty(path):
raise ValueError('provided path is invalid')
_path = path.strip('/').strip('\\')
return '/'.join(re.split('/|\\\\', _path)) | [
"Normalize remote path (strip slashes and use forward slashes)\n :param str path: path to normalize\n :rtype: str\n :return: normalized path\n "
]
|
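A standalone sketch of the same normalization, showing how surrounding separators are stripped and backslashes rewritten to forward slashes:

```python
import re

def normalize(path):
    # strip surrounding separators, then split on either separator style
    _path = path.strip('/').strip('\\')
    return '/'.join(re.split('/|\\\\', _path))

print(normalize('container\\dir\\file.bin'))    # container/dir/file.bin
print(normalize('/container/dir/file.bin/'))    # container/dir/file.bin
```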
Please provide a description of the function:def explode_azure_path(path):
# type: (str) -> Tuple[str, str]
rpath = normalize_azure_path(path).split('/')
container = str(rpath[0])
if len(rpath) > 1:
rpath = '/'.join(rpath[1:])
else:
rpath = ''
return container, rpath | [
"Explodes an azure path into a container or fileshare and the\n remaining virtual path\n :param str path: path to explode\n :rtype: tuple\n :return: container, vpath\n "
]
|
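A simplified sketch of the split (backslash normalization omitted): the first path component is the container or fileshare, the remainder is the virtual path and may be empty:

```python
def explode(path):
    rpath = path.strip('/').split('/')
    container = rpath[0]
    vpath = '/'.join(rpath[1:]) if len(rpath) > 1 else ''
    return container, vpath

print(explode('mycontainer/some/dir/blob.bin'))  # ('mycontainer', 'some/dir/blob.bin')
print(explode('mycontainer'))                    # ('mycontainer', '')
```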
Please provide a description of the function:def blob_is_snapshot(url):
# type: (str) -> bool
if '?snapshot=' in url:
try:
dateutil.parser.parse(url.split('?snapshot=')[-1])
return True
except (ValueError, OverflowError):
pass
return False | [
"Checks if the blob is a snapshot blob\n :param url str: blob url\n :rtype: bool\n :return: if blob is a snapshot blob\n "
]
|
Please provide a description of the function:def parse_blob_snapshot_parameter(url):
# type: (str) -> Tuple[str, str]
if blob_is_snapshot(url):
tmp = url.split('?snapshot=')
if len(tmp) == 2:
return tmp[0], tmp[1]
return None | [
"Retrieves the blob snapshot parameter from a url\n :param url str: blob url\n :rtype: str\n :return: snapshot parameter\n "
]
|
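A small sketch of the snapshot-parameter handling, assuming `python-dateutil` is installed (it is what the functions above use to validate the timestamp); the URL is hypothetical:

```python
import dateutil.parser  # third-party: python-dateutil

url = ('https://sa.blob.core.windows.net/cont/blob.bin'
       '?snapshot=2017-10-31T00:00:00.0000000Z')
base, _, snapshot = url.partition('?snapshot=')
dateutil.parser.parse(snapshot)  # raises ValueError if not a timestamp
print(base)       # https://sa.blob.core.windows.net/cont/blob.bin
print(snapshot)   # 2017-10-31T00:00:00.0000000Z
```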
Please provide a description of the function:def parse_fileshare_or_file_snapshot_parameter(url):
# type: (str) -> Tuple[str, str]
if is_not_empty(url):
if '?sharesnapshot=' in url:
try:
tmp = url.split('?sharesnapshot=')
if len(tmp) == 2:
dateutil.parser.parse(tmp[1])
return tmp[0], tmp[1]
except (ValueError, OverflowError):
pass
elif '?snapshot=' in url:
try:
tmp = url.split('?snapshot=')
if len(tmp) == 2:
dateutil.parser.parse(tmp[1])
return tmp[0], tmp[1]
except (ValueError, OverflowError):
pass
return url, None | [
"Checks if the fileshare or file is a snapshot\n :param url str: file url\n :rtype: tuple\n :return: (url, snapshot)\n "
]
|
Please provide a description of the function:def explode_azure_storage_url(url):
# type: (str) -> Tuple[str, str, str, str, str]
tmp = url.split('/')
host = tmp[2].split('.')
sa = host[0]
mode = host[1].lower()
ep = '.'.join(host[2:])
tmp = '/'.join(tmp[3:]).split('?')
rpath = tmp[0]
if len(tmp) > 1:
sas = tmp[1]
else:
sas = None
return sa, mode, ep, rpath, sas | [
"Explode Azure Storage URL into parts\n :param url str: storage url\n :rtype: tuple\n :return: (sa, mode, ep, rpath, sas)\n "
]
|
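A worked example of the URL explosion above, using a hypothetical blob URL with a SAS query string:

```python
url = ('https://mysa.blob.core.windows.net/mycontainer/dir/blob.bin'
       '?st=2021-01-01&sig=abc')
parts = url.split('/')
host = parts[2].split('.')
sa = host[0]                               # 'mysa'
mode = host[1].lower()                     # 'blob'
ep = '.'.join(host[2:])                    # 'core.windows.net'
rest = '/'.join(parts[3:]).split('?')
rpath = rest[0]                            # 'mycontainer/dir/blob.bin'
sas = rest[1] if len(rest) > 1 else None   # 'st=2021-01-01&sig=abc'
print(sa, mode, ep, rpath, sas)
```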
Please provide a description of the function:def ensure_path_exists(self):
# type: (LocalDestinationPath) -> None
if self._is_dir is None:
raise RuntimeError('is_dir not set')
if self._is_dir:
self._path.mkdir(mode=0o750, parents=True, exist_ok=True)
else:
if self._path.exists() and self._path.is_dir():
raise RuntimeError(
('destination path {} already exists and is a '
'directory').format(self._path))
else:
# ensure parent path exists and is created
self._path.parent.mkdir(
mode=0o750, parents=True, exist_ok=True) | [
"Ensure path exists\n :param LocalDestinationPath self: this\n "
]
|
Please provide a description of the function:def _compute_total_chunks(self, chunk_size):
# type: (Descriptor, int) -> int
try:
return int(math.ceil(self._ase.size / chunk_size))
except ZeroDivisionError:
return 0 | [
"Compute total number of chunks for entity\n :param Descriptor self: this\n :param int chunk_size: chunk size\n :rtype: int\n :return: num chunks\n "
]
|
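A quick worked example of the chunk-count arithmetic (assuming Python 3 true division):

```python
import math

size = 100 * 1024 ** 2 + 1    # 100 MiB plus one byte
chunk_size = 4 * 1024 ** 2    # 4 MiB chunks
print(int(math.ceil(size / chunk_size)))   # 26

try:
    chunks = int(math.ceil(size / 0))      # zero chunk size
except ZeroDivisionError:
    chunks = 0
print(chunks)                              # 0
```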
Please provide a description of the function:def _initialize_integrity_checkers(self, options):
# type: (Descriptor, blobxfer.models.options.Download) -> None
if self._ase.is_encrypted:
# ensure symmetric key exists
if blobxfer.util.is_none_or_empty(
self._ase.encryption_metadata.symmetric_key):
raise RuntimeError(
'symmetric key is invalid: provide RSA private key '
'or metadata corrupt')
self.hmac = self._ase.encryption_metadata.initialize_hmac()
if (self.hmac is None and options.check_file_md5 and
blobxfer.util.is_not_empty(self._ase.md5)):
self.md5 = blobxfer.util.new_md5_hasher() | [
"Initialize file integrity checkers\n :param Descriptor self: this\n :param blobxfer.models.options.Download options: download options\n "
]
|
Please provide a description of the function:def compute_allocated_size(size, is_encrypted):
# type: (int, bool) -> int
# compute size
if size > 0:
if is_encrypted:
# cipher_len_without_iv = (clear_len / aes_bs + 1) * aes_bs
allocatesize = (
size //
blobxfer.models.download.Descriptor._AES_BLOCKSIZE - 1
) * blobxfer.models.download.Descriptor._AES_BLOCKSIZE
if allocatesize < 0:
raise RuntimeError('allocatesize is negative')
else:
allocatesize = size
else:
allocatesize = 0
return allocatesize | [
"Compute allocated size on disk\n :param int size: size (content length)\n :param bool is_ecrypted: if entity is encrypted\n :rtype: int\n :return: required size on disk\n "
]
|
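A worked example of the encrypted-size arithmetic, assuming `_AES_BLOCKSIZE` is 16 (the AES block size). The encrypted content length includes PKCS7 padding, so one block is subtracted to get a safe lower bound for the on-disk allocation:

```python
AES_BLOCKSIZE = 16

size = 1008   # encrypted content length (PKCS7-padded)
allocated = (size // AES_BLOCKSIZE - 1) * AES_BLOCKSIZE
print(allocated)   # 992: the smallest plaintext a 1008-byte ciphertext can unpad to
```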
Please provide a description of the function:def generate_view(ase):
# type: (blobxfer.models.azure.StorageEntity) ->
# Tuple[LocalPathView, int]
slicesize = blobxfer.models.download.Descriptor.compute_allocated_size(
ase.size, ase.is_encrypted)
if ase.vectored_io is None:
view = LocalPathView(
fd_start=0,
fd_end=slicesize,
)
total_size = slicesize
else:
view = LocalPathView(
fd_start=ase.vectored_io.offset_start,
fd_end=ase.vectored_io.offset_start + slicesize,
)
total_size = ase.vectored_io.total_size
return view, total_size | [
"Generate local path view and total size required\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :rtype: tuple\n :return: (local path view, allocation size)\n "
]
|
Please provide a description of the function:def convert_vectored_io_slice_to_final_path_name(local_path, ase):
# type: (pathlib.Path,
# blobxfer.models.azure.StorageEntity) -> pathlib.Path
name = blobxfer.models.metadata.\
remove_vectored_io_slice_suffix_from_name(
local_path.name, ase.vectored_io.slice_id)
_tmp = list(local_path.parts[:-1])
_tmp.append(name)
return pathlib.Path(*_tmp) | [
"Convert vectored io slice to final path name\n :param pathlib.Path local_path: local path\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :rtype: pathlib.Path\n :return: converted final path\n "
]
|
Please provide a description of the function:def _set_final_path_view(self):
# type: (Descriptor) -> int
# set final path if vectored io stripe
if self._ase.vectored_io is not None:
self.final_path = blobxfer.models.download.Descriptor.\
convert_vectored_io_slice_to_final_path_name(
self.final_path, self._ase)
# generate view
view, total_size = blobxfer.models.download.Descriptor.generate_view(
self._ase)
self.view = view
return total_size | [
"Set final path view and return required space on disk\n :param Descriptor self: this\n :rtype: int\n :return: required size on disk\n "
]
|
Please provide a description of the function:def _allocate_disk_space(self):
# type: (Descriptor) -> None
with self._meta_lock:
if self._allocated or self._offset != 0:
return
# set local path view
allocatesize = self._set_final_path_view()
# check if path already exists and is of sufficient size
if (not self.final_path.exists() or
self.final_path.stat().st_size != allocatesize):
# create parent path
self.final_path.parent.mkdir(
mode=0o750, parents=True, exist_ok=True)
# allocate file
with self.final_path.open('wb') as fd:
if allocatesize > 0:
try:
os.posix_fallocate(fd.fileno(), 0, allocatesize)
except AttributeError:
fd.seek(allocatesize - 1)
fd.write(b'\0')
self._allocated = True | [
"Perform file allocation (possibly sparse)\n :param Descriptor self: this\n "
]
|
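A standalone sketch of the allocation fallback above, using a hypothetical local path: `os.posix_fallocate` is used where available, otherwise the file is extended by seeking to the last byte and writing a single NUL (sparse on most filesystems):

```python
import os
import pathlib

path = pathlib.Path('example.bin')   # hypothetical path
allocatesize = 4 * 1024 ** 2         # 4 MiB
with path.open('wb') as fd:
    if allocatesize > 0:
        try:
            os.posix_fallocate(fd.fileno(), 0, allocatesize)
        except AttributeError:       # platform without posix_fallocate
            fd.seek(allocatesize - 1)
            fd.write(b'\0')
print(path.stat().st_size)           # 4194304
path.unlink()
```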
Please provide a description of the function:def _resume(self):
# type: (Descriptor) -> int
if self._resume_mgr is None or self._offset > 0 or self._finalized:
return None
# check if path exists in resume db
rr = self._resume_mgr.get_record(self._ase)
if rr is None:
logger.debug('no resume record for {}'.format(self.final_path))
return None
# ensure lengths are the same
if rr.length != self._ase.size:
logger.warning('resume length mismatch {} -> {}'.format(
rr.length, self._ase.size))
return None
# calculate current chunk and offset
if rr.next_integrity_chunk == 0:
logger.debug('nothing to resume for {}'.format(self.final_path))
return None
curr_chunk = rr.next_integrity_chunk
# set offsets if completed and the final path exists
if rr.completed and self.final_path.exists():
with self._meta_lock:
logger.debug('{} download already completed'.format(
self.final_path))
self._offset = self._ase.size
self._chunk_num = curr_chunk
self._chunk_size = rr.chunk_size
self._total_chunks = self._compute_total_chunks(rr.chunk_size)
self._next_integrity_chunk = rr.next_integrity_chunk
self._outstanding_ops = 0
self._finalized = True
return self._ase.size
# encrypted files are not resumable due to hmac requirement
if self._ase.is_encrypted:
logger.debug('cannot resume encrypted entity {}'.format(
self._ase.path))
return None
self._allocate_disk_space()
# check if final path exists
if not self.final_path.exists(): # noqa
logger.warning('download path {} does not exist'.format(
self.final_path))
return None
if self.hmac is not None:
raise RuntimeError(
'unexpected hmac object for entity {}'.format(self._ase.path))
# re-hash from 0 to offset if needed
_fd_offset = 0
_end_offset = min((curr_chunk * rr.chunk_size, rr.length))
if self.md5 is not None and curr_chunk > 0:
_blocksize = blobxfer.util.MEGABYTE << 2
logger.debug(
'integrity checking existing file {} offset {} -> {}'.format(
self.final_path,
self.view.fd_start,
self.view.fd_start + _end_offset)
)
with self._hasher_lock:
with self.final_path.open('rb') as filedesc:
filedesc.seek(self.view.fd_start, 0)
while _fd_offset < _end_offset:
if (_fd_offset + _blocksize) > _end_offset:
_blocksize = _end_offset - _fd_offset
_buf = filedesc.read(_blocksize)
self.md5.update(_buf)
_fd_offset += _blocksize
del _blocksize
# compare hashes
hexdigest = self.md5.hexdigest()
if rr.md5hexdigest != hexdigest:
logger.warning(
'MD5 mismatch resume={} computed={} for {}'.format(
rr.md5hexdigest, hexdigest, self.final_path))
# reset hasher
self.md5 = blobxfer.util.new_md5_hasher()
return None
# set values from resume
with self._meta_lock:
self._offset = _end_offset
self._chunk_num = curr_chunk
self._chunk_size = rr.chunk_size
self._total_chunks = self._compute_total_chunks(rr.chunk_size)
self._next_integrity_chunk = rr.next_integrity_chunk
self._outstanding_ops = (
self._total_chunks - self._next_integrity_chunk
)
logger.debug(
('resuming file {} from byte={} chunk={} chunk_size={} '
'total_chunks={} next_integrity_chunk={} '
'outstanding_ops={}').format(
self.final_path, self._offset, self._chunk_num,
self._chunk_size, self._total_chunks,
self._next_integrity_chunk, self._outstanding_ops))
return _end_offset | [
"Resume a download, if possible\n :param Descriptor self: this\n :rtype: int or None\n :return: verified download offset\n "
]
|
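A minimal sketch of the re-hash step above, written as a hypothetical standalone helper: it re-reads the already-downloaded byte region in 4 MiB blocks (clamping the final read) and returns the MD5 hexdigest to compare against the resume record:

```python
import hashlib

def rehash_region(path, start, end, blocksize=4 * 1024 ** 2):
    md5 = hashlib.md5()
    with open(path, 'rb') as fd:
        fd.seek(start, 0)
        offset = start
        while offset < end:
            if offset + blocksize > end:
                blocksize = end - offset
            md5.update(fd.read(blocksize))
            offset += blocksize
    return md5.hexdigest()

# usage: compare rehash_region('file.bin', 0, verified_bytes) with the
# md5hexdigest stored in the resume record before trusting the offset
```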
Please provide a description of the function:def cleanup_all_temporary_files(self):
# type: (Descriptor) -> None
# delete local file
try:
self.final_path.unlink()
except OSError:
pass
# iterate unchecked chunks and delete
for key in self._unchecked_chunks:
ucc = self._unchecked_chunks[key]['ucc']
if ucc.temp:
try:
ucc.file_path.unlink()
except OSError:
pass | [
"Cleanup all temporary files in case of an exception or interrupt.\n This function is not thread-safe.\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def next_offsets(self):
# type: (Descriptor) -> Offsets
resume_bytes = self._resume()
if resume_bytes is None and not self._allocated:
self._allocate_disk_space()
with self._meta_lock:
if self._offset >= self._ase.size:
return None, resume_bytes
if self._offset + self._chunk_size > self._ase.size:
chunk = self._ase.size - self._offset
else:
chunk = self._chunk_size
# on download, num_bytes must be offset by -1 as the x-ms-range
# header expects it that way. x -> y bytes means first bits of the
# (x+1)th byte to the last bits of the (y+1)th byte. for example,
# 0 -> 511 means byte 1 to byte 512
num_bytes = chunk - 1
chunk_num = self._chunk_num
fd_start = self._offset
range_start = self._offset
if self._ase.is_encrypted:
# ensure start is AES block size aligned
range_start = range_start - \
(range_start % self._AES_BLOCKSIZE) - \
self._AES_BLOCKSIZE
if range_start <= 0:
range_start = 0
range_end = self._offset + num_bytes
self._offset += chunk
self._chunk_num += 1
if self._ase.is_encrypted and self._offset >= self._ase.size:
unpad = True
else:
unpad = False
return Offsets(
chunk_num=chunk_num,
fd_start=fd_start,
num_bytes=chunk,
range_start=range_start,
range_end=range_end,
unpad=unpad,
), resume_bytes | [
"Retrieve the next offsets\n :param Descriptor self: this\n :rtype: Offsets\n :return: download offsets\n "
]
|
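A worked example of the offset math for an unencrypted entity, showing why `num_bytes` is offset by -1: the `x-ms-range` header end is inclusive, so `bytes=0-1048575` covers exactly 1 MiB. For encrypted entities the range start is additionally aligned down to an AES block boundary and walked back one extra block so the preceding ciphertext block is available as the IV.

```python
size = 2500000          # entity size in bytes
chunk_size = 1048576    # 1 MiB
offset = 0
while offset < size:
    chunk = min(chunk_size, size - offset)
    range_start = offset
    range_end = offset + chunk - 1   # inclusive end
    print('bytes={}-{}'.format(range_start, range_end))
    offset += chunk
# bytes=0-1048575
# bytes=1048576-2097151
# bytes=2097152-2499999
```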
Please provide a description of the function:def write_unchecked_data(self, offsets, data):
# type: (Descriptor, Offsets, bytes) -> None
self.write_data(offsets, data)
unchecked = UncheckedChunk(
data_len=len(data),
fd_start=self.view.fd_start + offsets.fd_start,
file_path=self.final_path,
temp=False,
)
with self._meta_lock:
self._unchecked_chunks[offsets.chunk_num] = {
'ucc': unchecked,
'decrypted': True,
} | [
"Write unchecked data to disk\n :param Descriptor self: this\n :param Offsets offsets: download offsets\n :param bytes data: data\n "
]
|
Please provide a description of the function:def write_unchecked_hmac_data(self, offsets, data):
# type: (Descriptor, Offsets, bytes) -> str
fname = None
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fd:
fname = fd.name
fd.write(data)
unchecked = UncheckedChunk(
data_len=len(data),
fd_start=0,
file_path=pathlib.Path(fname),
temp=True,
)
with self._meta_lock:
self._unchecked_chunks[offsets.chunk_num] = {
'ucc': unchecked,
'decrypted': False,
}
return str(unchecked.file_path) | [
"Write unchecked encrypted data to disk\n :param Descriptor self: this\n :param Offsets offsets: download offsets\n :param bytes data: hmac/encrypted data\n "
]
|
Please provide a description of the function:def perform_chunked_integrity_check(self):
# type: (Descriptor) -> None
hasher = self.hmac or self.md5
# iterate from next chunk to be checked
while True:
ucc = None
with self._meta_lock:
chunk_num = self._next_integrity_chunk
# check if the next chunk is ready
if (chunk_num in self._unchecked_chunks and
self._unchecked_chunks[chunk_num]['decrypted']):
ucc = self._unchecked_chunks.pop(chunk_num)['ucc']
else:
break
# hash data and set next integrity chunk
md5hexdigest = None
if hasher is not None:
with ucc.file_path.open('rb') as fd:
if not ucc.temp:
fd.seek(ucc.fd_start, 0)
chunk = fd.read(ucc.data_len)
if ucc.temp:
ucc.file_path.unlink()
with self._hasher_lock:
hasher.update(chunk)
if hasher == self.md5:
md5hexdigest = hasher.hexdigest()
with self._meta_lock:
# update integrity counter and resume db
self._next_integrity_chunk += 1
if self.is_resumable:
self._resume_mgr.add_or_update_record(
self.final_path, self._ase, self._chunk_size,
self._next_integrity_chunk, False, md5hexdigest,
)
# decrement outstanding op counter
self._outstanding_ops -= 1 | [
"Hash data against stored hasher safely\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def _update_resume_for_completed(self):
# type: (Descriptor) -> None
if not self.is_resumable:
return
with self._meta_lock:
self._resume_mgr.add_or_update_record(
self.final_path, self._ase, self._chunk_size,
self._next_integrity_chunk, True, None,
) | [
"Update resume for completion\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def write_data(self, offsets, data):
# type: (Descriptor, Offsets, bytes) -> None
if len(data) > 0:
# offset from internal view
pos = self.view.fd_start + offsets.fd_start
with self.final_path.open('r+b') as fd:
fd.seek(pos, 0)
fd.write(data) | [
"Write data to disk\n :param Descriptor self: this\n :param Offsets offsets: download offsets\n :param bytes data: data\n "
]
|
Please provide a description of the function:def finalize_integrity(self):
# type: (Descriptor) -> None
with self._meta_lock:
if self._finalized:
return
# check final file integrity
check = False
msg = None
if self.hmac is not None:
mac = self._ase.encryption_metadata.encryption_authentication.\
message_authentication_code
digest = blobxfer.util.base64_encode_as_string(self.hmac.digest())
if digest == mac:
check = True
msg = '{}: {}, {} {} <L..R> {}'.format(
self._ase.encryption_metadata.encryption_authentication.
algorithm,
'OK' if check else 'MISMATCH',
self._ase.path,
digest,
mac,
)
elif self.md5 is not None:
digest = blobxfer.util.base64_encode_as_string(self.md5.digest())
if digest == self._ase.md5:
check = True
msg = 'MD5: {}, {} {} <L..R> {}'.format(
'OK' if check else 'MISMATCH',
self._ase.path,
digest,
self._ase.md5,
)
else:
check = True
msg = 'MD5: SKIPPED, {} None <L..R> {}'.format(
self._ase.path,
self._ase.md5
)
# cleanup if download failed
if not check:
self._integrity_failed = True
logger.error(msg)
else:
logger.info(msg) | [
"Finalize integrity check for download\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def _restore_file_attributes(self):
# type: (Descriptor) -> None
if (not self._restore_file_properties.attributes or
self._ase.file_attributes is None):
return
# set file uid/gid and mode
if blobxfer.util.on_windows(): # noqa
# TODO not implemented yet
pass
else:
self.final_path.chmod(int(self._ase.file_attributes.mode, 8))
if os.getuid() == 0: # noqa
os.chown(
str(self.final_path),
self._ase.file_attributes.uid,
self._ase.file_attributes.gid
) | [
"Restore file attributes for file\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def _restore_file_lmt(self):
# type: (Descriptor) -> None
if not self._restore_file_properties.lmt or self._ase.lmt is None:
return
# timestamp() func is not available in py27
ts = time.mktime(self._ase.lmt.timetuple())
os.utime(str(self.final_path), (ts, ts)) | [
"Restore file lmt for file\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def finalize_file(self):
# type: (Descriptor) -> None
# delete bad file if integrity failed
if self._integrity_failed:
self.final_path.unlink()
else:
self._restore_file_attributes()
self._restore_file_lmt()
# update resume file
self._update_resume_for_completed()
with self._meta_lock:
self._finalized = True | [
"Finalize file for download\n :param Descriptor self: this\n "
]
|
Please provide a description of the function:def termination_check(self):
# type: (Uploader) -> bool
with self._upload_lock:
with self._transfer_lock:
return (self._upload_terminate or
len(self._exceptions) > 0 or
(self._all_files_processed and
len(self._upload_set) == 0 and
len(self._transfer_set) == 0)) | [
"Check if terminated\n :param Uploader self: this\n :rtype: bool\n :return: if terminated\n "
]
|
Please provide a description of the function:def termination_check_md5(self):
# type: (Uploader) -> bool
with self._md5_meta_lock:
with self._upload_lock:
return (self._upload_terminate or
(self._all_files_processed and
len(self._md5_map) == 0 and
len(self._upload_set) == 0)) | [
"Check if terminated from MD5 context\n :param Uploader self: this\n :rtype: bool\n :return: if terminated from MD5 context\n "
]
|
Please provide a description of the function:def create_unique_id(src, ase):
# type: (blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity) -> str
return ';'.join(
(str(src.absolute_path), ase._client.primary_endpoint, ase.path)
) | [
"Create a unique id given a LocalPath and StorageEntity\n :param blobxfer.models.upload.LocalPath src: local path\n :param blobxfer.models.azure.StorageEntity ase: azure storage entity\n :rtype: str\n :return: unique id for pair\n "
]
|
Please provide a description of the function:def create_unique_transfer_id(local_path, ase, offsets):
# type: (blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets) -> str
return ';'.join(
(str(local_path.absolute_path), ase._client.primary_endpoint,
ase.path, str(local_path.view.fd_start), str(offsets.range_start))
) | [
"Create a unique transfer id given a offsets\n :param blobxfer.models.upload.LocalPath local_path: local path\n :param blobxfer.models.azure.StorageEntity ase: azure storage entity\n :param blobxfer.models.upload.Offsets offsets: upload offsets\n :rtype: str\n :return: unique id for transfer\n "
]
|
Please provide a description of the function:def create_destination_id(client, container, name):
# type: (azure.storage.StorageClient, str, str) -> str
path = str(pathlib.PurePath(name))
return ';'.join((client.primary_endpoint, container, path)) | [
"Create a unique destination id\n :param azure.storage.StorageClient client: storage client\n :param str container: container name\n :param str name: entity name\n :rtype: str\n :return: unique id for the destination\n "
]
|
Please provide a description of the function:def _update_progress_bar(self, stdin=False):
# type: (Uploader, bool) -> None
if not self._all_files_processed:
return
blobxfer.operations.progress.update_progress_bar(
self._general_options,
'upload',
self._upload_start_time,
self._upload_total,
self._upload_sofar,
self._upload_bytes_total,
self._upload_bytes_sofar,
stdin_upload=stdin,
) | [
"Update progress bar\n :param Uploader self: this\n :param bool stdin: stdin upload\n "
]
|
Please provide a description of the function:def _pre_md5_skip_on_check(self, src, rfile):
# type: (Uploader, blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity) -> None
md5 = blobxfer.models.metadata.get_md5_from_metadata(rfile)
key = blobxfer.operations.upload.Uploader.create_unique_id(src, rfile)
with self._md5_meta_lock:
self._md5_map[key] = (src, rfile)
self._md5_offload.add_localfile_for_md5_check(
key, None, str(src.absolute_path), md5, rfile.mode, src.view) | [
"Perform pre MD5 skip on check\n :param Uploader self: this\n :param blobxfer.models.upload.LocalPath src: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n "
]
|
Please provide a description of the function:def _post_md5_skip_on_check(self, key, md5_match):
# type: (Uploader, str, bool) -> None
with self._md5_meta_lock:
src, rfile = self._md5_map.pop(key)
uid = blobxfer.operations.upload.Uploader.create_unique_id(src, rfile)
if md5_match:
with self._upload_lock:
self._upload_set.remove(uid)
self._upload_total -= 1
if self._general_options.dry_run:
logger.info('[DRY RUN] MD5 match, skipping: {} -> {}'.format(
src.absolute_path, rfile.path))
else:
if self._general_options.dry_run:
with self._upload_lock:
self._upload_set.remove(uid)
self._upload_total -= 1
logger.info('[DRY RUN] MD5 mismatch, upload: {} -> {}'.format(
src.absolute_path, rfile.path))
else:
self._add_to_upload_queue(src, rfile, uid) | [
"Perform post MD5 skip on check\n :param Uploader self: this\n :param str key: md5 map key\n :param bool md5_match: if MD5 matches\n "
]
|
Please provide a description of the function:def _check_for_uploads_from_md5(self):
# type: (Uploader) -> None
cv = self._md5_offload.done_cv
while not self.termination_check_md5:
result = None
cv.acquire()
while True:
result = self._md5_offload.pop_done_queue()
if result is None:
# use cv timeout due to possible non-wake while running
cv.wait(1)
# check for terminating conditions
if self.termination_check_md5:
break
else:
break
cv.release()
if result is not None:
self._post_md5_skip_on_check(result[0], result[3]) | [
"Check queue for a file to upload\n :param Uploader self: this\n "
]
|
Please provide a description of the function:def _add_to_upload_queue(self, src, rfile, uid):
# type: (Uploader, blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity, str) -> None
# prepare local file for upload
ud = blobxfer.models.upload.Descriptor(
src, rfile, uid, self._spec.options, self._general_options,
self._resume)
if ud.entity.is_encrypted:
with self._upload_lock:
self._ud_map[uid] = ud
# add upload descriptor to queue
self._upload_queue.put(ud)
if self._upload_start_time is None:
with self._upload_lock:
if self._upload_start_time is None:
self._upload_start_time = blobxfer.util.datetime_now() | [
"Add remote file to download queue\n :param Uploader self: this\n :param blobxfer.models.upload.LocalPath src: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n :param str uid: unique id\n "
]
|
Please provide a description of the function:def _initialize_disk_threads(self):
# type: (Uploader) -> None
logger.debug('spawning {} disk threads'.format(
self._general_options.concurrency.disk_threads))
for _ in range(self._general_options.concurrency.disk_threads):
thr = threading.Thread(target=self._worker_thread_upload)
self._disk_threads.append(thr)
thr.start() | [
"Initialize disk threads\n :param Uploader self: this\n "
]
|
Please provide a description of the function:def _wait_for_disk_threads(self, terminate):
# type: (Uploader, bool) -> None
if terminate:
self._upload_terminate = terminate
for thr in self._disk_threads:
thr.join() | [
"Wait for disk threads\n :param Uploader self: this\n :param bool terminate: terminate threads\n "
]
|
Please provide a description of the function:def _wait_for_transfer_threads(self, terminate):
# type: (Uploader, bool) -> None
if terminate:
self._upload_terminate = terminate
for thr in self._transfer_threads:
thr.join() | [
"Wait for transfer threads\n :param Uploader self: this\n :param bool terminate: terminate threads\n "
]
|
Please provide a description of the function:def _worker_thread_transfer(self):
# type: (Uploader) -> None
while not self.termination_check:
try:
ud, ase, offsets, data = self._transfer_queue.get(
block=False, timeout=0.1)
except queue.Empty:
continue
try:
self._process_transfer(ud, ase, offsets, data)
except Exception as e:
with self._upload_lock:
self._exceptions.append(e) | [
"Worker thread transfer\n :param Uploader self: this\n "
]
|
Please provide a description of the function:def _process_transfer(self, ud, ase, offsets, data):
# type: (Uploader, blobxfer.models.upload.Descriptor,
# blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets, bytes) -> None
# issue put range
self._put_data(ud, ase, offsets, data)
# accounting
with self._transfer_lock:
if ud.local_path.use_stdin:
self._upload_bytes_total += offsets.num_bytes
elif offsets.chunk_num == 0:
self._upload_bytes_total += ase.size
self._upload_bytes_sofar += offsets.num_bytes
self._transfer_set.remove(
blobxfer.operations.upload.Uploader.create_unique_transfer_id(
ud.local_path, ase, offsets))
ud.complete_offset_upload(offsets.chunk_num)
# add descriptor back to upload queue only for append blobs
if ud.entity.mode == blobxfer.models.azure.StorageModes.Append:
self._upload_queue.put(ud)
# update progress bar
self._update_progress_bar(stdin=ud.local_path.use_stdin) | [
"Process transfer instructions\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param blobxfer.models.azure.StorageEntity ase: Storage entity\n :param blobxfer.models.upload.Offsets offsets: offsets\n :param bytes data: data to upload\n "
]
|
Please provide a description of the function:def _put_data(self, ud, ase, offsets, data):
# type: (Uploader, blobxfer.models.upload.Descriptor,
# blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets, bytes) -> None
if ase.mode == blobxfer.models.azure.StorageModes.Append:
# append block
if data is not None:
blobxfer.operations.azure.blob.append.append_block(ase, data)
elif ase.mode == blobxfer.models.azure.StorageModes.Block:
# handle one-shot uploads
if ud.is_one_shot_block_blob:
metadata = ud.generate_metadata()
if not ud.entity.is_encrypted and ud.must_compute_md5:
digest = blobxfer.util.base64_encode_as_string(
ud.md5.digest())
else:
digest = None
blobxfer.operations.azure.blob.block.create_blob(
ase, data, digest, metadata)
return
# upload block
if data is not None:
blobxfer.operations.azure.blob.block.put_block(
ase, offsets, data)
elif ase.mode == blobxfer.models.azure.StorageModes.File:
# upload range
if data is not None:
blobxfer.operations.azure.file.put_file_range(
ase, offsets, data)
elif ase.mode == blobxfer.models.azure.StorageModes.Page:
if data is None:
return
# compute aligned size
aligned = blobxfer.util.page_align_content_length(
offsets.num_bytes)
# align page
if aligned != offsets.num_bytes:
data = data.ljust(aligned, b'\0')
if blobxfer.operations.md5.check_data_is_empty(data):
return
# upload page
blobxfer.operations.azure.blob.page.put_page(
ase, offsets.range_start, offsets.range_start + aligned - 1,
data) | [
"Put data in Azure\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param blobxfer.models.azure.StorageEntity ase: Storage entity\n :param blobxfer.models.upload.Offsets offsets: offsets\n :param bytes data: data to upload\n "
]
|
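A small sketch of the page-blob padding step, assuming a 512-byte page boundary: the final partial page is zero-filled up to the boundary before the put, and all-zero pages are skipped entirely:

```python
boundary = 512
data = b'x' * 700
mod = len(data) % boundary
aligned = len(data) if mod == 0 else len(data) + (boundary - mod)
if aligned != len(data):
    data = data.ljust(aligned, b'\0')
print(len(data))                   # 1024
print(data.count(b'\0'))           # 324 zero bytes of padding
print(data == b'\0' * len(data))   # False: not an empty page, so it is uploaded
```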
Please provide a description of the function:def _worker_thread_upload(self):
# type: (Uploader) -> None
max_set_len = self._general_options.concurrency.transfer_threads << 2
while not self.termination_check:
try:
if len(self._transfer_set) > max_set_len:
time.sleep(0.1)
continue
else:
ud = self._upload_queue.get(block=False, timeout=0.1)
except queue.Empty:
continue
try:
self._process_upload_descriptor(ud)
except Exception as e:
with self._upload_lock:
self._exceptions.append(e) | [
"Worker thread upload\n :param Uploader self: this\n "
]
|
Please provide a description of the function:def _prepare_upload(self, ase):
# type: (Uploader, blobxfer.models.azure.StorageEntity) -> None
if ase.mode == blobxfer.models.azure.StorageModes.Append:
if ase.append_create:
# create container if necessary
blobxfer.operations.azure.blob.create_container(
ase, self._containers_created)
# create remote blob
blobxfer.operations.azure.blob.append.create_blob(ase)
elif ase.mode == blobxfer.models.azure.StorageModes.Block:
# create container if necessary
blobxfer.operations.azure.blob.create_container(
ase, self._containers_created)
elif ase.mode == blobxfer.models.azure.StorageModes.File:
# create share directory structure
with self._fileshare_dir_lock:
# create container if necessary
blobxfer.operations.azure.file.create_share(
ase, self._containers_created)
# create parent directories
blobxfer.operations.azure.file.create_all_parent_directories(
ase, self._dirs_created)
# create remote file
blobxfer.operations.azure.file.create_file(ase)
elif ase.mode == blobxfer.models.azure.StorageModes.Page:
# create container if necessary
blobxfer.operations.azure.blob.create_container(
ase, self._containers_created)
# create remote blob
blobxfer.operations.azure.blob.page.create_blob(ase) | [
"Prepare upload\n :param Uploader self: this\n :param blobxfer.models.azure.StorageEntity ase: Storage entity\n "
]
|
Please provide a description of the function:def _process_upload_descriptor(self, ud):
# type: (Uploader, blobxfer.models.upload.Descriptor) -> None
# get upload offsets
offsets, resume_bytes = ud.next_offsets()
# add resume bytes to counter
if resume_bytes is not None:
with self._transfer_lock:
self._upload_bytes_total += ud.entity.size
self._upload_bytes_sofar += resume_bytes
logger.debug('adding {} sofar {} from {}'.format(
resume_bytes, self._upload_bytes_sofar, ud.entity.name))
del resume_bytes
# check if all operations completed
if offsets is None and ud.all_operations_completed:
# finalize file
self._finalize_upload(ud)
# accounting
with self._upload_lock:
if ud.entity.is_encrypted:
self._ud_map.pop(ud.unique_id)
self._upload_set.remove(ud.unique_id)
self._upload_sofar += 1
return
# if nothing to upload, re-enqueue for finalization
if offsets is None:
self._upload_queue.put(ud)
return
# prepare upload
if offsets.chunk_num == 0:
self._prepare_upload(ud.entity)
# prepare replica targets
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
if offsets.chunk_num == 0:
self._prepare_upload(ase)
# encrypt if necessary
if ud.entity.is_encrypted and ud.entity.size > 0:
# send iv through hmac if first chunk
if offsets.chunk_num == 0:
ud.hmac_data(ud.current_iv)
# encrypt data
if self._crypto_offload is None:
# read data from file and encrypt
data, _ = ud.read_data(offsets)
encdata = blobxfer.operations.crypto.aes_cbc_encrypt_data(
ud.entity.encryption_metadata.symmetric_key,
ud.current_iv, data, offsets.pad)
# send encrypted data through hmac
ud.hmac_data(encdata)
data = encdata
# save last 16 encrypted bytes for next IV
ud.current_iv = \
encdata[-blobxfer.models.crypto.AES256_BLOCKSIZE_BYTES:]
else: # noqa
# crypto offload is not supported with AES256-CBC FullBlob
raise NotImplementedError()
# self._crypto_offload.add_encrypt_chunk(
# str(ud.local_path.absolute_path), offsets,
# ud.entity.encryption_metadata.symmetric_key,
# ud.current_iv)
# encrypted data will be retrieved from a temp file once
# retrieved from crypto queue
# return_early = True
else:
data, newoffset = ud.read_data(offsets)
# set new offset if stdin
if newoffset is not None:
offsets = newoffset
# re-enqueue for other threads to upload if not append
if ud.entity.mode != blobxfer.models.azure.StorageModes.Append:
self._upload_queue.put(ud)
# no data can be returned on stdin uploads
if ud.local_path.use_stdin and not data:
return
# add data to transfer queue
with self._transfer_lock:
self._transfer_set.add(
blobxfer.operations.upload.Uploader.create_unique_transfer_id(
ud.local_path, ud.entity, offsets))
self._transfer_queue.put((ud, ud.entity, offsets, data))
# iterate replicas
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
with self._transfer_lock:
self._transfer_set.add(
blobxfer.operations.upload.Uploader.
create_unique_transfer_id(ud.local_path, ase, offsets)
)
self._transfer_queue.put((ud, ase, offsets, data)) | [
"Process upload descriptor\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor: upload descriptor\n "
]
|
Please provide a description of the function:def _finalize_block_blob(self, ud, metadata):
# type: (Uploader, blobxfer.models.upload.Descriptor, dict) -> None
if not ud.entity.is_encrypted and ud.must_compute_md5:
digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
else:
digest = None
blobxfer.operations.azure.blob.block.put_block_list(
ud.entity, ud.last_block_num, digest, metadata)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.blob.block.put_block_list(
ase, ud.last_block_num, digest, metadata) | [
"Finalize Block blob\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param dict metadata: metadata dict\n "
]
|
Please provide a description of the function:def _set_blob_properties(self, ud):
# type: (Uploader, blobxfer.models.upload.Descriptor) -> None
if ud.requires_non_encrypted_md5_put:
digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
else:
digest = None
blobxfer.operations.azure.blob.set_blob_properties(ud.entity, digest)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_properties(ase, digest) | [
"Set blob properties (md5, cache control)\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n "
]
|
Please provide a description of the function:def _set_blob_metadata(self, ud, metadata):
# type: (Uploader, blobxfer.models.upload.Descriptor, dict) -> None
blobxfer.operations.azure.blob.set_blob_metadata(ud.entity, metadata)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_metadata(ase, metadata) | [
"Set blob metadata\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param dict metadata: metadata dict\n "
]
|
Please provide a description of the function:def _resize_blob(self, ud, size):
# type: (Uploader, blobxfer.models.upload.Descriptor, int) -> None
blobxfer.operations.azure.blob.page.resize_blob(ud.entity, size)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.blob.page.resize_blob(ase, size) | [
"Resize page blob\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param int size: content length\n "
]
|
Please provide a description of the function:def _finalize_nonblock_blob(self, ud, metadata):
# type: (Uploader, blobxfer.models.upload.Descriptor, dict) -> None
# resize blobs to final size if required
needs_resize, final_size = ud.requires_resize()
if needs_resize:
self._resize_blob(ud, final_size)
# set md5 page blob property if required
if (ud.requires_non_encrypted_md5_put or
ud.entity.cache_control is not None):
self._set_blob_properties(ud)
# set metadata if needed
if blobxfer.util.is_not_empty(metadata):
self._set_blob_metadata(ud, metadata) | [
"Finalize Non-Block blob\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param dict metadata: metadata dict\n "
]
|
Please provide a description of the function:def _finalize_azure_file(self, ud, metadata):
# type: (Uploader, blobxfer.models.upload.Descriptor, dict) -> None
# set md5 file property if required
if ud.requires_non_encrypted_md5_put:
digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
else:
digest = None
if digest is not None or ud.entity.cache_control is not None:
blobxfer.operations.azure.file.set_file_properties(
ud.entity, digest)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.file.set_file_properties(
ase, digest)
# set file metadata if needed
if blobxfer.util.is_not_empty(metadata):
blobxfer.operations.azure.file.set_file_metadata(
ud.entity, metadata)
if blobxfer.util.is_not_empty(ud.entity.replica_targets):
for ase in ud.entity.replica_targets:
blobxfer.operations.azure.file.set_file_metadata(
ase, metadata) | [
"Finalize Azure File\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n :param dict metadata: metadata dict\n "
]
|
Please provide a description of the function:def _finalize_upload(self, ud):
# type: (Uploader, blobxfer.models.upload.Descriptor) -> None
metadata = ud.generate_metadata()
if ud.requires_put_block_list:
# put block list for non one-shot block blobs
self._finalize_block_blob(ud, metadata)
elif ud.remote_is_page_blob or ud.remote_is_append_blob:
# append and page blob finalization
self._finalize_nonblock_blob(ud, metadata)
elif ud.remote_is_file:
# azure file finalization
self._finalize_azure_file(ud, metadata)
# set access tier
if ud.requires_access_tier_set:
blobxfer.operations.azure.blob.block.set_blob_access_tier(
ud.entity) | [
"Finalize file upload\n :param Uploader self: this\n :param blobxfer.models.upload.Descriptor ud: upload descriptor\n "
]
|
Please provide a description of the function:def _get_destination_paths(self):
# type: (Uploader) ->
# Tuple[blobxfer.operations.azure.StorageAccount, str, str, str]
for dst in self._spec.destinations:
for dpath in dst.paths:
sdpath = str(dpath)
cont, dir = blobxfer.util.explode_azure_path(sdpath)
sa = self._creds.get_storage_account(
dst.lookup_storage_account(sdpath))
yield sa, cont, dir, dpath | [
"Get destination paths\n :param Uploader self: this\n :rtype: tuple\n :return: (storage account, container, name, dpath)\n "
]
|
Please provide a description of the function:def _delete_extraneous_files(self):
# type: (Uploader) -> None
if not self._spec.options.delete_extraneous_destination:
return
# list blobs for all destinations
checked = set()
deleted = 0
for sa, container, vpath, dpath in self._get_destination_paths():
key = ';'.join((sa.name, sa.endpoint, str(dpath)))
if key in checked:
continue
logger.debug(
'attempting to delete extraneous blobs/files from: {}'.format(
key))
if (self._spec.options.mode ==
blobxfer.models.azure.StorageModes.File):
files = blobxfer.operations.azure.file.list_all_files(
sa.file_client, container)
for file in files:
try:
pathlib.Path(file).relative_to(vpath)
except ValueError:
continue
id = blobxfer.operations.upload.Uploader.\
create_destination_id(sa.file_client, container, file)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting file: {}'.format(
file))
else:
if self._general_options.verbose:
logger.debug('deleting file: {}'.format(file))
blobxfer.operations.azure.file.delete_file(
sa.file_client, container, file)
deleted += 1
else:
blobs = blobxfer.operations.azure.blob.list_all_blobs(
sa.block_blob_client, container)
for blob in blobs:
try:
pathlib.Path(blob.name).relative_to(vpath)
except ValueError:
continue
id = blobxfer.operations.upload.Uploader.\
create_destination_id(
sa.block_blob_client, container, blob.name)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting blob: {}'.format(
blob.name))
else:
if self._general_options.verbose:
logger.debug('deleting blob: {}'.format(
blob.name))
blobxfer.operations.azure.blob.delete_blob(
sa.block_blob_client, container, blob.name)
deleted += 1
checked.add(key)
logger.info('deleted {} extraneous blobs/files'.format(deleted)) | [
"Delete extraneous files on the remote\n :param Uploader self: this\n "
]
|
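A small sketch of the prefix filter used above: `relative_to` raises `ValueError` for remote entries that live outside the destination virtual path, and those entries are skipped rather than considered for deletion (names are hypothetical):

```python
import pathlib

vpath = 'backups/2021'
for name in ('backups/2021/a.bin', 'backups/2020/b.bin', 'other/c.bin'):
    try:
        pathlib.Path(name).relative_to(vpath)
    except ValueError:
        continue
    print('candidate for deletion check:', name)
# candidate for deletion check: backups/2021/a.bin
```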
Please provide a description of the function:def _check_upload_conditions(self, local_path, rfile):
# type: (Uploader, blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity) -> UploadAction
lpath = local_path.absolute_path
# check if local file still exists
if not local_path.use_stdin and not lpath.exists():
if self._general_options.verbose:
logger.warning(
'skipping file that no longer exists: {}'.format(lpath))
return UploadAction.Skip
# if remote file doesn't exist, upload
if rfile is None or rfile.from_local:
return UploadAction.Upload
# check overwrite option
if not self._spec.options.overwrite:
if rfile.mode == blobxfer.models.azure.StorageModes.Append:
rfile.append_create = False
return UploadAction.Upload
logger.info(
'not overwriting remote file: {} (local: {})'.format(
rfile.path, lpath))
return UploadAction.Skip
# check skip on options, MD5 match takes priority
md5 = blobxfer.models.metadata.get_md5_from_metadata(rfile)
if self._spec.skip_on.md5_match and blobxfer.util.is_not_empty(md5):
return UploadAction.CheckMd5
# if neither of the remaining skip on actions are activated, upload
if (not self._spec.skip_on.filesize_match and
not self._spec.skip_on.lmt_ge):
return UploadAction.Upload
# check skip on file size match
ul_fs = None
if self._spec.skip_on.filesize_match:
lsize = local_path.size
if rfile.mode == blobxfer.models.azure.StorageModes.Page:
lsize = blobxfer.util.page_align_content_length(lsize)
if rfile.size == lsize:
ul_fs = False
if self._general_options.verbose:
logger.debug('filesize match: {} == {} size={}'.format(
local_path.absolute_path, rfile.path, lsize))
else:
ul_fs = True
# check skip on lmt ge
ul_lmt = None
if self._spec.skip_on.lmt_ge:
mtime = blobxfer.util.datetime_from_timestamp(
local_path.lmt, as_utc=True)
if rfile.lmt >= mtime:
ul_lmt = False
if self._general_options.verbose:
logger.debug('lmt ge match: {} lmt={} >= {} lmt={}'.format(
rfile.path, rfile.lmt, local_path.absolute_path,
mtime))
else:
ul_lmt = True
# upload if either skip on mismatch is True
if ul_fs or ul_lmt:
return UploadAction.Upload
else:
return UploadAction.Skip | [
"Check for upload conditions\n :param Uploader self: this\n :param blobxfer.models.LocalPath local_path: local path\n :param blobxfer.models.azure.StorageEntity rfile: remote file\n :rtype: UploadAction\n :return: upload action\n "
]
|
Please provide a description of the function:def _check_for_existing_remote(self, sa, cont, name):
# type: (Uploader, blobxfer.operations.azure.StorageAccount,
# str, str) -> blobxfer.models.azure.StorageEntity
if self._spec.options.mode == blobxfer.models.azure.StorageModes.File:
fp = blobxfer.operations.azure.file.get_file_properties(
sa.file_client, cont, name)
else:
fp = blobxfer.operations.azure.blob.get_blob_properties(
sa.block_blob_client, cont, name, self._spec.options.mode)
if fp is not None:
if blobxfer.models.crypto.EncryptionMetadata.\
encryption_metadata_exists(fp.metadata):
ed = blobxfer.models.crypto.EncryptionMetadata()
ed.convert_from_json(fp.metadata, fp.name, None)
else:
ed = None
ase = blobxfer.models.azure.StorageEntity(cont, ed)
if (self._spec.options.mode ==
blobxfer.models.azure.StorageModes.File):
dir, _, _ = blobxfer.operations.azure.file.parse_file_path(
name)
ase.populate_from_file(sa, fp, dir)
else:
ase.populate_from_blob(sa, fp)
# always overwrite cache control with option
ase.cache_control = (
self._spec.options.store_file_properties.cache_control
)
# overwrite tier with specified storage tier
if ase.mode == blobxfer.models.azure.StorageModes.Block:
ase.access_tier = self._spec.options.access_tier
else:
ase = None
return ase | [
"Check for an existing remote file\n :param Uploader self: this\n :param blobxfer.operations.azure.StorageAccount sa: storage account\n :param str cont: container\n :param str name: entity name\n :rtype: blobxfer.models.azure.StorageEntity\n :return: remote storage entity\n "
]
|
Please provide a description of the function:def _generate_destination_for_source(self, local_path):
# type: (Uploader, blobxfer.models.upload.LocalSourcePath) ->
# Tuple[blobxfer.operations.azure.StorageAccount,
# blobxfer.models.azure.StorageEntity]
# construct stripped destination path
spath = local_path.relative_path
# apply strip components
if self._spec.options.strip_components > 0:
_rparts = local_path.relative_path.parts
_strip = min(
(len(_rparts) - 1, self._spec.options.strip_components)
)
if _strip > 0:
spath = pathlib.Path(*_rparts[_strip:])
# create a storage entity for each destination
for sa, cont, name, dpath in self._get_destination_paths():
# if not renaming, form name from with spath
if not self._spec.options.rename and not local_path.use_stdin:
name = str(name / spath)
if blobxfer.util.is_none_or_empty(name):
raise ValueError(
('invalid destination, must specify a container or '
'fileshare and remote file name: {}').format(dpath))
# do not check for existing remote right now if striped
# vectored io mode
if (self._spec.options.vectored_io.distribution_mode ==
blobxfer.models.upload.
VectoredIoDistributionMode.Stripe):
ase = None
else:
if sa.can_read_object:
ase = self._check_for_existing_remote(sa, cont, name)
else:
ase = None
if ase is None:
# encryption metadata will be populated later, if required
ase = blobxfer.models.azure.StorageEntity(cont, ed=None)
ase.populate_from_local(
sa, cont, name, self._spec.options.mode,
self._spec.options.store_file_properties.cache_control)
if ase.mode == blobxfer.models.azure.StorageModes.Block:
ase.access_tier = self._spec.options.access_tier
yield sa, ase | [
"Generate entities for source path\n :param Uploader self: this\n :param blobxfer.models.upload.LocalSourcePath local_path: local path\n :rtype: tuple\n :return: storage account, storage entity\n "
]
|
Please provide a description of the function:def _vectorize_and_bind(self, local_path, dest):
# type: (Uploader, blobxfer.models.upload.LocalPath,
# List[blobxfer.models.azure.StorageEntity]) ->
# Tuple[blobxfer.operations.upload.UploadAction,
# blobxfer.models.upload.LocalPath,
# blobxfer.models.azure.StorageEntity]
if (self._spec.options.vectored_io.distribution_mode ==
blobxfer.models.upload.VectoredIoDistributionMode.Stripe and
not local_path.use_stdin):
# compute total number of slices
slices = int(math.ceil(
local_path.total_size /
self._spec.options.vectored_io.stripe_chunk_size_bytes))
# check if vectorization is possible
if slices == 1:
sa, ase = dest[0]
if not sa.can_write_object:
raise RuntimeError(
'unable to write to remote path {} as credential '
'for storage account {} does not permit write '
'access'.format(ase.path, sa.name))
action = self._check_upload_conditions(local_path, ase)
yield action, local_path, ase
return
num_dest = len(dest)
logger.debug(
'{} slices for vectored out of {} to {} destinations'.format(
slices, local_path.absolute_path, num_dest))
# pre-populate slice map for next pointers
slice_map = {}
for i in range(0, slices):
sa, ase = dest[i % num_dest]
if not sa.can_write_object:
raise RuntimeError(
'unable to write to remote path {} as credential '
'for storage account {} does not permit write '
'access'.format(ase.path, sa.name))
name = blobxfer.operations.upload.Uploader.\
append_slice_suffix_to_name(ase.name, i)
sase = self._check_for_existing_remote(sa, ase.container, name)
if sase is None:
# encryption metadata will be populated later, if required
sase = blobxfer.models.azure.StorageEntity(
ase.container, ed=None)
sase.populate_from_local(
sa, ase.container, name, self._spec.options.mode,
self._spec.options.store_file_properties.cache_control)
if sase.mode == blobxfer.models.azure.StorageModes.Block:
sase.access_tier = self._spec.options.access_tier
slice_map[i] = sase
# create new local path to ase mappings
curr = 0
for i in range(0, slices):
start = curr
end = (
curr +
self._spec.options.vectored_io.stripe_chunk_size_bytes
)
if end > local_path.total_size:
end = local_path.total_size
ase = slice_map[i]
if i < slices - 1:
next_entry = blobxfer.models.metadata.\
create_vectored_io_next_entry(slice_map[i + 1])
else:
next_entry = None
lp_slice = blobxfer.models.upload.LocalPath(
parent_path=local_path.parent_path,
relative_path=local_path.relative_path,
use_stdin=False,
view=blobxfer.models.upload.LocalPathView(
fd_start=start,
fd_end=end,
slice_num=i,
mode=self._spec.options.vectored_io.distribution_mode,
total_slices=slices,
next=next_entry,
)
)
action = self._check_upload_conditions(lp_slice, ase)
yield action, lp_slice, ase
curr = end
elif (self._spec.options.vectored_io.distribution_mode ==
blobxfer.models.upload.VectoredIoDistributionMode.Replica):
action_map = {}
for sa, ase in dest:
if not sa.can_write_object:
raise RuntimeError(
'unable to write to remote path {} as credential '
'for storage account {} does not permit write '
'access'.format(ase.path, sa.name))
action = self._check_upload_conditions(local_path, ase)
if action not in action_map:
action_map[action] = []
action_map[action].append(ase)
for action in action_map:
dst = action_map[action]
if len(dst) == 1:
yield action, local_path, dst[0]
else:
if (action == UploadAction.CheckMd5 or
action == UploadAction.Skip):
for ase in dst:
yield action, local_path, ase
else:
primary_ase = dst[0]
if primary_ase.replica_targets is None:
primary_ase.replica_targets = []
primary_ase.replica_targets.extend(dst[1:])
# add replica targets to deletion exclusion set
if self._spec.options.delete_extraneous_destination:
for rt in primary_ase.replica_targets:
self._delete_exclude.add(
blobxfer.operations.upload.Uploader.
create_destination_id(
rt._client, rt.container, rt.name)
)
yield action, local_path, primary_ase
else:
for sa, ase in dest:
if not sa.can_write_object:
raise RuntimeError(
'unable to write to remote path {} as credential '
'for storage account {} does not permit write '
'access'.format(ase.path, sa.name))
action = self._check_upload_conditions(local_path, ase)
yield action, local_path, ase | [
"Vectorize local path to destinations, if necessary, and bind\n :param Uploader self: this\n :param blobxfer.models.LocalPath local_path: local path\n :param list dest: list of destination tuples (sa, ase)\n :rtype: tuple\n :return: action, LocalPath, ase\n "
]
|
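A worked example of the stripe slicing and round-robin destination assignment, using hypothetical sizes and targets: a 10 MiB file striped in 4 MiB chunks across two destinations yields three slices:

```python
import math

total_size = 10 * 1024 ** 2
stripe_chunk = 4 * 1024 ** 2
destinations = ['sa0/cont', 'sa1/cont']              # hypothetical targets
slices = int(math.ceil(total_size / stripe_chunk))   # 3
curr = 0
for i in range(slices):
    end = min(curr + stripe_chunk, total_size)
    dest = destinations[i % len(destinations)]
    print('slice {}: bytes {}-{} -> {}'.format(i, curr, end - 1, dest))
    curr = end
# slice 0: bytes 0-4194303 -> sa0/cont
# slice 1: bytes 4194304-8388607 -> sa1/cont
# slice 2: bytes 8388608-10485759 -> sa0/cont
```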
Please provide a description of the function:def _run(self):
# type: (Uploader) -> None
# mark start
self._start_time = blobxfer.util.datetime_now()
logger.info('blobxfer start time: {0}'.format(self._start_time))
# check renames
if not self._spec.sources.can_rename() and self._spec.options.rename:
raise RuntimeError(
'cannot rename to specified destination with multiple sources')
# initialize resume db if specified
if self._general_options.resume_file is not None:
self._resume = blobxfer.operations.resume.UploadResumeManager(
self._general_options.resume_file)
# initialize MD5 processes
if ((self._spec.options.store_file_properties.md5 or
self._spec.skip_on.md5_match) and
self._general_options.concurrency.md5_processes > 0):
self._md5_offload = blobxfer.operations.md5.LocalFileMd5Offload(
num_workers=self._general_options.concurrency.md5_processes)
self._md5_offload.initialize_check_thread(
self._check_for_uploads_from_md5)
# initialize crypto processes
if (self._spec.options.rsa_public_key is not None and
self._general_options.concurrency.crypto_processes > 0):
logger.warning(
'crypto offload for upload is not possible due to '
'sequential nature of {} and FullBlob encryption mode'.format(
blobxfer.models.crypto.EncryptionMetadata.
_ENCRYPTION_ALGORITHM)
)
# initialize worker threads
self._initialize_disk_threads()
self._initialize_transfer_threads()
# initialize local counters
files_processed = 0
skipped_files = 0
skipped_size = 0
approx_total_bytes = 0
# iterate through source paths to upload
seen = set()
for src in self._spec.sources.files(self._general_options.dry_run):
# create a destination array for the source
dest = [
(sa, ase) for sa, ase in
self._generate_destination_for_source(src)
]
for action, lp, ase in self._vectorize_and_bind(src, dest):
dest_id = blobxfer.operations.upload.Uploader.\
create_destination_id(ase._client, ase.container, ase.name)
if dest_id in seen:
raise RuntimeError(
'duplicate destination entity detected: {}/{}'.format(
ase._client.primary_endpoint, ase.path))
seen.add(dest_id)
if self._spec.options.delete_extraneous_destination:
self._delete_exclude.add(dest_id)
files_processed += 1
if action == UploadAction.Skip:
skipped_files += 1
skipped_size += ase.size if ase.size is not None else 0
if self._general_options.dry_run:
logger.info('[DRY RUN] skipping: {} -> {}'.format(
lp.absolute_path, ase.path))
continue
approx_total_bytes += lp.size
if blobxfer.util.is_not_empty(ase.replica_targets):
approx_total_bytes += lp.size * len(ase.replica_targets)
# add to potential upload set
uid = blobxfer.operations.upload.Uploader.create_unique_id(
lp, ase)
with self._upload_lock:
self._upload_set.add(uid)
self._upload_total += 1
if action == UploadAction.CheckMd5:
self._pre_md5_skip_on_check(lp, ase)
elif action == UploadAction.Upload:
if self._general_options.dry_run:
logger.info('[DRY RUN] upload: {} -> {}'.format(
lp.absolute_path, ase.path))
with self._upload_lock:
self._upload_set.remove(uid)
self._upload_total -= 1
else:
self._add_to_upload_queue(lp, ase, uid)
del seen
# set remote files processed
with self._md5_meta_lock:
self._all_files_processed = True
with self._upload_lock:
upload_size_mib = approx_total_bytes / blobxfer.util.MEGABYTE
logger.debug(
('{0} files {1:.4f} MiB filesize and/or lmt_ge '
'skipped').format(
skipped_files, skipped_size / blobxfer.util.MEGABYTE))
logger.debug(
('{0} local files processed, waiting for upload '
'completion of approx. {1:.4f} MiB').format(
files_processed, upload_size_mib))
del files_processed
del skipped_files
del skipped_size
del upload_size_mib
del approx_total_bytes
# wait for uploads to complete
self._wait_for_disk_threads(terminate=False)
self._wait_for_transfer_threads(terminate=False)
end_time = blobxfer.util.datetime_now()
# update progress bar
self._update_progress_bar()
# check for exceptions
if len(self._exceptions) > 0:
logger.error('exceptions encountered while uploading')
# raise the first one
raise self._exceptions[0]
# check for mismatches
if (self._upload_sofar != self._upload_total or
self._upload_bytes_sofar != self._upload_bytes_total):
raise RuntimeError(
'upload mismatch: [count={}/{} bytes={}/{}]'.format(
self._upload_sofar, self._upload_total,
self._upload_bytes_sofar, self._upload_bytes_total))
# delete all remaining local files not accounted for if
# delete extraneous enabled
self._delete_extraneous_files()
# delete resume file if we've gotten this far
if self._resume is not None:
self._resume.delete()
# output throughput
if self._upload_start_time is not None:
ultime = (end_time - self._upload_start_time).total_seconds()
if ultime == 0: # noqa
ultime = 1e-9
mibup = self._upload_bytes_total / blobxfer.util.MEGABYTE
mibps = mibup / ultime
logger.info(
('elapsed upload + verify time and throughput of {0:.4f} '
'GiB: {1:.3f} sec, {2:.4f} Mbps ({3:.3f} MiB/s)').format(
mibup / 1024, ultime, mibps * 8, mibps))
end_time = blobxfer.util.datetime_now()
logger.info('blobxfer end time: {0} (elapsed: {1:.3f} sec)'.format(
end_time, (end_time - self._start_time).total_seconds())) | [
"Execute Uploader\n :param Uploader self: this\n "
]
|
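As a quick sanity check of the throughput arithmetic at the end of the upload run above, the conversion from total bytes and elapsed seconds to MiB/s and Mbps can be worked through with hypothetical numbers (MEGABYTE is assumed to be 2**20, as in blobxfer.util):

MEGABYTE = 1048576                        # assumed value of blobxfer.util.MEGABYTE
upload_bytes_total = 2 * 1024 * MEGABYTE  # 2 GiB uploaded (hypothetical)
ultime = 64.0                             # elapsed seconds (hypothetical)
mibup = upload_bytes_total / MEGABYTE     # 2048.0 MiB
mibps = mibup / ultime                    # 32.0 MiB/s
print('{0:.4f} GiB, {1:.4f} Mbps ({2:.3f} MiB/s)'.format(
    mibup / 1024, mibps * 8, mibps))      # 2.0000 GiB, 256.0000 Mbps (32.000 MiB/s)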
Please provide a description of the function:def start(self):
# type: (Uploader) -> None
try:
blobxfer.operations.progress.output_parameters(
self._general_options, self._spec)
self._run()
except (KeyboardInterrupt, Exception) as ex:
if isinstance(ex, KeyboardInterrupt):
logger.info(
'KeyboardInterrupt detected, force terminating '
'processes and threads (this may take a while)...')
else:
logger.exception(ex)
self._wait_for_transfer_threads(terminate=True)
self._wait_for_disk_threads(terminate=True)
finally:
# shutdown processes
if self._md5_offload is not None:
self._md5_offload.finalize_processes()
if self._crypto_offload is not None:
self._crypto_offload.finalize_processes()
# close resume file
if self._resume is not None:
self._resume.close() | [
"Start the Uploader\n :param Uploader self: this\n "
]
|
Please provide a description of the function:def _should_retry(self, context):
# type: (ExponentialRetryWithMaxWait,
# azure.storage.common.models.RetryContext) -> bool
# do not retry if max attempts equal or exceeded
if context.count >= self.max_attempts:
return False
# get response status
status = None
if context.response and context.response.status:
status = context.response.status
# if there is no response status, then handle the exception
# appropriately from the lower layer
if status is None:
exc = context.exception
# default to not retry in unknown/unhandled exception case
ret = False
# requests timeout, retry
if isinstance(exc, requests.Timeout):
ret = True
elif isinstance(exc, requests.exceptions.ContentDecodingError):
ret = True
elif (isinstance(exc, requests.exceptions.ConnectionError) or
isinstance(exc, requests.exceptions.ChunkedEncodingError)):
# newer versions of requests do not expose errno on the
# args[0] reason object; manually string parse
if isinstance(exc.args[0], urllib3.exceptions.MaxRetryError):
try:
msg = exc.args[0].reason.args[0]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass
else:
if any(x in msg for x in _RETRYABLE_ERRNO_MAXRETRY):
ret = True
elif isinstance(exc.args[0], urllib3.exceptions.ProtocolError):
try:
msg = exc.args[0].args[0]
except (AttributeError, IndexError):
# unexpected/malformed exception hierarchy, don't retry
pass
else:
if any(x in msg for x in _RETRYABLE_ERRNO_PROTOCOL):
ret = True
# fallback to string search
if not ret:
msg = str(exc).lower()
if any(x in msg for x in _RETRYABLE_STRING_FALLBACK):
ret = True
return ret
elif 200 <= status < 300:
            # failure during response body download or parsing, so success
# codes should be retried
return True
elif 300 <= status < 500:
# response code 404 should be retried if secondary was used
if (status == 404 and
context.location_mode ==
azure.storage.common.models.LocationMode.SECONDARY):
return True
# response code 408 is a timeout and should be retried
# response code 429 is too many requests (throttle)
# TODO use "Retry-After" header for backoff amount
if status == 408 or status == 429:
return True
return False
elif status >= 500:
# response codes above 500 should be retried except for
# 501 (not implemented) and 505 (version not supported)
if status == 501 or status == 505:
return False
return True
else: # noqa
# this should be unreachable, retry anyway
return True | [
"Determine if retry should happen or not\n :param ExponentialRetryWithMaxWait self: this\n :param azure.storage.common.models.RetryContext context: retry context\n :rtype: bool\n :return: True if retry should happen, False otherwise\n "
]
|
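For reference, a minimal standalone sketch of the status-code classification implemented above; this simplification omits the exception-based paths and the 404-on-secondary special case, and it is not the actual retry policy object:

def should_retry_status(status):
    # success codes here imply a failure while downloading/parsing the body: retry
    if 200 <= status < 300:
        return True
    # of the 3xx/4xx range, only request timeout (408) and throttling (429)
    # are retryable in this sketch
    if 300 <= status < 500:
        return status in (408, 429)
    # server errors are retryable except not implemented (501) and
    # HTTP version not supported (505)
    if status >= 500:
        return status not in (501, 505)
    # informational codes: retry (mirrors the fallback branch above)
    return True

print(should_retry_status(429))  # True
print(should_retry_status(501))  # False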
Please provide a description of the function:def _backoff(self, context):
# type: (ExponentialRetryWithMaxWait,
# azure.storage.common.models.RetryContext) -> int
self._backoff_count += 1
if self._backoff_count == 1:
self._last_backoff = self.initial_backoff
else:
self._last_backoff *= 2
if self._last_backoff > self.max_backoff and self.reset_at_max:
self._backoff_count = 1
self._last_backoff = self.initial_backoff
return self._last_backoff | [
"Backoff calculator\n :param ExponentialRetryWithMaxWait self: this\n :param azure.storage.common.models.RetryContext context: retry context\n :rtype: int\n :return: backoff amount\n "
]
|
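The doubling-with-reset behavior of the backoff calculator above can be illustrated with a small standalone loop (hypothetical settings: initial_backoff=1, max_backoff=8, reset_at_max=True):

initial_backoff, max_backoff = 1, 8
backoff_count, last_backoff, waits = 0, 0, []
for _ in range(6):
    backoff_count += 1
    if backoff_count == 1:
        last_backoff = initial_backoff
    else:
        last_backoff *= 2
    if last_backoff > max_backoff:
        # reset back to the initial backoff once the cap is exceeded
        backoff_count = 1
        last_backoff = initial_backoff
    waits.append(last_backoff)
print(waits)  # [1, 2, 4, 8, 1, 2]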
Please provide a description of the function:def termination_check(self):
# type: (SyncCopy) -> bool
with self._transfer_lock:
return (self._synccopy_terminate or
len(self._exceptions) > 0 or
(self._all_remote_files_processed and
len(self._transfer_set) == 0)) | [
"Check if terminated\n :param SyncCopy self: this\n :rtype: bool\n :return: if terminated\n "
]
|
Please provide a description of the function:def create_unique_transfer_operation_id(src_ase, dst_ase):
# type: (blobxfer.models.azure.StorageEntity,
# blobxfer.models.azure.StorageEntity) -> str
return ';'.join(
(src_ase._client.primary_endpoint, src_ase.path,
dst_ase._client.primary_endpoint, dst_ase.path)
) | [
"Create a unique transfer operation id\n :param blobxfer.models.azure.StorageEntity src_ase: src storage entity\n :param blobxfer.models.azure.StorageEntity dst_ase: dst storage entity\n :rtype: str\n :return: unique transfer id\n "
]
|
Please provide a description of the function:def _update_progress_bar(self):
# type: (SyncCopy) -> None
blobxfer.operations.progress.update_progress_bar(
self._general_options,
'synccopy',
self._synccopy_start_time,
self._synccopy_total,
self._synccopy_sofar,
self._synccopy_bytes_total,
self._synccopy_bytes_sofar,
) | [
"Update progress bar\n :param SyncCopy self: this\n "
]
|
Please provide a description of the function:def _global_dest_mode_is_file(self):
# type: (SyncCopy) -> bool
if (self._spec.options.dest_mode ==
blobxfer.models.azure.StorageModes.File or
(self._spec.options.mode ==
blobxfer.models.azure.StorageModes.File and
self._spec.options.dest_mode ==
blobxfer.models.azure.StorageModes.Auto)):
return True
return False | [
"Determine if destination mode is file\n :param SyncCopy self: this\n :rtype: bool\n :return: destination mode is file\n "
]
|
Please provide a description of the function:def _translate_src_mode_to_dst_mode(self, src_mode):
# type: (SyncCopy, blobxfer.models.azure.StorageModes) -> bool
if (self._spec.options.dest_mode ==
blobxfer.models.azure.StorageModes.Auto):
return src_mode
else:
return self._spec.options.dest_mode | [
"Translate the source mode into the destination mode\n :param SyncCopy self: this\n :param blobxfer.models.azure.StorageModes src_mode: source mode\n :rtype: blobxfer.models.azure.StorageModes\n :return: destination mode\n "
]
|
Please provide a description of the function:def _delete_extraneous_files(self):
# type: (SyncCopy) -> None
if not self._spec.options.delete_extraneous_destination:
return
# list blobs for all destinations
checked = set()
deleted = 0
for sa, container, _, _ in self._get_destination_paths():
key = ';'.join((sa.name, sa.endpoint, container))
if key in checked:
continue
logger.debug(
'attempting to delete extraneous blobs/files from: {}'.format(
key))
if self._global_dest_mode_is_file():
files = blobxfer.operations.azure.file.list_all_files(
sa.file_client, container)
for file in files:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(sa.file_client, container, file)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting file: {}'.format(
file))
else:
if self._general_options.verbose:
logger.debug('deleting file: {}'.format(file))
blobxfer.operations.azure.file.delete_file(
sa.file_client, container, file)
deleted += 1
else:
blobs = blobxfer.operations.azure.blob.list_all_blobs(
sa.block_blob_client, container)
for blob in blobs:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(
sa.block_blob_client, container, blob.name)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting blob: {}'.format(
blob.name))
else:
if self._general_options.verbose:
logger.debug('deleting blob: {}'.format(
blob.name))
blobxfer.operations.azure.blob.delete_blob(
sa.block_blob_client, container, blob.name)
deleted += 1
checked.add(key)
logger.info('deleted {} extraneous blobs/files'.format(deleted)) | [
"Delete extraneous files on the remote\n :param SyncCopy self: this\n "
]
|
Please provide a description of the function:def _add_to_transfer_queue(self, src_ase, dst_ase):
# type: (SyncCopy, blobxfer.models.azure.StorageEntity,
# blobxfer.models.azure.StorageEntity) -> None
        # prepare source file for transfer
# if remote file is a block blob, need to retrieve block list
if (src_ase.mode == dst_ase.mode ==
blobxfer.models.azure.StorageModes.Block):
bl = blobxfer.operations.azure.blob.block.get_committed_block_list(
src_ase)
else:
bl = None
# TODO future optimization for page blob synccopies: query
# page ranges and omit cleared pages from being transferred
sd = blobxfer.models.synccopy.Descriptor(
src_ase, dst_ase, bl, self._spec.options, self._resume)
        # add synccopy descriptor to queue
self._transfer_queue.put(sd)
if self._synccopy_start_time is None:
with self._transfer_lock:
if self._synccopy_start_time is None:
self._synccopy_start_time = blobxfer.util.datetime_now() | [
"Add remote file to download queue\n :param SyncCopy self: this\n :param blobxfer.models.azure.StorageEntity src_ase: src ase\n :param blobxfer.models.azure.StorageEntity dst_ase: dst ase\n "
]
|
Please provide a description of the function:def _wait_for_transfer_threads(self, terminate):
# type: (SyncCopy, bool) -> None
if terminate:
self._synccopy_terminate = terminate
for thr in self._transfer_threads:
blobxfer.util.join_thread(thr) | [
"Wait for download threads\n :param SyncCopy self: this\n :param bool terminate: terminate threads\n "
]
|
Please provide a description of the function:def _worker_thread_transfer(self):
# type: (SyncCopy) -> None
while not self.termination_check:
try:
sd = self._transfer_queue.get(block=False, timeout=0.1)
except queue.Empty:
continue
try:
self._process_synccopy_descriptor(sd)
except Exception as e:
with self._transfer_lock:
self._exceptions.append(e) | [
"Worker thread download\n :param SyncCopy self: this\n "
]
|
Please provide a description of the function:def _put_data(self, sd, ase, offsets, data):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor,
# blobxfer.models.azure.StorageEntity,
# blobxfer.models.upload.Offsets, bytes) -> None
if ase.mode == blobxfer.models.azure.StorageModes.Append:
# append block
if data is not None:
blobxfer.operations.azure.blob.append.append_block(ase, data)
elif ase.mode == blobxfer.models.azure.StorageModes.Block:
# handle one-shot uploads
if sd.is_one_shot_block_blob:
if blobxfer.util.is_not_empty(sd.src_entity.md5):
digest = sd.src_entity.md5
else:
digest = None
blobxfer.operations.azure.blob.block.create_blob(
ase, data, digest, sd.src_entity.raw_metadata)
return
# upload block
if data is not None:
blobxfer.operations.azure.blob.block.put_block(
ase, offsets, data)
elif ase.mode == blobxfer.models.azure.StorageModes.File:
# upload range
if data is not None:
blobxfer.operations.azure.file.put_file_range(
ase, offsets, data)
elif ase.mode == blobxfer.models.azure.StorageModes.Page:
if data is not None:
# compute aligned size
aligned = blobxfer.util.page_align_content_length(
offsets.num_bytes)
# align page
if aligned != offsets.num_bytes:
data = data.ljust(aligned, b'\0')
if blobxfer.operations.md5.check_data_is_empty(data):
return
# upload page
blobxfer.operations.azure.blob.page.put_page(
ase, offsets.range_start,
offsets.range_start + aligned - 1, data) | [
"Put data in Azure\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param blobxfer.models.azure.StorageEntity ase: Storage entity\n :param blobxfer.models.upload.Offsets offsets: offsets\n :param bytes data: data to upload\n "
]
|
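The page blob branch above pads each write up to the page boundary before checking for an all-zero page. A standalone sketch of that padding, assuming the usual 512-byte Azure page blob alignment:

PAGE_BOUNDARY = 512  # assumed Azure page blob alignment

def pad_to_page_boundary(data):
    # round the length up to the next multiple of the boundary and pad
    # with zero bytes, mirroring the ljust() call above
    aligned = -(-len(data) // PAGE_BOUNDARY) * PAGE_BOUNDARY
    return data.ljust(aligned, b'\0')

print(len(pad_to_page_boundary(b'x' * 700)))   # 1024
print(len(pad_to_page_boundary(b'x' * 512)))   # 512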
Please provide a description of the function:def _process_data(self, sd, ase, offsets, data):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor,
# blobxfer.models.azure.StorageEntity,
# blobxfer.models.synccopy.Offsets, bytes) -> None
# issue put data
self._put_data(sd, ase, offsets, data)
# accounting
with self._transfer_lock:
self._synccopy_bytes_sofar += offsets.num_bytes
# complete offset upload and save resume state
sd.complete_offset_upload(offsets.chunk_num) | [
"Process downloaded data for upload\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param blobxfer.models.azure.StorageEntity ase: storage entity\n :param blobxfer.models.synccopy.Offsets offsets: offsets\n :param bytes data: data to process\n "
]
|
Please provide a description of the function:def _process_synccopy_descriptor(self, sd):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor) -> None
# update progress bar
self._update_progress_bar()
# get download offsets
offsets, resume_bytes = sd.next_offsets()
# add resume bytes to counter
if resume_bytes is not None:
with self._transfer_lock:
self._synccopy_bytes_sofar += resume_bytes
logger.debug('adding {} sofar {} from {}'.format(
resume_bytes, self._synccopy_bytes_sofar,
sd.dst_entity.name))
del resume_bytes
# check if all operations completed
if offsets is None and sd.all_operations_completed:
# finalize upload for non-one shots
if not sd.is_one_shot_block_blob:
self._finalize_upload(sd)
else:
# set access tier for one shots
if sd.requires_access_tier_set:
blobxfer.operations.azure.blob.block.set_blob_access_tier(
sd.dst_entity)
# accounting
with self._transfer_lock:
self._transfer_set.remove(
blobxfer.operations.synccopy.SyncCopy.
create_unique_transfer_operation_id(
sd.src_entity, sd.dst_entity))
self._synccopy_sofar += 1
return
# re-enqueue for finalization if no offsets
if offsets is None:
self._transfer_queue.put(sd)
return
# prepare upload
if offsets.chunk_num == 0:
self._prepare_upload(sd.dst_entity)
# prepare replica targets
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
if offsets.chunk_num == 0:
self._prepare_upload(ase)
# re-enqueue for other threads to download next offset if not append
if sd.src_entity.mode != blobxfer.models.azure.StorageModes.Append:
self._transfer_queue.put(sd)
# issue get range
if sd.src_entity.mode == blobxfer.models.azure.StorageModes.File:
data = blobxfer.operations.azure.file.get_file_range(
sd.src_entity, offsets)
else:
data = blobxfer.operations.azure.blob.get_blob_range(
sd.src_entity, offsets)
# process data for upload
self._process_data(sd, sd.dst_entity, offsets, data)
# iterate replicas
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
self._process_data(sd, ase, offsets, data)
# re-enqueue for append blobs
if sd.src_entity.mode == blobxfer.models.azure.StorageModes.Append:
self._transfer_queue.put(sd) | [
"Process synccopy descriptor\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n "
]
|
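The descriptor-driven loop above is essentially a chunked read-from-source / write-to-destination cycle, with the descriptor re-enqueued between chunks so other worker threads can pick up the next offset. A simplified, purely local sketch of that copy loop (in-memory streams stand in for the Azure get/put range calls):

import io

CHUNK = 4 * 1048576  # hypothetical chunk size

def chunked_copy(src, dst, length):
    offset = 0
    while offset < length:
        num = min(CHUNK, length - offset)
        src.seek(offset)
        data = src.read(num)   # analogous to get_blob_range/get_file_range
        dst.seek(offset)
        dst.write(data)        # analogous to _put_data on the destination
        offset += num

src = io.BytesIO(b'x' * (10 * 1048576))
dst = io.BytesIO()
chunked_copy(src, dst, 10 * 1048576)
assert dst.getvalue() == src.getvalue()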
Please provide a description of the function:def _finalize_block_blob(self, sd, metadata, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict,
# str) -> None
blobxfer.operations.azure.blob.block.put_block_list(
sd.dst_entity, sd.last_block_num, digest, metadata)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.blob.block.put_block_list(
ase, sd.last_block_num, digest, metadata) | [
"Finalize Block blob\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param dict metadata: metadata dict\n :param str digest: md5 digest\n "
]
|
Please provide a description of the function:def _set_blob_properties(self, sd, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, str) -> None
blobxfer.operations.azure.blob.set_blob_properties(
sd.dst_entity, digest)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_properties(ase, digest) | [
"Set blob properties (md5, cache control)\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param str digest: md5 digest\n "
]
|
Please provide a description of the function:def _set_blob_metadata(self, sd, metadata):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict) -> None
blobxfer.operations.azure.blob.set_blob_metadata(
sd.dst_entity, metadata)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_metadata(ase, metadata) | [
"Set blob metadata\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param dict metadata: metadata dict\n :param dict metadata: metadata dict\n "
]
|
Please provide a description of the function:def _finalize_nonblock_blob(self, sd, metadata, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict,
# str) -> None
# set md5 page blob property if required
if (blobxfer.util.is_not_empty(digest) or
sd.dst_entity.cache_control is not None):
self._set_blob_properties(sd, digest)
# set metadata if needed
if blobxfer.util.is_not_empty(metadata):
self._set_blob_metadata(sd, metadata) | [
"Finalize Non-Block blob\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param dict metadata: metadata dict\n :param str digest: md5 digest\n "
]
|
Please provide a description of the function:def _finalize_azure_file(self, sd, metadata, digest):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict,
# str) -> None
# set file properties if required
if (blobxfer.util.is_not_empty(digest) or
sd.dst_entity.cache_control is not None):
blobxfer.operations.azure.file.set_file_properties(
sd.dst_entity, digest)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.file.set_file_properties(
ase, digest)
# set file metadata if needed
if blobxfer.util.is_not_empty(metadata):
blobxfer.operations.azure.file.set_file_metadata(
sd.dst_entity, metadata)
if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
for ase in sd.dst_entity.replica_targets:
blobxfer.operations.azure.file.set_file_metadata(
ase, metadata) | [
"Finalize Azure File\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n :param dict metadata: metadata dict\n :param str digest: md5 digest\n "
]
|
Please provide a description of the function:def _finalize_upload(self, sd):
# type: (SyncCopy, blobxfer.models.synccopy.Descriptor) -> None
metadata = sd.src_entity.raw_metadata
if blobxfer.util.is_not_empty(sd.src_entity.md5):
digest = sd.src_entity.md5
else:
digest = None
if sd.requires_put_block_list:
# put block list for non one-shot block blobs
self._finalize_block_blob(sd, metadata, digest)
elif sd.remote_is_page_blob or sd.remote_is_append_blob:
# append and page blob finalization
self._finalize_nonblock_blob(sd, metadata, digest)
elif sd.remote_is_file:
# azure file finalization
self._finalize_azure_file(sd, metadata, digest)
# set access tier
if sd.requires_access_tier_set:
blobxfer.operations.azure.blob.block.set_blob_access_tier(
sd.dst_entity) | [
"Finalize file upload\n :param SyncCopy self: this\n :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor\n "
]
|
Please provide a description of the function:def _check_copy_conditions(self, src, dst):
# type: (SyncCopy, blobxfer.models.azure.StorageEntity,
        #         blobxfer.models.azure.StorageEntity) -> SynccopyAction
# if remote file doesn't exist, copy
if dst is None or dst.from_local:
return SynccopyAction.Copy
# check overwrite option
if not self._spec.options.overwrite:
logger.info(
                'not overwriting remote file: {}'.format(dst.path))
return SynccopyAction.Skip
# check skip on options, MD5 match takes priority
src_md5 = blobxfer.models.metadata.get_md5_from_metadata(src)
dst_md5 = blobxfer.models.metadata.get_md5_from_metadata(dst)
if (self._spec.skip_on.md5_match and
blobxfer.util.is_not_empty(src_md5)):
if src_md5 == dst_md5:
return SynccopyAction.Skip
else:
return SynccopyAction.Copy
# if neither of the remaining skip on actions are activated, copy
if (not self._spec.skip_on.filesize_match and
not self._spec.skip_on.lmt_ge):
return SynccopyAction.Copy
# check skip on file size match
ul_fs = None
if self._spec.skip_on.filesize_match:
if src.size == dst.size:
ul_fs = False
else:
ul_fs = True
# check skip on lmt ge
ul_lmt = None
if self._spec.skip_on.lmt_ge:
if dst.lmt >= src.lmt:
ul_lmt = False
else:
ul_lmt = True
# upload if either skip on mismatch is True
if ul_fs or ul_lmt:
return SynccopyAction.Copy
else:
return SynccopyAction.Skip | [
"Check for synccopy conditions\n :param SyncCopy self: this\n :param blobxfer.models.azure.StorageEntity src: src\n :param blobxfer.models.azure.StorageEntity dst: dst\n :rtype: SynccopyAction\n :return: synccopy action\n "
]
|
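A condensed standalone restatement of the decision order above (destination existence, overwrite, MD5 match, then filesize/lmt_ge), useful for reasoning about which action a given pair of entities produces; the parameters are passed explicitly here instead of being read from the spec and skip_on options:

def copy_action(dst_exists, overwrite, md5_match_on, src_md5, dst_md5,
                fs_match_on, src_size, dst_size,
                lmt_ge_on, src_lmt, dst_lmt):
    if not dst_exists:
        return 'copy'
    if not overwrite:
        return 'skip'
    # an MD5 match (when enabled and the source digest exists) decides outright
    if md5_match_on and src_md5:
        return 'skip' if src_md5 == dst_md5 else 'copy'
    if not fs_match_on and not lmt_ge_on:
        return 'copy'
    # otherwise copy if either enabled skip-on check detects a mismatch
    mismatch = ((fs_match_on and src_size != dst_size) or
                (lmt_ge_on and dst_lmt < src_lmt))
    return 'copy' if mismatch else 'skip'

# identical md5 digests short-circuit to a skip even with a size mismatch
print(copy_action(True, True, True, 'abc==', 'abc==',
                  True, 10, 20, False, 0, 0))  # skip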
Please provide a description of the function:def _generate_destination_for_source(self, src_ase):
# type: (SyncCopy, blobxfer.models.azure.StorageEntity) ->
        #        blobxfer.models.azure.StorageEntity
# create a storage entity for each destination
for sa, cont, name, dpath in self._get_destination_paths():
if self._spec.options.rename:
name = str(pathlib.Path(name))
if name == '.':
raise RuntimeError(
                    'attempting to rename multiple files to a directory')
else:
name = str(pathlib.Path(name) / src_ase.name)
# translate source mode to dest mode
dst_mode = self._translate_src_mode_to_dst_mode(src_ase.mode)
dst_ase = self._check_for_existing_remote(sa, cont, name, dst_mode)
if dst_ase is None:
dst_ase = blobxfer.models.azure.StorageEntity(cont, ed=None)
dst_ase.populate_from_local(
sa, cont, name, dst_mode, src_ase.cache_control)
dst_ase.size = src_ase.size
# overwrite tier with specified storage tier
if (dst_mode == blobxfer.models.azure.StorageModes.Block and
self._spec.options.access_tier is not None):
dst_ase.access_tier = self._spec.options.access_tier
# check condition for dst
action = self._check_copy_conditions(src_ase, dst_ase)
if action == SynccopyAction.Copy:
yield dst_ase
elif action == SynccopyAction.Skip:
# add to exclusion set if skipping
if self._spec.options.delete_extraneous_destination:
uid = (
blobxfer.operations.synccopy.SyncCopy.
create_deletion_id(
dst_ase._client, dst_ase.container, dst_ase.name)
)
self._delete_exclude.add(uid)
if self._general_options.dry_run:
logger.info('[DRY RUN] skipping: {} -> {}'.format(
src_ase.path, dst_ase.path)) | [
"Generate entities for source path\n :param SyncCopy self: this\n :param blobxfer.models.azure.StorageEntity src_ase: source ase\n :rtype: blobxfer.models.azure.StorageEntity\n :return: destination storage entity\n "
]
|
Please provide a description of the function:def _bind_sources_to_destination(self):
# type: (SyncCopy) ->
# Tuple[blobxfer.models.azure.StorageEntity,
# blobxfer.models.azure.StorageEntity]
seen = set()
        # iterate through source paths to copy
for src in self._spec.sources:
for src_ase in src.files(
self._creds, self._spec.options,
self._general_options.dry_run):
# generate copy destinations for source
dest = [
dst_ase for dst_ase in
self._generate_destination_for_source(src_ase)
]
if len(dest) == 0:
continue
primary_dst = dest[0]
uid = blobxfer.operations.synccopy.SyncCopy.create_deletion_id(
primary_dst._client, primary_dst.container,
primary_dst.name)
if uid in seen:
raise RuntimeError(
'duplicate destination entity detected: {}/{}'.format(
primary_dst._client.primary_endpoint,
primary_dst.path))
seen.add(uid)
# add to exclusion set
if self._spec.options.delete_extraneous_destination:
self._delete_exclude.add(uid)
if len(dest[1:]) > 0:
if primary_dst.replica_targets is None:
primary_dst.replica_targets = []
primary_dst.replica_targets.extend(dest[1:])
# check replica targets for duplicates
for rt in primary_dst.replica_targets:
ruid = (
blobxfer.operations.synccopy.SyncCopy.
create_deletion_id(
rt._client, rt.container, rt.name)
)
if ruid in seen:
raise RuntimeError(
('duplicate destination entity detected: '
'{}/{}').format(
rt._client.primary_endpoint, rt.path))
seen.add(ruid)
# add replica targets to deletion exclusion set
if self._spec.options.delete_extraneous_destination:
self._delete_exclude.add(ruid)
yield src_ase, primary_dst | [
"Bind source storage entity to destination storage entities\n :param SyncCopy self: this\n :rtype: tuple\n :return: (source storage entity, destination storage entity)\n "
]
|
Please provide a description of the function:def _run(self):
# type: (SyncCopy) -> None
# mark start
self._start_time = blobxfer.util.datetime_now()
logger.info('blobxfer start time: {0}'.format(self._start_time))
# initialize resume db if specified
if self._general_options.resume_file is not None:
self._resume = blobxfer.operations.resume.SyncCopyResumeManager(
self._general_options.resume_file)
        # initialize transfer threads
self._initialize_transfer_threads()
        # iterate through source paths to copy
processed_files = 0
for src_ase, dst_ase in self._bind_sources_to_destination():
processed_files += 1
if self._general_options.dry_run:
logger.info('[DRY RUN] synccopy: {} -> {}'.format(
src_ase.path, dst_ase.path))
else:
# add transfer to set
with self._transfer_lock:
self._transfer_set.add(
blobxfer.operations.synccopy.SyncCopy.
create_unique_transfer_operation_id(src_ase, dst_ase))
self._synccopy_total += 1
self._synccopy_bytes_total += src_ase.size
if blobxfer.util.is_not_empty(dst_ase.replica_targets):
self._synccopy_bytes_total += (
len(dst_ase.replica_targets) * src_ase.size
)
self._add_to_transfer_queue(src_ase, dst_ase)
# set remote files processed
with self._transfer_lock:
self._all_remote_files_processed = True
synccopy_size_mib = (
self._synccopy_bytes_total / blobxfer.util.MEGABYTE
)
logger.debug(
('{0} remote files to sync, waiting for copy '
'completion of approx. {1:.4f} MiB').format(
processed_files, synccopy_size_mib))
del processed_files
        # wait for copies to complete
self._wait_for_transfer_threads(terminate=False)
end_time = blobxfer.util.datetime_now()
# update progress bar
self._update_progress_bar()
# check for exceptions
if len(self._exceptions) > 0:
            logger.error('exceptions encountered while copying')
# raise the first one
raise self._exceptions[0]
# check for mismatches
if (self._synccopy_sofar != self._synccopy_total or
self._synccopy_bytes_sofar != self._synccopy_bytes_total):
raise RuntimeError(
'copy mismatch: [count={}/{} bytes={}/{}]'.format(
self._synccopy_sofar, self._synccopy_total,
self._synccopy_bytes_sofar, self._synccopy_bytes_total))
        # delete all extraneous destination files not accounted for if
# delete extraneous enabled
self._delete_extraneous_files()
# delete resume file if we've gotten this far
if self._resume is not None:
self._resume.delete()
# output throughput
if self._synccopy_start_time is not None:
dltime = (end_time - self._synccopy_start_time).total_seconds()
synccopy_size_mib = (
(self._synccopy_bytes_total << 1) / blobxfer.util.MEGABYTE
)
dlmibspeed = synccopy_size_mib / dltime
logger.info(
('elapsed copy time and throughput of {0:.4f} '
'GiB: {1:.3f} sec, {2:.4f} Mbps ({3:.3f} MiB/sec)').format(
synccopy_size_mib / 1024, dltime, dlmibspeed * 8,
dlmibspeed))
end_time = blobxfer.util.datetime_now()
logger.info('blobxfer end time: {0} (elapsed: {1:.3f} sec)'.format(
end_time, (end_time - self._start_time).total_seconds())) | [
"Execute SyncCopy\n :param SyncCopy self: this\n "
]
|
Please provide a description of the function:def start(self):
# type: (SyncCopy) -> None
try:
blobxfer.operations.progress.output_parameters(
self._general_options, self._spec)
self._run()
except (KeyboardInterrupt, Exception) as ex:
if isinstance(ex, KeyboardInterrupt):
logger.info(
'KeyboardInterrupt detected, force terminating '
'processes and threads (this may take a while)...')
else:
logger.exception(ex)
self._wait_for_transfer_threads(terminate=True)
finally:
# close resume file
if self._resume is not None:
self._resume.close() | [
"Start the SyncCopy\n :param SyncCopy self: this\n "
]
|
Please provide a description of the function:def delete(self):
# type: (_BaseResumeManager) -> None
self.close()
if self._resume_file.exists(): # noqa
try:
self._resume_file.unlink()
except OSError as e:
logger.warning('could not unlink resume db: {}'.format(e))
for ext in ('.bak', '.dat', '.dir'): # noqa
fp = pathlib.Path(str(self._resume_file) + ext)
if fp.exists():
try:
fp.unlink()
except OSError as e:
logger.warning('could not unlink resume db: {}'.format(e)) | [
"Delete the resume file db\n :param _BaseResumeManager self: this\n "
]
|
Please provide a description of the function:def datalock(self, acquire=True):
# type: (_BaseResumeManager) -> None
if acquire:
self._lock.acquire()
try:
yield
finally:
if acquire:
self._lock.release() | [
"Delete the resume file db\n :param _BaseResumeManager self: this\n :param bool acquire: acquire lock\n "
]
|
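The generator above reads like a contextlib-style context manager (presumably decorated with contextlib.contextmanager in the original module). A self-contained equivalent and its intended usage:

import contextlib
import threading

_lock = threading.Lock()

@contextlib.contextmanager
def datalock(acquire=True):
    if acquire:
        _lock.acquire()
    try:
        yield
    finally:
        if acquire:
            _lock.release()

with datalock():
    pass  # critical section guarded by _lock
with datalock(acquire=False):
    pass  # no-op locking for callers that already hold the lock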
Please provide a description of the function:def generate_record_key(ase):
# type: (blobxfer.models.azure.StorageEntity) -> str
key = '{}:{}'.format(ase._client.primary_endpoint, ase.path)
if blobxfer.util.on_python2():
return key.encode('utf8')
else:
return key | [
"Generate a record key\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :rtype: str\n :return: record key\n "
]
|
Please provide a description of the function:def get_record(self, ase, key=None, lock=True):
# type: (_BaseResumeManager, str, bool) -> object
if key is None:
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(ase)
with self.datalock(lock):
try:
return self._data[key]
except KeyError:
return None | [
"Get a resume record\n :param _BaseResumeManager self: this\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :param str key: record key\n :param bool lock: acquire lock\n :rtype: object\n :return: resume record object\n "
]
|
Please provide a description of the function:def add_or_update_record(
self, final_path, ase, chunk_size, next_integrity_chunk,
completed, md5):
# type: (DownloadResumeManager, pathlib.Path,
# blobxfer.models.azure.StorageEntity, int, int, bool,
# str) -> None
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(ase)
with self.datalock():
dl = self.get_record(ase, key=key, lock=False)
if dl is None:
dl = blobxfer.models.resume.Download(
final_path=str(final_path),
length=ase._size,
chunk_size=chunk_size,
next_integrity_chunk=next_integrity_chunk,
completed=completed,
md5=md5,
)
else:
if (dl.completed or
next_integrity_chunk < dl.next_integrity_chunk):
return
if completed:
dl.completed = completed
else:
dl.next_integrity_chunk = next_integrity_chunk
dl.md5hexdigest = md5
self._data[key] = dl
self._data.sync() | [
"Add or update a resume record\n :param DownloadResumeManager self: this\n :param pathlib.Path final_path: final path\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :param int chunk_size: chunk size in bytes\n :param int next_integrity_chunk: next integrity chunk\n :param bool completed: if completed\n :param str md5: md5 hex digest\n "
]
|
Please provide a description of the function:def add_or_update_record(
self, local_path, ase, chunk_size, total_chunks, completed_chunks,
completed, md5):
# type: (UploadResumeManager, pathlib.Path,
# blobxfer.models.azure.StorageEntity, int, int, int, bool,
# str) -> None
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(ase)
with self.datalock():
ul = self.get_record(ase, key=key, lock=False)
if ul is None:
ul = blobxfer.models.resume.Upload(
local_path=str(local_path),
length=ase._size,
chunk_size=chunk_size,
total_chunks=total_chunks,
completed_chunks=completed_chunks,
completed=completed,
md5=md5,
)
else:
if ul.completed or completed_chunks == ul.completed_chunks:
return
ul.completed_chunks = completed_chunks
if completed:
ul.completed = completed
else:
ul.md5hexdigest = md5
self._data[key] = ul
self._data.sync() | [
"Add or update a resume record\n :param UploadResumeManager self: this\n :param pathlib.Path local_path: local path\n :param blobxfer.models.azure.StorageEntity ase: Storage Entity\n :param int chunk_size: chunk size in bytes\n :param int total_chunks: total chunks\n :param int completed_chunks: completed chunks bitarray\n :param bool completed: if completed\n :param str md5: md5 hex digest\n "
]
|
Please provide a description of the function:def add_or_update_record(
self, dst_ase, src_block_list, offset, chunk_size, total_chunks,
completed_chunks, completed):
# type: (SyncCopyResumeManager,
# blobxfer.models.azure.StorageEntity, list, int, int, int,
# int, bool) -> None
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(dst_ase)
with self.datalock():
sc = self.get_record(dst_ase, key=key, lock=False)
if sc is None:
sc = blobxfer.models.resume.SyncCopy(
length=dst_ase._size,
src_block_list=src_block_list,
offset=offset,
chunk_size=chunk_size,
total_chunks=total_chunks,
completed_chunks=completed_chunks,
completed=completed,
)
else:
if sc.completed or completed_chunks == sc.completed_chunks:
return
sc.offset = offset
sc.completed_chunks = completed_chunks
if completed:
sc.completed = completed
self._data[key] = sc
self._data.sync() | [
"Add or update a resume record\n :param SyncCopyResumeManager self: this\n :param blobxfer.models.azure.StorageEntity dst_ase: Storage Entity\n :param list src_block_list: source block list\n :param int offset: offset\n :param int chunk_size: chunk size in bytes\n :param int total_chunks: total chunks\n :param int completed_chunks: completed chunks bitarray\n :param bool completed: if completed\n "
]
|
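The three add_or_update_record variants above all follow the same get-record / mutate / reassign / sync pattern against self._data, which behaves like a shelve-backed dict (an assumption; the snippet below uses the standard library shelve module directly with a hypothetical key and record):

import os
import shelve
import tempfile

db_path = os.path.join(tempfile.mkdtemp(), 'resume')
with shelve.open(db_path) as data:
    key = 'sa.blob.core.windows.net:container/blob'  # hypothetical record key
    record = data.get(key) or {'offset': 0, 'completed': False}
    record['offset'] += 4194304
    data[key] = record  # reassign so the shelf persists the mutation
    data.sync()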
Please provide a description of the function:def update_progress_bar(
go, optext, start, total_files, files_sofar, total_bytes,
bytes_sofar, stdin_upload=False):
# type: (blobxfer.models.options.General, str, datetime.datetime, int,
# int, int, int, bool) -> None
if (go.quiet or not go.progress_bar or
blobxfer.util.is_none_or_empty(go.log_file) or
start is None):
return
diff = (blobxfer.util.datetime_now() - start).total_seconds()
if diff <= 0:
# arbitrarily give a small delta
diff = 1e-9
if total_bytes is None or total_bytes == 0 or bytes_sofar > total_bytes:
done = 0
else:
done = float(bytes_sofar) / total_bytes
rate = bytes_sofar / blobxfer.util.MEGABYTE / diff
if optext == 'synccopy':
rtext = 'sync-copied'
else:
rtext = optext + 'ed'
if total_files is None:
fprog = 'n/a'
else:
fprog = '{}/{}'.format(files_sofar, total_files)
if stdin_upload:
sys.stdout.write(
('\r{0} progress: [{1:30s}] n/a % {2:12.3f} MiB/sec, '
'{3} {4}').format(
optext, '>' * int(total_bytes % 30), rate, fprog, rtext)
)
else:
sys.stdout.write(
('\r{0} progress: [{1:30s}] {2:.2f}% {3:12.3f} MiB/sec, '
'{4} {5}').format(
optext, '>' * int(done * 30), done * 100, rate, fprog, rtext)
)
if files_sofar == total_files:
sys.stdout.write('\n')
sys.stdout.flush() | [
"Update the progress bar\n :param blobxfer.models.options.General go: general options\n :param str optext: operation prefix text\n :param datetime.datetime start: start time\n :param int total_files: total number of files\n :param int files_sofar: files transfered so far\n :param int total_bytes: total number of bytes\n :param int bytes_sofar: bytes transferred so far\n :param bool stdin_upload: stdin upload\n "
]
|
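To make the formatting above concrete, the non-stdin branch renders a carriage-return line like the following (hypothetical values: 50% done, 85.123 MiB/sec, 5 of 10 files, optext 'download'; spacing approximate):

line = ('\r{0} progress: [{1:30s}] {2:.2f}% {3:12.3f} MiB/sec, '
        '{4} {5}').format(
    'download', '>' * 15, 50.0, 85.123, '5/10', 'downloaded')
print(line)
# -> download progress: [>>>>>>>>>>>>>>>               ] 50.00%       85.123 MiB/sec, 5/10 downloaded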
Please provide a description of the function:def output_parameters(general_options, spec):
# type: (blobxfer.models.options.General, object) -> None
if general_options.quiet:
return
sep = '============================================'
log = []
log.append(sep)
log.append(' Azure blobxfer parameters')
log.append(sep)
log.append(' blobxfer version: {}'.format(
blobxfer.version.__version__))
log.append(' platform: {}'.format(platform.platform()))
log.append(
(' components: {}={}-{} azstor.blob={} azstor.file={} '
'crypt={} req={}').format(
platform.python_implementation(),
platform.python_version(),
'64bit' if sys.maxsize > 2**32 else '32bit',
azure.storage.blob._constants.__version__,
azure.storage.file._constants.__version__,
cryptography.__version__,
requests.__version__,))
# specific preamble
if isinstance(spec, blobxfer.models.download.Specification):
log.append(' transfer direction: {}'.format('Azure -> local'))
log.append(
(' workers: disk={} xfer={} (msoc={}) md5={} '
'crypto={}').format(
general_options.concurrency.disk_threads,
general_options.concurrency.transfer_threads,
spec.options.max_single_object_concurrency,
general_options.concurrency.md5_processes
if spec.options.check_file_md5 else 0,
general_options.concurrency.crypto_processes))
elif isinstance(spec, blobxfer.models.upload.Specification):
log.append(' transfer direction: {}'.format('local -> Azure'))
log.append(
(' workers: disk={} xfer={} md5={} '
'crypto={}').format(
general_options.concurrency.disk_threads,
general_options.concurrency.transfer_threads,
general_options.concurrency.md5_processes
if spec.skip_on.md5_match or
spec.options.store_file_properties.md5 else 0,
0))
elif isinstance(spec, blobxfer.models.synccopy.Specification):
log.append(' transfer direction: {}'.format('Azure -> Azure'))
log.append(
(' workers: disk={} xfer={} md5={} '
'crypto={}').format(
general_options.concurrency.disk_threads,
general_options.concurrency.transfer_threads,
general_options.concurrency.md5_processes,
general_options.concurrency.crypto_processes))
# common block
log.append(' log file: {}'.format(
general_options.log_file))
log.append(' dry run: {}'.format(
general_options.dry_run))
log.append(' resume file: {}'.format(
general_options.resume_file))
log.append(
' timeout: connect={} read={} max_retries={}'.format(
general_options.timeout.connect, general_options.timeout.read,
general_options.timeout.max_retries))
if isinstance(spec, blobxfer.models.synccopy.Specification):
log.append(' source mode: {}'.format(
spec.options.mode))
log.append(' dest mode: {}'.format(
spec.options.dest_mode))
else:
log.append(' mode: {}'.format(
spec.options.mode))
log.append(
' skip on: fs_match={} lmt_ge={} md5={}'.format(
spec.skip_on.filesize_match,
spec.skip_on.lmt_ge,
spec.skip_on.md5_match))
log.append(' delete extraneous: {}'.format(
spec.options.delete_extraneous_destination))
log.append(' overwrite: {}'.format(
spec.options.overwrite))
log.append(' recursive: {}'.format(
spec.options.recursive))
log.append(' rename single: {}'.format(
spec.options.rename))
# specific epilog
if isinstance(spec, blobxfer.models.download.Specification):
log.append(' chunk size bytes: {}'.format(
spec.options.chunk_size_bytes))
log.append(' strip components: {}'.format(
spec.options.strip_components))
log.append(' compute file md5: {}'.format(
spec.options.check_file_md5))
log.append(' restore properties: attr={} lmt={}'.format(
spec.options.restore_file_properties.attributes,
spec.options.restore_file_properties.lmt))
log.append(' rsa private key: {}'.format(
'Loaded' if spec.options.rsa_private_key else 'None'))
log.append(' local destination: {}'.format(
spec.destination.path))
elif isinstance(spec, blobxfer.models.upload.Specification):
log.append(' access tier: {}'.format(
spec.options.access_tier))
log.append(' chunk size bytes: {}'.format(
spec.options.chunk_size_bytes))
log.append(' one shot bytes: {}'.format(
spec.options.one_shot_bytes))
log.append(' strip components: {}'.format(
spec.options.strip_components))
log.append(
' store properties: attr={} cc=\'{}\' md5={}'.format(
spec.options.store_file_properties.attributes,
spec.options.store_file_properties.cache_control or '',
spec.options.store_file_properties.md5))
log.append(' rsa public key: {}'.format(
'Loaded' if spec.options.rsa_public_key else 'None'))
log.append(' local source paths: {}'.format(
' '.join([str(src) for src in spec.sources.paths])))
elif isinstance(spec, blobxfer.models.synccopy.Specification):
log.append(' access tier: {}'.format(
spec.options.access_tier))
log.append(sep)
log = '\n'.join(log)
if blobxfer.util.is_not_empty(general_options.log_file):
print(log)
logger.info('\n{}'.format(log)) | [
"Output parameters\n :param blobxfer.models.options.General general_options: general options\n :param object spec: upload or download spec\n "
]
|
Please provide a description of the function:def compute_md5_for_file_asbase64(
filename, pagealign=False, start=None, end=None, blocksize=65536):
# type: (str, bool, int, int, int) -> str
hasher = blobxfer.util.new_md5_hasher()
with open(filename, 'rb') as filedesc:
if start is not None:
filedesc.seek(start)
curr = start
else:
curr = 0
while True:
if end is not None and curr + blocksize > end:
blocksize = end - curr
if blocksize == 0:
break
buf = filedesc.read(blocksize)
if not buf:
break
buflen = len(buf)
if pagealign and buflen < blocksize:
aligned = blobxfer.util.page_align_content_length(buflen)
if aligned != buflen:
buf = buf.ljust(aligned, b'\0')
hasher.update(buf)
curr += blocksize
return blobxfer.util.base64_encode_as_string(hasher.digest()) | [
"Compute MD5 hash for file and encode as Base64\n :param str filename: file to compute MD5 for\n :param bool pagealign: page align data\n :param int start: file start offset\n :param int end: file end offset\n :param int blocksize: block size\n :rtype: str\n :return: MD5 for file encoded as Base64\n "
]
|
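A hypothetical standalone check of the block-wise hashing above using hashlib and base64 directly (the helper wraps the same primitives through blobxfer.util); it writes a small temporary file, hashes it in blocks, and prints the Base64-encoded digest:

import base64
import hashlib
import os
import tempfile

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
    f.write(b'hello world')

hasher = hashlib.md5()
with open(path, 'rb') as f:
    # read in 64 KiB blocks, mirroring the default blocksize above
    for block in iter(lambda: f.read(65536), b''):
        hasher.update(block)
print(base64.b64encode(hasher.digest()).decode('ascii'))
# XrY7u+Ae7tCTyyK7j1rNww==
os.remove(path)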
Please provide a description of the function:def compute_md5_for_data_asbase64(data):
# type: (obj) -> str
hasher = blobxfer.util.new_md5_hasher()
hasher.update(data)
return blobxfer.util.base64_encode_as_string(hasher.digest()) | [
"Compute MD5 hash for bits and encode as Base64\n :param any data: data to compute MD5 for\n :rtype: str\n :return: MD5 for data\n "
]
|
Please provide a description of the function:def check_data_is_empty(data):
# type: (bytes) -> bool
contentmd5 = compute_md5_for_data_asbase64(data)
datalen = len(data)
if datalen == _MAX_PAGE_SIZE_BYTES:
if contentmd5 == _EMPTY_MAX_PAGE_SIZE_MD5:
return True
else:
data_chk = b'\0' * datalen
if compute_md5_for_data_asbase64(data_chk) == contentmd5:
return True
return False | [
"Check if data is empty via MD5\n :param bytes data: data to check\n :rtype: bool\n :return: if data is empty\n "
]
|
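The zero-page detection above boils down to comparing a buffer's MD5 against the MD5 of an equal-length run of zero bytes (with a precomputed constant for the maximum page size). A small illustration with hashlib:

import base64
import hashlib

def md5_b64(data):
    return base64.b64encode(hashlib.md5(data).digest()).decode('ascii')

page = b'\0' * 4096
print(md5_b64(page) == md5_b64(b'\0' * len(page)))        # True: empty page
print(md5_b64(b'\0' * 4095 + b'\x01') == md5_b64(page))   # False: has data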
Please provide a description of the function:def _worker_process(self):
# type: (LocalFileMd5Offload) -> None
while not self.terminated:
try:
key, lpath, fpath, remote_md5, pagealign, lpview = \
self._task_queue.get(True, 0.1)
except queue.Empty:
continue
if lpview is None:
start = None
end = None
size = None
else:
start = lpview.fd_start
end = lpview.fd_end
size = end - start
md5 = blobxfer.operations.md5.compute_md5_for_file_asbase64(
fpath, pagealign, start, end)
logger.debug('pre-transfer MD5 check: {} <L..R> {} {}'.format(
md5, remote_md5, fpath))
self._done_cv.acquire()
self._done_queue.put((key, lpath, size, md5 == remote_md5))
self._done_cv.notify()
self._done_cv.release() | [
"Compute MD5 for local file\n :param LocalFileMd5Offload self: this\n "
]
|