response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
issues, and also prevents us from accidentally filling up the root
partition.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: full path to the device
:raises ValueError: if drive fails to validate
|
def check_mount(root, drive):
    """
    Verify that the path to the device is a mount point and mounted.  This
    allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us from accidentally filling up the root
    partition.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :returns: full path to the device
    :raises ValueError: if drive fails to validate
    """
    # thin convenience wrapper: same as check_drive with mounting required
    return check_drive(root, drive, mount_check=True)
|
Validate the path given by root and drive is a valid existing directory.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:param mount_check: additionally require path is mounted
:returns: full path to the device
:raises ValueError: if drive fails to validate
|
def check_drive(root, drive, mount_check):
    """
    Validate the path given by root and drive is a valid existing directory.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :param mount_check: additionally require path is mounted
    :returns: full path to the device
    :raises ValueError: if drive fails to validate
    """
    # a drive name that survives URL quoting unchanged cannot contain
    # separators or other characters that would escape the devices dir
    if urllib.parse.quote_plus(drive) != drive:
        raise ValueError('%s is not a valid drive name' % drive)
    path = os.path.join(root, drive)
    if mount_check:
        if not utils.ismount(path):
            raise ValueError('%s is not mounted' % path)
        return path
    if not isdir(path):
        raise ValueError('%s is not a directory' % path)
    return path
|
Helper function for checking if a string can be converted to a float.
:param string: string to be verified as a float
:returns: True if the string can be converted to a float, False otherwise
|
def check_float(string):
    """
    Helper function for checking if a string can be converted to a float.

    :param string: string to be verified as a float
    :returns: True if the string can be converted to a float, False otherwise
    """
    try:
        float(string)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-numeric input (e.g. None), which
        # previously escaped as an uncaught exception despite the documented
        # "False otherwise" contract
        return False
|
Helper function to extract a timestamp from requests that require one.
:param request: the swob request object
:returns: a valid Timestamp instance
:raises HTTPBadRequest: on missing or invalid X-Timestamp
|
def valid_timestamp(request):
    """
    Helper function to extract a timestamp from requests that require one.

    :param request: the swob request object
    :returns: a valid Timestamp instance
    :raises HTTPBadRequest: on missing or invalid X-Timestamp
    """
    try:
        timestamp = request.timestamp
    except exceptions.InvalidTimestamp as e:
        # translate the parse failure into a client-facing 400
        raise HTTPBadRequest(body=str(e), request=request,
                             content_type='text/plain')
    return timestamp
|
Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
Values should be positive integers and correspond to a time greater than
the request timestamp.
If the 'x-delete-after' header is found then its value is used to compute
an 'x-delete-at' value which takes precedence over any existing
'x-delete-at' header.
:param request: the swob request object
:raises: HTTPBadRequest in case of invalid values
:returns: the swob request object
|
def check_delete_headers(request):
    """
    Check that 'x-delete-after' and 'x-delete-at' headers have valid values.
    Values should be positive integers and correspond to a time greater than
    the request timestamp.

    If the 'x-delete-after' header is found then its value is used to compute
    an 'x-delete-at' value which takes precedence over any existing
    'x-delete-at' header.

    :param request: the swob request object
    :raises: HTTPBadRequest in case of invalid values
    :returns: the swob request object
    """
    now = float(valid_timestamp(request))
    headers = request.headers
    if 'x-delete-after' in headers:
        try:
            delete_after = int(headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        # x-delete-after is relative; turn it into an absolute timestamp
        delete_at = utils.normalize_delete_at_timestamp(now + delete_after)
        if int(delete_at) <= now:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        # the computed value takes precedence over any existing x-delete-at
        headers['x-delete-at'] = delete_at
        del headers['x-delete-after']
    if 'x-delete-at' in headers:
        try:
            delete_at = int(utils.normalize_delete_at_timestamp(
                int(headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='Non-integer X-Delete-At')
        # internal replication traffic may legitimately carry old values
        if delete_at <= now and not utils.config_true_value(
                headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
|
Validate if a string is valid UTF-8 str or unicode and that it
does not contain any reserved characters.
:param string: string to be validated
:param internal: boolean, allows reserved characters if True
:returns: True if the string is valid utf-8 str or unicode and
contains no null characters, False otherwise
|
def check_utf8(string, internal=False):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any reserved characters.

    :param string: string to be validated
    :param internal: boolean, allows reserved characters if True
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    if not string:
        return False
    try:
        if isinstance(string, six.text_type):
            encoded = string.encode('utf-8')
            decoded = string
        else:
            decoded = string.decode('UTF-8')
            encoded = string
        # round-trip must be lossless or the input wasn't canonical UTF-8
        if decoded.encode('UTF-8') != encoded:
            return False
        # A UTF-8 string with surrogates in it is invalid.
        #
        # Note: this check is only useful on Python 2.  On Python 3, a
        # bytestring holding an encoded surrogate codepoint (correctly)
        # fails the decode() above.
        #
        # Note 2: this check requires a wide build of Python 2; on narrow
        # builds astral characters are stored as surrogate pairs and would
        # be wrongly rejected here.  Most Linux distributions build Python 2
        # wide, and Python 3.3+ removed the distinction entirely.
        for codepoint in decoded:
            if 0xD800 <= ord(codepoint) <= 0xDFFF:
                return False
        # NULs are always rejected, even when they aren't the reserved byte
        if utils.RESERVED_BYTE != b'\x00' and b'\x00' in encoded:
            return False
        if internal:
            return True
        return utils.RESERVED_BYTE not in encoded
    except UnicodeError:
        # decode() of bytes raises UnicodeDecodeError and encode() of text
        # raises UnicodeEncodeError; UnicodeError covers both
        return False
|
Validate that the header contains valid account or container name.
:param req: HTTP request object
:param name: header value to validate
:param target_type: which header is being validated (Account or Container)
:returns: A properly encoded account name or container name
:raise HTTPPreconditionFailed: if account header
is not well formatted.
|
def check_name_format(req, name, target_type):
    """
    Validate that the header contains valid account or container name.

    :param req: HTTP request object
    :param name: header value to validate
    :param target_type: which header is being validated (Account or Container)
    :returns: A properly encoded account name or container name
    :raise HTTPPreconditionFailed: if account header
                                   is not well formatted.
    """
    if not name:
        raise HTTPPreconditionFailed(
            request=req,
            body='%s name cannot be empty' % target_type)
    # on py2, normalize unicode names to their utf-8 byte form
    if six.PY2 and isinstance(name, six.text_type):
        name = name.encode('utf-8')
    if '/' in name:
        raise HTTPPreconditionFailed(
            request=req,
            body='%s name cannot contain slashes' % target_type)
    return name
|
Checks if the requested version is valid.
Currently Swift only supports "v1" and "v1.0".
|
def valid_api_version(version):
    """
    Checks if the requested version is valid.

    Currently Swift only supports "v1" and "v1.0".

    :param version: API version string extracted from the request path
    :returns: True if ``version`` is an allowed API version
    """
    global VALID_API_VERSIONS
    versions = VALID_API_VERSIONS
    if not isinstance(versions, list):
        # tolerate a single-string setting by coercing it to a list once
        versions = [str(versions)]
        VALID_API_VERSIONS = versions
    return version in versions
|
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
|
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon ``klass`` and runs the
    daemon with the specified ``once`` kwarg.  The section_name will be
    derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator
    => object-replicator).

    :param klass: Class to instantiate, subclass of :class:`Daemon`
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon :meth:`Daemon.run` method
    :param kwargs: ``log_name``, ``logger`` and ``verbose`` are consumed
        here; everything else is passed through to the strategy's ``run``
    :returns: the daemon instance that was run
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    try:
        conf = utils.readconf(conf_file, section_name,
                              log_name=kwargs.get('log_name'))
    except (ValueError, IOError) as e:
        # The message will be printed to stderr
        # and results in an exit code of 1.
        sys.exit(e)
    # patch eventlet/logging early
    utils.monkey_patch()
    eventlet.hubs.use_hub(utils.get_hub())
    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
    # pre-configure logger: an explicitly supplied logger wins over one
    # built from the conf section
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)
    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)
    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()
    logger.notice('Starting %s', os.getpid())
    try:
        d = klass(conf)
        DaemonStrategy(d, logger).run(once=once, **kwargs)
    except KeyboardInterrupt:
        # NOTE(review): if the interrupt fires before klass(conf) completes,
        # 'd' is unbound and the return below raises NameError — confirm
        # this path is acceptable
        logger.info('User quit')
    logger.notice('Exited %s', os.getpid())
    return d
|
We've cargo culted our consumers to be tolerant of various expressions of
zero in our databases for backwards compatibility with less disciplined
producers.
|
def zero_like(count):
    """
    We've cargo culted our consumers to be tolerant of various expressions of
    zero in our databases for backwards compatibility with less disciplined
    producers.

    :param count: a count-like value read back from a database
    :returns: True if ``count`` is one of the recognized zero-equivalent
        values, False otherwise
    """
    # ZERO_LIKE_VALUES is presumably a module-level collection of
    # zero-equivalents defined elsewhere in this file; membership uses ==,
    # so e.g. 0 and 0.0 compare equal
    return count in ZERO_LIKE_VALUES
|
This should only be used when you need a real dict,
i.e. when you're going to serialize the results.
|
def dict_factory(crs, row):
    """
    Row factory that maps cursor column names onto row values.

    This should only be used when you need a real dict,
    i.e. when you're going to serialize the results.
    """
    # crs.description yields one tuple per column; element 0 is the name
    return {column[0]: row[i] for i, column in enumerate(crs.description)}
|
Each entry in the account and container databases is XORed by the 128-bit
hash on insert or delete. This serves as a rolling, order-independent hash
of the contents. (check + XOR)
:param old: hex representation of the current DB hash
:param name: name of the object or container being inserted
:param timestamp: internalized timestamp of the new record
:returns: a hex representation of the new hash value
|
def chexor(old, name, timestamp):
    """
    Each entry in the account and container databases is XORed by the 128-bit
    hash on insert or delete.  This serves as a rolling, order-independent
    hash of the contents. (check + XOR)

    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: internalized timestamp of the new record
    :returns: a hex representation of the new hash value
    """
    if name is None:
        raise Exception('name is None!')
    record = '%s-%s' % (name, timestamp)
    record_hash = md5(record.encode('utf8'), usedforsecurity=False)
    # XOR the two 128-bit values; re-applying the same record undoes it
    return '%032x' % (int(old, 16) ^ int(record_hash.hexdigest(), 16))
|
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
|
def get_db_connection(path, timeout=30, logger=None, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.

    :param path: path to DB
    :param timeout: timeout for connection
    :param logger: if provided (py3 only, and only when query logging is
        enabled), receives each executed SQL statement via ``logger.debug``
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    :raises DatabaseConnectionError: on sqlite3 errors, or when connect
        appears to have created a new empty DB file and ``okay_to_create``
        is False
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if QUERY_LOGGING and logger and not six.PY2:
            # set_trace_callback is py3-only in the stdlib sqlite3 module
            conn.set_trace_callback(logger.debug)
        if not okay_to_create:
            # attempt to detect and fail when connect creates the db file:
            # a zero-length file whose ctime is no older than our connect
            # call was almost certainly created by sqlite3.connect() itself,
            # so remove it and refuse the connection
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(path,
                                              'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        with closing(conn.cursor()) as cur:
            # pragmas trade some durability for speed; journal_mode DELETE
            # keeps the legacy rollback-journal behavior
            cur.execute('PRAGMA synchronous = NORMAL')
            cur.execute('PRAGMA count_changes = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = DELETE')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn
|
In the case that a corrupt file is found, move it to a quarantined area to
allow replication to fix it.
:param object_file: path to corrupt file
:param server_type: type of file that is corrupt
('container' or 'account')
|
def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area
    to allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    src_dir = os.path.dirname(object_file)
    # quarantine lives four levels up, alongside the datadir tree
    quarantine_dir = os.path.abspath(
        os.path.join(src_dir, '..', '..', '..', '..', 'quarantined',
                     server_type + 's', os.path.basename(src_dir)))
    try:
        renamer(src_dir, quarantine_dir, fsync=False)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # destination already occupied: retry with a unique suffixed name
        quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
        renamer(src_dir, quarantine_dir, fsync=False)
|
True if the directory name is a valid partition number, False otherwise.
|
def looks_like_partition(dir_name):
    """
    True if the directory name is a valid partition number, False otherwise.
    """
    try:
        # partitions are non-negative integers
        return int(dir_name) >= 0
    except ValueError:
        return False
|
Generator to walk the data dirs in a round robin manner, evenly
hitting each device on the system, and yielding any .db files
found (in their proper places). The partitions within each data
dir are walked randomly, however.
:param datadirs: a list of tuples of (path, context, partition_filter) to
walk. The context may be any object; the context is not
used by this function but is included with each yielded
tuple.
:returns: A generator of (partition, path_to_db_file, context)
|
def roundrobin_datadirs(datadirs):
    """
    Generator to walk the data dirs in a round robin manner, evenly
    hitting each device on the system, and yielding any .db files
    found (in their proper places). The partitions within each data
    dir are walked randomly, however.

    As a side effect, empty partition, suffix and hash directories
    encountered during the walk are removed.

    :param datadirs: a list of tuples of (path, context, partition_filter) to
                     walk. The context may be any object; the context is not
                     used by this function but is included with each yielded
                     tuple.
    :returns: A generator of (partition, path_to_db_file, context)
    """
    def walk_datadir(datadir, context, part_filter):
        # only descend into entries that look like partitions and pass
        # the caller-supplied filter
        partitions = [pd for pd in os.listdir(datadir)
                      if looks_like_partition(pd) and part_filter(pd)]
        random.shuffle(partitions)
        for partition in partitions:
            part_dir = os.path.join(datadir, partition)
            if not os.path.isdir(part_dir):
                continue
            suffixes = os.listdir(part_dir)
            if not suffixes:
                # opportunistic cleanup of empty partition dirs
                os.rmdir(part_dir)
                continue
            for suffix in suffixes:
                suff_dir = os.path.join(part_dir, suffix)
                if not os.path.isdir(suff_dir):
                    continue
                hashes = os.listdir(suff_dir)
                if not hashes:
                    # opportunistic cleanup of empty suffix dirs
                    os.rmdir(suff_dir)
                    continue
                for hsh in hashes:
                    hash_dir = os.path.join(suff_dir, hsh)
                    if not os.path.isdir(hash_dir):
                        continue
                    object_file = os.path.join(hash_dir, hsh + '.db')
                    # common case
                    if os.path.exists(object_file):
                        yield (partition, object_file, context)
                        continue
                    # look for any alternate db filenames; yield the last
                    # one get_db_files returns (presumably the most recent
                    # — confirm against get_db_files' ordering)
                    db_files = get_db_files(object_file)
                    if db_files:
                        yield (partition, db_files[-1], context)
                        continue
                    try:
                        # no db at all: try to reap the empty hash dir
                        os.rmdir(hash_dir)
                    except OSError as e:
                        # a file may have appeared since listdir; leave it
                        if e.errno != errno.ENOTEMPTY:
                            raise
    its = [walk_datadir(datadir, context, filt)
           for datadir, context, filt in datadirs]
    rr_its = round_robin_iter(its)
    for datadir in rr_its:
        yield datadir
|
Returns the hexdigest string of the HMAC (see RFC 2104) for
the request.
:param request_method: Request method to allow.
:param path: The path to the resource to allow access to.
:param expires: Unix timestamp as an int for when the URL
expires.
:param key: HMAC shared secret.
:param digest: constructor or the string name for the digest to use in
calculating the HMAC
Defaults to SHA1
:param ip_range: The ip range from which the resource is allowed
to be accessed. We need to put the ip_range as the
first argument to hmac to avoid manipulation of the path
due to newlines being valid in paths
e.g. /v1/a/c/o\n127.0.0.1
:returns: hexdigest str of the HMAC for the request using the specified
digest algorithm.
|
def get_hmac(request_method, path, expires, key, digest="sha1",
             ip_range=None):
    """
    Returns the hexdigest string of the HMAC (see RFC 2104) for
    the request.

    :param request_method: Request method to allow.
    :param path: The path to the resource to allow access to.
    :param expires: Unix timestamp as an int for when the URL
                    expires.
    :param key: HMAC shared secret.
    :param digest: constructor or the string name for the digest to use in
                   calculating the HMAC
                   Defaults to SHA1
    :param ip_range: The ip range from which the resource is allowed
                     to be accessed. We need to put the ip_range as the
                     first argument to hmac to avoid manipulation of the path
                     due to newlines being valid in paths
                     e.g. /v1/a/c/o\\n127.0.0.1
    :returns: hexdigest str of the HMAC for the request using the specified
              digest algorithm.
    """
    # These are the three mandatory fields, in signing order.
    fields = [(b"%s", request_method), (b"%s", str(expires)), (b"%s", path)]
    if ip_range:
        # the tagged ip_range goes first so a newline embedded in the path
        # cannot forge an "ip=" field
        fields.insert(0, (b"ip=%s", ip_range))
    if not isinstance(key, six.binary_type):
        key = key.encode('utf8')
    pieces = []
    for fmt, part in fields:
        if not isinstance(part, six.binary_type):
            part = part.encode("utf-8")
        pieces.append(fmt % part)
    message = b'\n'.join(pieces)
    if six.PY2 and isinstance(digest, six.string_types):
        # py2's hmac can't resolve digest names itself
        digest = getattr(hashlib, digest)
    return hmac.new(key, message, digest).hexdigest()
|
Pulls out 'allowed_digests' from the supplied conf. Then compares them with
the list of supported and deprecated digests and returns whatever remain.
When something is unsupported or deprecated it'll log a warning.
:param conf_digests: iterable of allowed digests. If empty, defaults to
DEFAULT_ALLOWED_DIGESTS.
:param logger: optional logger; if provided, use it to issue deprecation
warnings
:returns: A set of allowed digests that are supported and a set of
deprecated digests.
:raises: ValueError, if there are no digests left to return.
|
def get_allowed_digests(conf_digests, logger=None):
    """
    Pulls out 'allowed_digests' from the supplied conf.  Then compares them
    with the list of supported and deprecated digests and returns whatever
    remain.  When something is unsupported or deprecated it'll log a warning.

    :param conf_digests: iterable of allowed digests. If empty, defaults to
                         SUPPORTED_DIGESTS.
    :param logger: optional logger; if provided, use it to issue deprecation
                   warnings
    :returns: A set of allowed digests that are supported and a set of
              deprecated digests.
    :raises: ValueError, if there are no digests left to return.
    """
    # NOTE(review): if conf_digests is a generator it is exhausted here, so
    # the 'not conf_digests' default-vs-configured test below would see a
    # truthy (spent) generator — callers appear to pass lists; confirm.
    allowed_digests = set(digest.lower() for digest in conf_digests)
    if not allowed_digests:
        # take a copy: the in-place -= below must never mutate the
        # module-level SUPPORTED_DIGESTS constant through an alias
        allowed_digests = set(SUPPORTED_DIGESTS)

    not_supported = allowed_digests - SUPPORTED_DIGESTS
    if not_supported:
        if logger:
            logger.warning('The following digest algorithms are configured '
                           'but not supported: %s', ', '.join(not_supported))
        allowed_digests -= not_supported
    deprecated = allowed_digests & DEPRECATED_DIGESTS
    if deprecated and logger:
        if not conf_digests:
            logger.warning('The following digest algorithms are allowed by '
                           'default but deprecated: %s. Support will be '
                           'disabled by default in a future release, and '
                           'later removed entirely.', ', '.join(deprecated))
        else:
            logger.warning('The following digest algorithms are configured '
                           'but deprecated: %s. Support will be removed in a '
                           'future release.', ', '.join(deprecated))
    if not allowed_digests:
        raise ValueError('No valid digest algorithms are configured')
    return allowed_digests, deprecated
|
Returns a tuple of (digest_algorithm, hex_encoded_digest)
from a client-provided string of the form::
<hex-encoded digest>
or::
<algorithm>:<base64-encoded digest>
Note that hex-encoded strings must use one of sha1, sha256, or sha512.
:raises: ValueError on parse failures
|
def extract_digest_and_algorithm(value):
    """
    Returns a tuple of (digest_algorithm, hex_encoded_digest)
    from a client-provided string of the form::

        <hex-encoded digest>

    or::

        <algorithm>:<base64-encoded digest>

    Note that hex-encoded strings must use one of sha1, sha256, or sha512.

    :raises: ValueError on parse failures
    """
    if ':' in value:
        algo, value = value.split(':', 1)
        # accept both standard and url-safe base64
        has_urlsafe_chars = '-' in value or '_' in value
        has_standard_chars = '+' in value or '/' in value
        if has_urlsafe_chars and not has_standard_chars:
            value = value.replace('-', '+').replace('_', '/')
        value = binascii.hexlify(strict_b64decode(value + '=='))
        if not six.PY2:
            value = value.decode('ascii')
    else:
        try:
            binascii.unhexlify(value)  # make sure it decodes
        except TypeError:
            # py2 raises TypeError rather than ValueError here
            raise ValueError('Non-hexadecimal digit found')
        # infer the algorithm from the hex digest's length
        algo = {40: 'sha1', 64: 'sha256', 128: 'sha512'}.get(len(value))
        if not algo:
            raise ValueError('Bad digest length')
    return algo, value
|
Make request to backend storage node.
(i.e. 'Account', 'Container', 'Object')
:param node: a node dict from a ring
:param part: an integer, the partition number
:param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
:param path: a string, the request path
:param headers: a dict, header name => value
:param stype: a string, describing the type of service
:param conn_timeout: timeout while waiting for connection; default is 5
seconds
:param response_timeout: timeout while waiting for response; default is 15
seconds
:param send_timeout: timeout for sending request body; default is 15
seconds
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param chunk_size: if defined, chunk size of data to send
:returns: an HTTPResponse object
:raises DirectClientException: if the response status is not 2xx
:raises eventlet.Timeout: if either conn_timeout or response_timeout is
exceeded
|
def _make_req(node, part, method, path, headers, stype,
              conn_timeout=5, response_timeout=15, send_timeout=15,
              contents=None, content_length=None, chunk_size=65535):
    """
    Make request to backend storage node.
    (i.e. 'Account', 'Container', 'Object')

    Note that the supplied ``headers`` dict is mutated in place (e.g.
    Content-Length / Transfer-Encoding may be added), and the response body
    is always read and discarded before returning.

    :param node: a node dict from a ring
    :param part: an integer, the partition number
    :param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
    :param headers: a dict, header name => value
    :param stype: a string, describing the type of service
    :param conn_timeout: timeout while waiting for connection; default is 5
                         seconds
    :param response_timeout: timeout while waiting for response; default is
                             15 seconds
    :param send_timeout: timeout for sending request body; default is 15
                         seconds
    :param contents: an iterable or string to read object data from
    :param content_length: value to send as content-length header
    :param chunk_size: if defined, chunk size of data to send
    :returns: an HTTPResponse object
    :raises DirectClientException: if the response status is not 2xx
    :raises eventlet.Timeout: if either conn_timeout or response_timeout is
                              exceeded
    """
    if contents is not None:
        if content_length is not None:
            headers['Content-Length'] = str(content_length)
        else:
            # fall back to any content-length the caller already set
            for n, v in headers.items():
                if n.lower() == 'content-length':
                    content_length = int(v)
        if not contents:
            headers['Content-Length'] = '0'
        if isinstance(contents, six.string_types):
            # wrap a bare string so it can be read like a file below
            contents = [contents]
        if content_length is None:
            # length unknown: stream the body with chunked transfer-encoding
            headers['Transfer-Encoding'] = 'chunked'
    ip, port = get_ip_port(node, headers)
    headers.setdefault('X-Backend-Allow-Reserved-Names', 'true')
    with Timeout(conn_timeout):
        conn = http_connect(ip, port, node['device'], part,
                            method, path, headers=headers)
    if contents is not None:
        contents_f = FileLikeIter(contents)
        with Timeout(send_timeout):
            if content_length is None:
                # chunked framing: <hex size>\r\n<data>\r\n per chunk,
                # terminated by the zero-length chunk
                chunk = contents_f.read(chunk_size)
                while chunk:
                    conn.send(b'%x\r\n%s\r\n' % (len(chunk), chunk))
                    chunk = contents_f.read(chunk_size)
                conn.send(b'0\r\n\r\n')
            else:
                # send at most content_length bytes, stopping early if the
                # source runs dry
                left = content_length
                while left > 0:
                    size = chunk_size
                    if size > left:
                        size = left
                    chunk = contents_f.read(size)
                    if not chunk:
                        break
                    conn.send(chunk)
                    left -= len(chunk)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        # drain the body so callers only consume status/headers
        resp.read()
    if not is_success(resp.status):
        raise DirectClientException(stype, method, node, part, path, resp)
    return resp
|
Base function for get direct account and container.
Do not use directly; use direct_get_account or
direct_get_container instead.
|
def _get_direct_account_container(path, stype, node, part,
                                  marker=None, limit=None,
                                  prefix=None, delimiter=None,
                                  conn_timeout=5, response_timeout=15,
                                  end_marker=None, reverse=None, headers=None,
                                  extra_params=None):
    """Base function for get direct account and container.

    Do not use directly; use direct_get_account or
    direct_get_container instead.
    """
    if headers is None:
        headers = {}
    params = {'format': 'json'}
    if extra_params:
        for key, value in extra_params.items():
            if value is not None:
                params[key] = value
    # fold the explicit keyword args into params in a fixed order,
    # refusing any that were also supplied via extra_params; limit is
    # formatted as a plain integer, everything else is url-quoted
    keyword_args = (('marker', marker, quote),
                    ('limit', limit, lambda v: '%d' % v),
                    ('prefix', prefix, quote),
                    ('delimiter', delimiter, quote),
                    ('end_marker', end_marker, quote),
                    ('reverse', reverse, quote))
    for name, value, encoder in keyword_args:
        if value:
            if name in params:
                raise TypeError('duplicate values for keyword arg: %s' % name)
            params[name] = encoder(value)
    qs = '&'.join('%s=%s' % (k, v) for k, v in params.items())
    ip, port = get_ip_port(node, headers)
    with Timeout(conn_timeout):
        conn = http_connect(ip, port, node['device'], part,
                            'GET', path, query_string=qs,
                            headers=gen_headers(hdrs_in=headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        resp.read()
        raise DirectClientException(stype, 'GET', node, part, path, resp)
    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    if resp.status == HTTP_NO_CONTENT:
        resp.read()
        return resp_headers, []
    return resp_headers, json.loads(resp.read())
|
Get the headers ready for a request. All requests should have a User-Agent
string, but if one is passed in don't over-write it. Not all requests will
need an X-Timestamp, but if one is passed in do not over-write it.
:param headers: dict or None, base for HTTP headers
:param add_ts: boolean, should be True for any "unsafe" HTTP request
:returns: HeaderKeyDict based on headers and ready for the request
|
def gen_headers(hdrs_in=None, add_ts=True):
    """
    Get the headers ready for a request. All requests should have a User-Agent
    string, but if one is passed in don't over-write it. Not all requests will
    need an X-Timestamp, but if one is passed in do not over-write it.

    :param hdrs_in: dict or None, base for HTTP headers
    :param add_ts: boolean, should be True for any "unsafe" HTTP request
    :returns: HeaderKeyDict based on headers and ready for the request
    """
    hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
    # only stamp when the caller hasn't already pinned a timestamp
    if add_ts and 'X-Timestamp' not in hdrs_out:
        hdrs_out['X-Timestamp'] = Timestamp.now().internal
    if 'user-agent' not in hdrs_out:
        hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
    hdrs_out.setdefault('X-Backend-Allow-Reserved-Names', 'true')
    return hdrs_out
|
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
:returns: a tuple of (response headers, a list of containers). The response
headers will be a HeaderKeyDict.
|
def direct_get_account(node, part, account, marker=None, limit=None,
                       prefix=None, delimiter=None, conn_timeout=5,
                       response_timeout=15, end_marker=None, reverse=None,
                       headers=None):
    """
    Get listings directly from the account server.

    :param node: node dictionary from the ring
    :param part: partition the account is on
    :param account: account name
    :param marker: marker query
    :param limit: query limit
    :param prefix: prefix query
    :param delimiter: delimiter for the query
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param end_marker: end_marker query
    :param reverse: reverse the returned listing
    :param headers: headers to be included in the request
    :returns: a tuple of (response headers, a list of containers).  The
              response headers will be a HeaderKeyDict.
    """
    path = _make_path(account)
    return _get_direct_account_container(
        path, "Account", node, part,
        marker=marker, limit=limit, prefix=prefix, delimiter=delimiter,
        end_marker=end_marker, reverse=reverse, headers=headers,
        conn_timeout=conn_timeout, response_timeout=response_timeout)
|
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
|
def direct_head_container(node, part, account, container, conn_timeout=5,
                          response_timeout=15, headers=None):
    """
    Request container information directly from the container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :returns: a dict containing the response's headers in a HeaderKeyDict
    :raises ClientException: HTTP HEAD request failed
    """
    req_headers = gen_headers(headers if headers is not None else {})
    resp = _make_req(node, part, 'HEAD', _make_path(account, container),
                     req_headers, 'Container', conn_timeout, response_timeout)
    # Copy the response headers into a case-insensitive dict for the caller.
    result = HeaderKeyDict()
    for name, value in resp.getheaders():
        result[name] = value
    return result
|
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
:param headers: headers to be included in the request
:param extra_params: a dict of extra parameters to be included in the
request. It can be used to pass additional parameters, e.g,
{'states':'updating'} can be used with shard_range/namespace listing.
It can also be used to pass the existing keyword args, like 'marker' or
'limit', but if the same parameter appears twice in both keyword arg
(not None) and extra_params, this function will raise TypeError.
:returns: a tuple of (response headers, a list of objects) The response
headers will be a HeaderKeyDict.
|
def direct_get_container(node, part, account, container, marker=None,
                         limit=None, prefix=None, delimiter=None,
                         conn_timeout=5, response_timeout=15, end_marker=None,
                         reverse=None, headers=None, extra_params=None):
    """
    Get container listings directly from the container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param marker: marker query
    :param limit: query limit
    :param prefix: prefix query
    :param delimiter: delimiter for the query
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param end_marker: end_marker query
    :param reverse: reverse the returned listing
    :param headers: headers to be included in the request
    :param extra_params: a dict of extra parameters to be included in the
        request. It can be used to pass additional parameters, e.g,
        {'states':'updating'} can be used with shard_range/namespace listing.
        It can also be used to pass the existing keyword args, like 'marker'
        or 'limit', but if the same parameter appears twice in both keyword
        arg (not None) and extra_params, this function will raise TypeError.
    :returns: a tuple of (response headers, a list of objects). The response
              headers will be a HeaderKeyDict.
    """
    # Same shared helper as direct_get_account, but with a container path
    # and support for extra query parameters (e.g. sharding states).
    return _get_direct_account_container(
        _make_path(account, container), "Container", node, part,
        marker=marker, limit=limit, prefix=prefix, delimiter=delimiter,
        end_marker=end_marker, reverse=reverse,
        conn_timeout=conn_timeout, response_timeout=response_timeout,
        headers=headers, extra_params=extra_params)
|
Delete container directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:raises ClientException: HTTP DELETE request failed
|
def direct_delete_container(node, part, account, container, conn_timeout=5,
                            response_timeout=15, headers=None):
    """
    Delete container directly from the container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :raises ClientException: HTTP DELETE request failed
    """
    headers = headers if headers is not None else {}
    # Only stamp an X-Timestamp if the caller didn't supply one already.
    need_ts = not any(k.lower() == 'x-timestamp' for k in headers)
    _make_req(node, part, 'DELETE', _make_path(account, container),
              gen_headers(headers, need_ts),
              'Container', conn_timeout, response_timeout)
|
Make a PUT request to a container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: additional headers to include in the request
:param contents: an iterable or string to send in request body (optional)
:param content_length: value to send as content-length header (optional)
:param chunk_size: chunk size of data to send (optional)
:raises ClientException: HTTP PUT request failed
|
def direct_put_container(node, part, account, container, conn_timeout=5,
                         response_timeout=15, headers=None, contents=None,
                         content_length=None, chunk_size=65535):
    """
    Make a PUT request to a container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: additional headers to include in the request
    :param contents: an iterable or string to send in request body (optional)
    :param content_length: value to send as content-length header (optional)
    :param chunk_size: chunk size of data to send (optional)
    :raises ClientException: HTTP PUT request failed
    """
    headers = headers if headers is not None else {}
    # Respect a caller-supplied X-Timestamp; otherwise stamp the request now.
    have_ts = any(k.lower() == 'x-timestamp' for k in headers)
    _make_req(node, part, 'PUT', _make_path(account, container),
              gen_headers(headers, add_ts=not have_ts), 'Container',
              conn_timeout, response_timeout, contents=contents,
              content_length=content_length, chunk_size=chunk_size)
|
Make a POST request to a container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: additional headers to include in the request
:raises ClientException: HTTP POST request failed
|
def direct_post_container(node, part, account, container, conn_timeout=5,
                          response_timeout=15, headers=None):
    """
    Make a POST request to a container server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: additional headers to include in the request
    :raises ClientException: HTTP POST request failed
    """
    headers = headers if headers is not None else {}
    # Respect a caller-supplied X-Timestamp; otherwise stamp the request now.
    have_ts = any(k.lower() == 'x-timestamp' for k in headers)
    return _make_req(node, part, 'POST', _make_path(account, container),
                     gen_headers(headers, add_ts=not have_ts), 'Container',
                     conn_timeout, response_timeout)
|
Request object information directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
|
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
                       response_timeout=15, headers=None):
    """
    Request object information directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: a dict containing the response's headers in a HeaderKeyDict
    :raises ClientException: HTTP HEAD request failed
    """
    req_headers = gen_headers(headers if headers is not None else {})
    resp = _make_req(node, part, 'HEAD',
                     _make_path(account, container, obj),
                     req_headers, 'Object', conn_timeout, response_timeout)
    # Copy the response headers into a case-insensitive dict for the caller.
    result = HeaderKeyDict()
    for name, value in resp.getheaders():
        result[name] = value
    return result
|
Get object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a HeaderKeyDict.
:raises ClientException: HTTP GET request failed
|
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
                      response_timeout=15, resp_chunk_size=None, headers=None):
    """
    Get object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param resp_chunk_size: if defined, chunk size of data to read; the body
        is then returned as a generator that reads lazily from the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: a tuple of (response headers, the object's contents) The response
        headers will be a HeaderKeyDict.
    :raises ClientException: HTTP GET request failed
    """
    if headers is None:
        headers = {}
    # headers may select the replication network; get_ip_port resolves
    # which ip/port of the node to use accordingly.
    ip, port = get_ip_port(node, headers)
    path = _make_path(account, container, obj)
    with Timeout(conn_timeout):
        conn = http_connect(ip, port, node['device'], part,
                            'GET', path, headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        # drain the body so the connection can be reused before raising
        resp.read()
        raise DirectClientException('Object', 'GET', node, part, path, resp)
    if resp_chunk_size:
        # Lazy generator: nothing is read from the wire until the caller
        # iterates, and note the reads are not guarded by response_timeout.
        def _object_body():
            buf = resp.read(resp_chunk_size)
            while buf:
                yield buf
                buf = resp.read(resp_chunk_size)
        object_body = _object_body()
    else:
        # No chunk size: buffer the entire object body in memory.
        object_body = resp.read()
    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    return resp_headers, object_body
|
Put object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
:raises ClientException: HTTP PUT request failed
|
def direct_put_object(node, part, account, container, name, contents,
                      content_length=None, etag=None, content_type=None,
                      headers=None, conn_timeout=5, response_timeout=15,
                      chunk_size=65535):
    """
    Put object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param contents: an iterable or string to read object data from
    :param content_length: value to send as content-length header
    :param etag: etag of contents
    :param content_type: value to send as content-type header
    :param headers: additional headers to include in the request
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param chunk_size: if defined, chunk size of data to send.
    :returns: etag from the server response
    :raises ClientException: HTTP PUT request failed
    """
    if headers is None:
        headers = {}
    if etag:
        headers['ETag'] = normalize_etag(etag)
    headers['Content-Type'] = (content_type if content_type is not None
                               else 'application/octet-stream')
    # Honor a caller-supplied X-Timestamp so an object can be inserted
    # with a specific age; otherwise gen_headers stamps the request now.
    resp = _make_req(
        node, part, 'PUT', _make_path(account, container, name),
        gen_headers(headers, add_ts='X-Timestamp' not in headers),
        'Object', conn_timeout, response_timeout, contents=contents,
        content_length=content_length, chunk_size=chunk_size)
    return normalize_etag(resp.getheader('etag'))
|
Direct update to object metadata on object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
|
def direct_post_object(node, part, account, container, name, headers,
                       conn_timeout=5, response_timeout=15):
    """
    Direct update to object metadata on object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param headers: headers to store as metadata
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP POST request failed
    """
    # A POST always gets a fresh X-Timestamp (add_ts=True).
    _make_req(node, part, 'POST', _make_path(account, container, name),
              gen_headers(headers, True), 'Object',
              conn_timeout, response_timeout)
|
Delete object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP DELETE request failed
|
def direct_delete_object(node, part, account, container, obj,
                         conn_timeout=5, response_timeout=15, headers=None):
    """
    Delete object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP DELETE request failed
    """
    caller_headers = headers if headers is not None else {}
    # Only stamp an X-Timestamp if the caller didn't supply one already.
    lower_keys = {k.lower() for k in caller_headers}
    req_headers = gen_headers(caller_headers,
                              add_ts='x-timestamp' not in lower_keys)
    _make_req(node, part, 'DELETE', _make_path(account, container, obj),
              req_headers, 'Object', conn_timeout, response_timeout)
|
Get suffix hashes directly from the object server.
Note that unlike other ``direct_client`` functions, this one defaults
to using the replication network to make requests.
:param node: node dictionary from the ring
:param part: partition the container is on
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: dict of suffix hashes
:raises ClientException: HTTP REPLICATE request failed
|
def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
                             response_timeout=15, headers=None):
    """
    Get suffix hashes directly from the object server.

    Note that unlike other ``direct_client`` functions, this one defaults
    to using the replication network to make requests.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param suffixes: a list of suffixes to be joined into the request path
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: dict of suffix hashes
    :raises ClientException: HTTP REPLICATE request failed
    """
    if headers is None:
        headers = {}
    # setdefault so a caller can still opt out of the replication network
    headers.setdefault(USE_REPLICATION_NETWORK_HEADER, 'true')
    ip, port = get_ip_port(node, headers)
    path = '/%s' % '-'.join(suffixes)
    with Timeout(conn_timeout):
        conn = http_connect(ip, port,
                            node['device'], part, 'REPLICATE', path,
                            headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        raise DirectClientException('Object', 'REPLICATE',
                                    node, part, path, resp,
                                    host={'ip': node['replication_ip'],
                                          'port': node['replication_port']}
                                    )
    # NOTE: the REPLICATE body is a pickle; this is only safe because the
    # peer is a trusted object server on the replication network.
    return pickle.loads(resp.read())
|
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
:param kwargs: keyword arguments to send to func (if retries or
error_log are sent, they will be deleted from kwargs
before sending on to func)
:returns: result of func
:raises ClientException: all retries failed
|
def retry(func, *args, **kwargs):
    """
    Helper function to retry a given function a number of times.

    :param func: callable to be called
    :param retries: number of retries
    :param error_log: logger for errors
    :param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
                   error_log are sent, they will be deleted from kwargs
                   before sending on to func)
    :returns: result of func
    :raises ClientException: all retries failed
    """
    # retries/error_log are consumed here; the remaining kwargs go to func
    retries = kwargs.pop('retries', 5)
    error_log = kwargs.pop('error_log', None)
    attempts = 0
    backoff = 1
    while attempts <= retries:
        attempts += 1
        try:
            return attempts, func(*args, **kwargs)
        except (socket.error, HTTPException, Timeout) as err:
            # transport-level failures are always worth retrying
            if error_log:
                error_log(err)
            if attempts > retries:
                raise
        except ClientException as err:
            # HTTP errors are retried only for 5xx other than 507
            # (insufficient storage won't get better by retrying)
            if error_log:
                error_log(err)
            if attempts > retries or not is_server_error(err.http_status) or \
                    err.http_status == HTTP_INSUFFICIENT_STORAGE:
                raise
        # exponential backoff between attempts: 1s, 2s, 4s, ...
        sleep(backoff)
        backoff *= 2
    # Shouldn't actually get down here, but just in case.
    if args and 'ip' in args[0]:
        # first positional arg looks like a node dict; include its address
        raise ClientException('Raise too many retries',
                              http_host=args[0]['ip'],
                              http_port=args[0]['port'],
                              http_device=args[0]['device'])
    else:
        raise ClientException('Raise too many retries')
|
Get recon json directly from the storage server.
:param node: node dictionary from the ring
:param recon_command: recon string (post /recon/)
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: deserialized json response
:raises DirectClientReconException: HTTP GET request failed
|
def direct_get_recon(node, recon_command, conn_timeout=5, response_timeout=15,
                     headers=None):
    """
    Get recon json directly from the storage server.

    :param node: node dictionary from the ring
    :param recon_command: recon string (post /recon/)
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: deserialized json response
    :raises DirectClientReconException: HTTP GET request failed
    """
    if headers is None:
        headers = {}
    ip, port = get_ip_port(node, headers)
    recon_path = '/recon/%s' % recon_command
    with Timeout(conn_timeout):
        conn = http_connect_raw(ip, port, 'GET', recon_path,
                                headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        raise DirectClientReconException('GET', node, recon_path, resp)
    return json.loads(resp.read())
|
Check if HTTP status code is informational.
:param status: http status code
:returns: True if status is informational, else False
|
def is_informational(status):
    """
    Check if HTTP status code is informational.

    :param status: http status code
    :returns: True if status is informational, else False
    """
    # 1xx range: provisional responses (100 Continue, 101, ...)
    return 100 <= status <= 199
|
Check if HTTP status code is successful.
:param status: http status code
:returns: True if status is successful, else False
|
def is_success(status):
    """
    Check if HTTP status code is successful.

    :param status: http status code
    :returns: True if status is successful, else False
    """
    # success means anywhere in the 2xx range
    return status // 100 == 2
|
Check if HTTP status code is redirection.
:param status: http status code
:returns: True if status is redirection, else False
|
def is_redirection(status):
    """
    Check if HTTP status code is redirection.

    :param status: http status code
    :returns: True if status is redirection, else False
    """
    # redirection means anywhere in the 3xx range
    return status // 100 == 3
|
Check if HTTP status code is client error.
:param status: http status code
:returns: True if status is client error, else False
|
def is_client_error(status):
    """
    Check if HTTP status code is client error.

    :param status: http status code
    :returns: True if status is client error, else False
    """
    # client error means anywhere in the 4xx range
    return status // 100 == 4
|
Check if HTTP status code is server error.
:param status: http status code
:returns: True if status is server error, else False
|
def is_server_error(status):
    """
    Check if HTTP status code is server error.

    :param status: http status code
    :returns: True if status is server error, else False
    """
    # server error means anywhere in the 5xx range
    return status // 100 == 5
|
For usage with container sync
|
def head_object(url, **kwargs):
    """For usage with container sync """
    # thin convenience wrapper: one-shot client per call
    return SimpleClient(url=url).retry_request('HEAD', **kwargs)
|
For usage with container sync
|
def put_object(url, **kwargs):
    """For usage with container sync """
    # thin convenience wrapper: one-shot client per call, result discarded
    SimpleClient(url=url).retry_request('PUT', **kwargs)
|
For usage with container sync
|
def delete_object(url, **kwargs):
    """For usage with container sync """
    # thin convenience wrapper: one-shot client per call, result discarded
    SimpleClient(url=url).retry_request('DELETE', **kwargs)
|
Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
|
def setup_env():
    """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
    """
    # Each limit is raised independently and best-effort: setrlimit raises
    # ValueError when the process lacks privilege, so a failure on one limit
    # must not prevent trying the others.
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
    except ValueError:
        print("WARNING: Unable to modify file descriptor limit. "
              "Running as non-root?")
    try:
        resource.setrlimit(resource.RLIMIT_DATA,
                           (MAX_MEMORY, MAX_MEMORY))
    except ValueError:
        print("WARNING: Unable to modify memory limit. "
              "Running as non-root?")
    try:
        resource.setrlimit(resource.RLIMIT_NPROC,
                           (MAX_PROCS, MAX_PROCS))
    except ValueError:
        print("WARNING: Unable to modify max process limit. "
              "Running as non-root?")

    # Set PYTHON_EGG_CACHE if it isn't already set
    os.environ.setdefault('PYTHON_EGG_CACHE', tempfile.gettempdir())
|
Decorator to declare which methods are accessible as commands, commands
always return 1 or 0, where 0 should indicate success.
:param func: function to make public
|
def command(func):
    """
    Decorator to declare which methods are accessible as commands, commands
    always return 1 or 0, where 0 should indicate success.

    :param func: function to make public
    """
    # mark the function so the manager can discover it as a command
    func.publicly_accessible = True

    @functools.wraps(func)
    def wrapped(self, *a, **kw):
        result = func(self, *a, **kw)
        if not self.servers:
            # no servers to act on is always a failure
            return 1
        # normalize any truthy result to 1 (failure), falsy to 0 (success)
        return 0 if not result else 1
    return wrapped
|
Monitor a collection of server pids yielding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
|
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Monitor a collection of server pids yielding back those pids that
    aren't responding to signals.

    :param server_pids: a dict, lists of pids [int,...] keyed on
                        Server objects
    :param interval: seconds to keep watching before giving up
    :param kwargs: passed through to each server's get_running_pids
    """
    status = {}
    start = time.time()
    end = start + interval
    server_pids = dict(server_pids)  # make a copy
    while True:
        for server, pids in server_pids.items():
            for pid in pids:
                try:
                    # let pid stop if it wants to
                    os.waitpid(pid, os.WNOHANG)
                except OSError as e:
                    if e.errno not in (errno.ECHILD, errno.ESRCH):
                        raise  # else no such child/process
            # check running pids for server
            status[server] = server.get_running_pids(**kwargs)
            for pid in pids:
                # original pids no longer in running pids!
                if pid not in status[server]:
                    yield server, pid
            # update active pids list using running_pids
            server_pids[server] = status[server]
        if not [p for server, pids in status.items() for p in pids]:
            # no more running pids
            break
        if time.time() > end:
            # watched long enough; stop even if pids are still running
            break
        else:
            time.sleep(0.1)
|
Send signal to process and check process name
: param pid: process id
: param sig: signal to send
: param name: name to ensure target process
|
def safe_kill(pid, sig, name):
    """Send signal to process and check process name

    :param pid: process id
    :param sig: signal to send
    :param name: name to ensure target process
    :raises InvalidPidFileException: if the pid is now owned by a process
        whose cmdline does not contain ``name`` (stale pid file)
    """
    # check process name for SIG_DFL
    # (only the "is it still alive?" probe verifies the name; real signals
    # are sent unconditionally)
    if sig == signal.SIG_DFL:
        try:
            proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
            if os.path.exists(proc_file):
                with open(proc_file, 'r') as fd:
                    if name not in fd.read():
                        # unknown process is using the pid
                        raise InvalidPidFileException()
        except IOError:
            # /proc read failed; fall through and let os.kill decide
            pass

    os.kill(pid, sig)
|
Send signal to process group
: param pid: process id
: param sig: signal to send
|
def kill_group(pid, sig):
    """Send signal to process group

    :param pid: process id
    :param sig: signal to send
    """
    # os.kill with a negative pid delivers the signal to the whole
    # process group led by pid
    group = -pid
    os.kill(group, sig)
|
Get the current set of all child PIDs for a PID.
:param pid: process id
|
def get_child_pids(pid):
    """
    Get the current set of all child PIDs for a PID.

    :param pid: process id
    :returns: set of int child pids
    """
    # ask ps for the direct children; --no-headers keeps output parseable
    ps_output = subprocess.check_output(
        ["ps", "--ppid", str(pid), "--no-headers", "-o", "pid"])
    return set(int(token) for token in ps_output.split())
|
Formats server name as swift compatible server names
E.g. swift-object-server
:param servername: server name
:returns: swift compatible server name and its binary name
|
def format_server_name(servername):
    """
    Formats server name as swift compatible server names
    E.g. swift-object-server

    :param servername: server name
    :returns: swift compatible server name and its binary name
    """
    # drop any ".N" instance suffix (e.g. "object.1" -> "object")
    base = servername.partition('.')[0]
    # bare daemon names get the conventional "-server" suffix
    if '-' not in base:
        base = '%s-server' % base
    return base, 'swift-%s' % base
|
Check whether the server is among swift servers or not, and also
checks whether the server's binaries are installed or not.
:param server: name of the server
:returns: True, when the server name is valid and its binaries are found.
False, otherwise.
|
def verify_server(server):
    """
    Check whether the server is among swift servers or not, and also
    checks whether the server's binaries are installed or not.

    :param server: name of the server
    :returns: True, when the server name is valid and its binaries are found.
              False, otherwise.
    """
    if not server:
        return False
    # the binary must be on PATH for the server to be usable
    _, cmd = format_server_name(server)
    return which(cmd) is not None
|
Sanitize a timeout value to use an absolute expiration time if the delta
is greater than 30 days (in seconds). Note that the memcached server
translates negative values to mean a delta of 30 days in seconds (and 1
additional second), client beware.
|
def sanitize_timeout(timeout):
    """
    Sanitize a timeout value to use an absolute expiration time if the delta
    is greater than 30 days (in seconds). Note that the memcached server
    translates negative values to mean a delta of 30 days in seconds (and 1
    additional second), client beware.
    """
    if timeout > EXPTIME_MAXDELTA:
        # memcached treats values above the max delta as absolute unix
        # timestamps, so convert the delta into one
        timeout = tm.time() + timeout
    return int(timeout)
|
Build a MemcacheRing object from the given config. It will also use the
passed in logger.
:param conf: a dict, the config options
:param logger: a logger
|
def load_memcache(conf, logger):
    """
    Build a MemcacheRing object from the given config. It will also use the
    passed in logger.

    Option resolution order (most specific wins): proxy filter section
    (``conf``), then ``memcache.conf``, then the module defaults.

    :param conf: a dict, the config options
    :param logger: a logger
    """
    memcache_servers = conf.get('memcache_servers')
    try:
        # Originally, while we documented using memcache_max_connections
        # we only accepted max_connections
        max_conns = int(conf.get('memcache_max_connections',
                                 conf.get('max_connections', 0)))
    except ValueError:
        max_conns = 0

    memcache_options = {}
    if (not memcache_servers
            or max_conns <= 0):
        # fall back to memcache.conf for anything the filter section
        # didn't provide
        path = os.path.join(conf.get('swift_dir', '/etc/swift'),
                            'memcache.conf')
        memcache_conf = ConfigParser()
        if memcache_conf.read(path):
            # if memcache.conf exists we'll start with those base options
            try:
                memcache_options = dict(memcache_conf.items('memcache'))
            except NoSectionError:
                pass

            if not memcache_servers:
                try:
                    memcache_servers = \
                        memcache_conf.get('memcache', 'memcache_servers')
                except (NoSectionError, NoOptionError):
                    pass
            if max_conns <= 0:
                try:
                    new_max_conns = \
                        memcache_conf.get('memcache',
                                          'memcache_max_connections')
                    max_conns = int(new_max_conns)
                except (NoSectionError, NoOptionError, ValueError):
                    pass

    # while memcache.conf options are the base for the memcache
    # middleware, if you set the same option also in the filter
    # section of the proxy config it is more specific.
    memcache_options.update(conf)
    connect_timeout = float(memcache_options.get(
        'connect_timeout', CONN_TIMEOUT))
    pool_timeout = float(memcache_options.get(
        'pool_timeout', POOL_TIMEOUT))
    tries = int(memcache_options.get('tries', TRY_COUNT))
    io_timeout = float(memcache_options.get('io_timeout', IO_TIMEOUT))
    if config_true_value(memcache_options.get('tls_enabled', 'false')):
        tls_cafile = memcache_options.get('tls_cafile')
        tls_certfile = memcache_options.get('tls_certfile')
        tls_keyfile = memcache_options.get('tls_keyfile')
        tls_context = ssl.create_default_context(
            cafile=tls_cafile)
        if tls_certfile:
            # client certificate for mutual TLS
            tls_context.load_cert_chain(tls_certfile, tls_keyfile)
    else:
        tls_context = None
    error_suppression_interval = float(memcache_options.get(
        'error_suppression_interval', ERROR_LIMIT_TIME))
    error_suppression_limit = float(memcache_options.get(
        'error_suppression_limit', ERROR_LIMIT_COUNT))
    item_size_warning_threshold = int(memcache_options.get(
        'item_size_warning_threshold', DEFAULT_ITEM_SIZE_WARNING_THRESHOLD))
    # last-resort defaults when neither config source supplied a value
    if not memcache_servers:
        memcache_servers = '127.0.0.1:11211'
    if max_conns <= 0:
        max_conns = 2
    return MemcacheRing(
        [s.strip() for s in memcache_servers.split(',')
         if s.strip()],
        connect_timeout=connect_timeout,
        pool_timeout=pool_timeout,
        tries=tries,
        io_timeout=io_timeout,
        max_conns=max_conns,
        tls_context=tls_context,
        logger=logger,
        error_limit_count=error_suppression_limit,
        error_limit_time=error_suppression_interval,
        error_limit_duration=error_suppression_interval,
        item_size_warning_threshold=item_size_warning_threshold)
|
Returns information about the swift cluster that has been previously
registered with the register_swift_info call.
:param admin: boolean value, if True will additionally return an 'admin'
section with information previously registered as admin
info.
:param disallowed_sections: list of section names to be withheld from the
information returned.
:returns: dictionary of information about the swift cluster.
|
def get_swift_info(admin=False, disallowed_sections=None):
    """
    Returns information about the swift cluster that has been previously
    registered with the register_swift_info call.

    :param admin: boolean value, if True will additionally return an 'admin'
                  section with information previously registered as admin
                  info.
    :param disallowed_sections: list of section names to be withheld from the
                                information returned; dotted names (e.g.
                                'section.key') remove nested keys.
    :returns: dictionary of information about the swift cluster.
    """
    disallowed_sections = disallowed_sections or []
    # Work on a deep copy so callers can't mutate the registry.
    info = deepcopy(_swift_info)
    for section in disallowed_sections:
        # Walk down the dotted path; ``pending_key`` trails one step behind
        # so the final component can be popped from its parent dict.
        pending_key = None
        current = info
        for part in section.split('.'):
            if pending_key:
                current = current.get(pending_key, {})
                if not isinstance(current, dict):
                    # Path descends into a non-dict value; nothing to remove.
                    current = {}
                    break
            pending_key = part
        current.pop(pending_key, None)
    if admin:
        admin_section = dict(_swift_admin_info)
        admin_section['disallowed_sections'] = list(disallowed_sections)
        info['admin'] = admin_section
    return info
|
Registers information about the swift cluster to be retrieved with calls
to get_swift_info.
NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used
in the disallowed_sections to remove unwanted keys from /info.
:param name: string, the section name to place the information under.
:param admin: boolean, if True, information will be registered to an
admin section which can optionally be withheld when
requesting the information.
:param kwargs: key value arguments representing the information to be
added.
:raises ValueError: if name or any of the keys in kwargs has "." in it
|
def register_swift_info(name='swift', admin=False, **kwargs):
    """
    Registers information about the swift cluster to be retrieved with calls
    to get_swift_info.

    NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used
    in the disallowed_sections to remove unwanted keys from /info.

    :param name: string, the section name to place the information under.
    :param admin: boolean, if True, information will be registered to an
                  admin section which can optionally be withheld when
                  requesting the information.
    :param kwargs: key value arguments representing the information to be
                   added.
    :raises ValueError: if name or any of the keys in kwargs has "." in it
    """
    # These section names are reserved for get_swift_info's own output.
    if name in ('admin', 'disallowed_sections'):
        raise ValueError('\'{0}\' is reserved name.'.format(name))

    dict_to_use = _swift_admin_info if admin else _swift_info
    if name not in dict_to_use:
        if "." in name:
            raise ValueError('Cannot use "." in a swift_info key: %s' % name)
        dict_to_use[name] = {}
    for key, val in kwargs.items():
        if "." in key:
            raise ValueError('Cannot use "." in a swift_info key: %s' % key)
        dict_to_use[name][key] = val
|
Returns the set of registered sensitive headers.
Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
prior to logging.
|
def get_sensitive_headers():
    """
    Returns the set of registered sensitive headers.

    Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
    prior to logging.

    :returns: a frozenset snapshot of the registered header names
    """
    # Return an immutable snapshot so callers can't mutate the registry.
    snapshot = frozenset(_sensitive_headers)
    return snapshot
|
Register a header as being "sensitive".
Sensitive headers are automatically redacted when logging. See the
``reveal_sensitive_prefix`` option in the proxy-server sample config
for more information.
:param header: The (case-insensitive) header name which, if present, may
contain sensitive information. Examples include ``X-Auth-Token`` and
(if s3api is enabled) ``Authorization``. Limited to ASCII characters.
|
def register_sensitive_header(header):
    """
    Register a header as being "sensitive".

    Sensitive headers are automatically redacted when logging. See the
    ``reveal_sensitive_prefix`` option in the proxy-server sample config
    for more information.

    :param header: The (case-insensitive) header name which, if present, may
        contain sensitive information. Examples include ``X-Auth-Token`` and
        (if s3api is enabled) ``Authorization``. Limited to ASCII characters.
    :raises TypeError: if ``header`` is not a native str
    :raises UnicodeError: if ``header`` contains non-ASCII characters
    """
    if not isinstance(header, str):
        raise TypeError
    # Force an ASCII round-trip; raises UnicodeError for non-ASCII input.
    codec_check = header.decode if six.PY2 else header.encode
    codec_check('ascii')
    _sensitive_headers.add(header.lower())
|
Returns the set of registered sensitive query parameters.
Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
prior to logging.
|
def get_sensitive_params():
    """
    Returns the set of registered sensitive query parameters.

    Used by :mod:`swift.common.middleware.proxy_logging` to perform redactions
    prior to logging.

    :returns: a frozenset snapshot of the registered parameter names
    """
    # Return an immutable snapshot so callers can't mutate the registry.
    snapshot = frozenset(_sensitive_params)
    return snapshot
|
Register a query parameter as being "sensitive".
Sensitive query parameters are automatically redacted when logging. See
the ``reveal_sensitive_prefix`` option in the proxy-server sample config
for more information.
:param query_param: The (case-sensitive) query parameter name which, if
present, may contain sensitive information. Examples include
``temp_url_signature`` and (if s3api is enabled) ``X-Amz-Signature``.
Limited to ASCII characters.
|
def register_sensitive_param(query_param):
    """
    Register a query parameter as being "sensitive".

    Sensitive query parameters are automatically redacted when logging. See
    the ``reveal_sensitive_prefix`` option in the proxy-server sample config
    for more information.

    :param query_param: The (case-sensitive) query parameter name which, if
        present, may contain sensitive information. Examples include
        ``temp_url_signature`` and (if s3api is enabled) ``X-Amz-Signature``.
        Limited to ASCII characters.
    :raises TypeError: if ``query_param`` is not a native str
    :raises UnicodeError: if ``query_param`` contains non-ASCII characters
    """
    if not isinstance(query_param, str):
        raise TypeError
    # Force an ASCII round-trip; raises UnicodeError for non-ASCII input.
    codec_check = query_param.decode if six.PY2 else query_param.encode
    codec_check('ascii')
    # Unlike headers, query params are case-sensitive; store as given.
    _sensitive_params.add(query_param)
|
Get a parameter from an HTTP request ensuring proper handling UTF-8
encoding.
:param req: request object
:param name: parameter name
:param default: result to return if the parameter is not found
:returns: HTTP request parameter value, as a native string
(in py2, as UTF-8 encoded str, not unicode object)
:raises HTTPBadRequest: if param not valid UTF-8 byte sequence
|
def get_param(req, name, default=None):
    """
    Get a parameter from an HTTP request ensuring proper handling UTF-8
    encoding.

    :param req: request object
    :param name: parameter name
    :param default: result to return if the parameter is not found
    :returns: HTTP request parameter value, as a native string
              (in py2, as UTF-8 encoded str, not unicode object)
    :raises HTTPBadRequest: if param not valid UTF-8 byte sequence
    """
    value = req.params.get(name, default)
    if not value:
        # Missing or empty value: nothing to validate.
        return value
    if six.PY2:
        if not isinstance(value, six.text_type):
            try:
                value.decode('utf8')  # Ensure UTF8ness
            except UnicodeDecodeError:
                raise HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='"%s" parameter not valid UTF-8' % name)
    else:
        # req.params is a dict of WSGI strings, so encoding will succeed
        value = value.encode('latin1')
        try:
            # Ensure UTF8ness since we're at it
            value = value.decode('utf8')
        except UnicodeDecodeError:
            raise HTTPBadRequest(
                request=req, content_type='text/plain',
                body='"%s" parameter not valid UTF-8' % name)
    return value
|
Any non-range GET or HEAD request for a SLO object may include a
part-number parameter in query string. If the passed in request
includes a part-number parameter it will be parsed into a valid integer
and returned. If the passed in request does not include a part-number
param we will return None. If the part-number parameter is invalid for
the given request we will raise the appropriate HTTP exception
:param req: the request object
:returns: validated part-number value or None
:raises HTTPBadRequest: if request or part-number param is not valid
|
def get_valid_part_num(req):
    """
    Any non-range GET or HEAD request for a SLO object may include a
    part-number parameter in query string.  If the passed in request
    includes a part-number parameter it will be parsed into a valid integer
    and returned.  If the passed in request does not include a part-number
    param we will return None.  If the part-number parameter is invalid for
    the given request we will raise the appropriate HTTP exception

    :param req: the request object
    :returns: validated part-number value or None
    :raises HTTPBadRequest: if request or part-number param is not valid
    """
    part_number_param = get_param(req, 'part-number')
    if part_number_param is None:
        return None
    try:
        part_number = int(part_number_param)
        if part_number <= 0:
            raise ValueError
    except ValueError:
        raise HTTPBadRequest('Part number must be an integer greater '
                             'than 0')

    if req.range:
        # A Range header combined with a part-number query is ambiguous.
        # Fixed: was ``req=req``; swob responses take ``request=``, which is
        # the keyword used by every other raise in this module.
        raise HTTPBadRequest(request=req,
                             body='Range requests are not supported '
                                  'with part number queries')

    return part_number
|
Get list of parameters from an HTTP request, validating the encoding of
each parameter.
:param req: request object
:param names: parameter names
:returns: a dict mapping parameter names to values for each name that
appears in the request parameters
:raises HTTPBadRequest: if any parameter value is not a valid UTF-8 byte
sequence
|
def validate_params(req, names):
    """
    Get list of parameters from an HTTP request, validating the encoding of
    each parameter.

    :param req: request object
    :param names: parameter names
    :returns: a dict mapping parameter names to values for each name that
              appears in the request parameters
    :raises HTTPBadRequest: if any parameter value is not a valid UTF-8 byte
                            sequence
    """
    # get_param performs the UTF-8 validation; absent params come back None.
    pairs = ((name, get_param(req, name)) for name in names)
    return dict((name, value) for name, value in pairs if value is not None)
|
Validate internal account name.
:raises: HTTPBadRequest
|
def validate_internal_account(account):
    """
    Validate internal account name.

    :param account: account name to validate
    :raises HTTPBadRequest: if the name is invalid
    """
    # Delegate to the shared name validator with the appropriate label.
    _validate_internal_name(account, 'account')
|
Validate internal account and container names.
:raises: HTTPBadRequest
|
def validate_internal_container(account, container):
    """
    Validate internal account and container names.

    :param account: account name; required
    :param container: container name; validated only if truthy
    :raises ValueError: if account is missing
    :raises HTTPBadRequest: if either name is invalid
    """
    # An account is mandatory context for any container validation.
    if not account:
        raise ValueError('Account is required')
    validate_internal_account(account)
    if container:
        _validate_internal_name(container, 'container')
|
Validate internal account, container and object names.
:raises: HTTPBadRequest
|
def validate_internal_obj(account, container, obj):
    """
    Validate internal account, container and object names.

    :param account: account name; required
    :param container: container name; required
    :param obj: object name; validated only if truthy and the account is not
        an auto-create or misplaced-objects account
    :raises ValueError: if account or container is missing
    :raises HTTPBadRequest: if any name is invalid or the object/container
        reserved-namespace markers are mismatched
    """
    for value, label in ((account, 'Account'), (container, 'Container')):
        if not value:
            raise ValueError('%s is required' % label)
    validate_internal_container(account, container)
    # Auto-create and misplaced-objects accounts are exempt from object
    # name checks.
    exempt = (account.startswith(AUTO_CREATE_ACCOUNT_PREFIX) or
              account == MISPLACED_OBJECTS_ACCOUNT)
    if obj and not exempt:
        _validate_internal_name(obj, 'object')
        obj_reserved = obj.startswith(RESERVED)
        container_reserved = container.startswith(RESERVED)
        # Object and container must agree on reserved-namespace usage.
        if container_reserved and not obj_reserved:
            raise HTTPBadRequest(body='Invalid user-namespace object '
                                      'in reserved-namespace container')
        if obj_reserved and not container_reserved:
            raise HTTPBadRequest(body='Invalid reserved-namespace object '
                                      'in user-namespace container')
|
Utility function to split and validate the request path and storage
policy. The storage policy index is extracted from the headers of
the request and converted to a StoragePolicy instance. The
remaining args are passed through to
:meth:`split_and_validate_path`.
:returns: a list, result of :meth:`split_and_validate_path` with
the BaseStoragePolicy instance appended on the end
:raises HTTPServiceUnavailable: if the path is invalid or no policy exists
with the extracted policy_index.
|
def get_name_and_placement(request, minsegs=1, maxsegs=None,
                           rest_with_last=False):
    """
    Utility function to split and validate the request path and storage
    policy.  The storage policy index is extracted from the headers of
    the request and converted to a StoragePolicy instance.  The
    remaining args are passed through to
    :meth:`split_and_validate_path`.

    :returns: a list, result of :meth:`split_and_validate_path` with
              the BaseStoragePolicy instance appended on the end
    :raises HTTPServiceUnavailable: if the path is invalid or no policy exists
                                    with the extracted policy_index.
    """
    policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
    policy = POLICIES.get_by_index(policy_index)
    if not policy:
        # 503 rather than 400: the client can't fix an unknown policy; the
        # cluster configuration is what's out of sync.
        raise HTTPServiceUnavailable(
            body="No policy with index %s" % policy_index,
            request=request, content_type='text/plain')
    segments = split_and_validate_path(request, minsegs=minsegs,
                                       maxsegs=maxsegs,
                                       rest_with_last=rest_with_last)
    return segments + [policy]
|
Utility function to split and validate the request path.
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay, as native strings
:raises HTTPBadRequest: if something's not okay
|
def split_and_validate_path(request, minsegs=1, maxsegs=None,
                            rest_with_last=False):
    """
    Utility function to split and validate the request path.

    :returns: result of :meth:`~swift.common.utils.split_path` if
              everything's okay, as native strings
    :raises HTTPBadRequest: if something's not okay
    """
    try:
        segments = request.split_path(minsegs, maxsegs, rest_with_last)
        # First two segments are always device and partition.
        validate_device_partition(segments[0], segments[1])
        return list(map(wsgi_to_str, segments))
    except ValueError as err:
        # Any validation failure becomes a 400 with the error text as body.
        raise HTTPBadRequest(body=str(err), request=request,
                             content_type='text/plain')
|
Tests if a header key starts with and is longer than the user
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
|
def is_user_meta(server_type, key):
    """
    Tests if a header key starts with and is longer than the user
    metadata prefix for given server type.

    :param server_type: type of backend server i.e. [account|container|object]
    :param key: header key
    :returns: True if the key satisfies the test, False otherwise
    """
    # Key must extend beyond the prefix, i.e. name a non-empty meta key.
    prefix = get_user_meta_prefix(server_type)
    return len(key) > len(prefix) and key.lower().startswith(prefix)
|
Tests if a header key starts with and is longer than the system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
|
def is_sys_meta(server_type, key):
    """
    Tests if a header key starts with and is longer than the system
    metadata prefix for given server type.

    :param server_type: type of backend server i.e. [account|container|object]
    :param key: header key
    :returns: True if the key satisfies the test, False otherwise
    """
    # Key must extend beyond the prefix, i.e. name a non-empty sysmeta key.
    prefix = get_sys_meta_prefix(server_type)
    return len(key) > len(prefix) and key.lower().startswith(prefix)
|
Tests if a header key starts with and is longer than the user or system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
|
def is_sys_or_user_meta(server_type, key):
    """
    Tests if a header key starts with and is longer than the user or system
    metadata prefix for given server type.

    :param server_type: type of backend server i.e. [account|container|object]
    :param key: header key
    :returns: True if the key satisfies the test, False otherwise
    """
    return any(check(server_type, key)
               for check in (is_user_meta, is_sys_meta))
|
Tests if a header key starts with and is longer than the prefix for object
transient system metadata.
:param key: header key
:returns: True if the key satisfies the test, False otherwise
|
def is_object_transient_sysmeta(key):
    """
    Tests if a header key starts with and is longer than the prefix for object
    transient system metadata.

    :param key: header key
    :returns: True if the key satisfies the test, False otherwise
    """
    # Key must extend beyond the prefix, i.e. name a non-empty meta key.
    prefix = OBJECT_TRANSIENT_SYSMETA_PREFIX
    return len(key) > len(prefix) and key.lower().startswith(prefix)
|
Removes the user metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
|
def strip_user_meta_prefix(server_type, key):
    """
    Removes the user metadata prefix for a given server type from the start
    of a header key.

    :param server_type: type of backend server i.e. [account|container|object]
    :param key: header key
    :returns: stripped header key
    :raises ValueError: if ``key`` is not a user metadata header
    """
    if not is_user_meta(server_type, key):
        raise ValueError('Key is not user meta')
    prefix_len = len(get_user_meta_prefix(server_type))
    return key[prefix_len:]
|
Removes the system metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
|
def strip_sys_meta_prefix(server_type, key):
    """
    Removes the system metadata prefix for a given server type from the start
    of a header key.

    :param server_type: type of backend server i.e. [account|container|object]
    :param key: header key
    :returns: stripped header key
    :raises ValueError: if ``key`` is not a system metadata header
    """
    if not is_sys_meta(server_type, key):
        raise ValueError('Key is not sysmeta')
    prefix_len = len(get_sys_meta_prefix(server_type))
    return key[prefix_len:]
|
Removes the object transient system metadata prefix from the start of a
header key.
:param key: header key
:returns: stripped header key
|
def strip_object_transient_sysmeta_prefix(key):
    """
    Removes the object transient system metadata prefix from the start of a
    header key.

    :param key: header key
    :returns: stripped header key
    :raises ValueError: if ``key`` is not an object transient sysmeta header
    """
    if not is_object_transient_sysmeta(key):
        raise ValueError('Key is not object transient sysmeta')
    prefix_len = len(OBJECT_TRANSIENT_SYSMETA_PREFIX)
    return key[prefix_len:]
|
Returns the prefix for user metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's user metadata headers
|
def get_user_meta_prefix(server_type):
    """
    Returns the prefix for user metadata headers for given server type.

    This prefix defines the namespace for headers that will be persisted
    by backend servers.

    :param server_type: type of backend server i.e. [account|container|object]
    :returns: prefix string for server type's user metadata headers
    """
    return 'x-{0}-meta-'.format(server_type.lower())
|
Returns the prefix for system metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's system metadata headers
|
def get_sys_meta_prefix(server_type):
    """
    Returns the prefix for system metadata headers for given server type.

    This prefix defines the namespace for headers that will be persisted
    by backend servers.

    :param server_type: type of backend server i.e. [account|container|object]
    :returns: prefix string for server type's system metadata headers
    """
    return 'x-{0}-sysmeta-'.format(server_type.lower())
|
Returns the Object Transient System Metadata header for key.
The Object Transient System Metadata namespace will be persisted by
backend object servers. These headers are treated in the same way as
object user metadata i.e. all headers in this namespace will be
replaced on every POST request.
:param key: metadata key
:returns: the entire object transient system metadata header for key
|
def get_object_transient_sysmeta(key):
    """
    Returns the Object Transient System Metadata header for key.

    The Object Transient System Metadata namespace will be persisted by
    backend object servers. These headers are treated in the same way as
    object user metadata i.e. all headers in this namespace will be
    replaced on every POST request.

    :param key: metadata key
    :returns: the entire object transient system metadata header for key
    """
    return OBJECT_TRANSIENT_SYSMETA_PREFIX + key
|
Returns the full X-Object-Sysmeta-Container-Update-Override-* header key.
:param key: the key you want to override in the container update
:returns: the full header key
|
def get_container_update_override_key(key):
    """
    Returns the full X-Object-Sysmeta-Container-Update-Override-* header key.

    :param key: the key you want to override in the container update
    :returns: the full header key, title-cased
    """
    # title() normalizes the header to canonical HTTP capitalization.
    return (OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX + key).title()
|
Generate a valid reserved name that joins the component parts.
:returns: a string
|
def get_reserved_name(*parts):
    """
    Generate a valid reserved name that joins the component parts.

    :param parts: name components; none may contain the reserved marker
    :returns: a string
    :raises ValueError: if any component contains the reserved marker
    """
    for part in parts:
        # Embedded markers would corrupt the round-trip through
        # split_reserved_name.
        if RESERVED in part:
            raise ValueError('Invalid reserved part in components')
    return RESERVED + RESERVED.join(parts)
|
Separate a valid reserved name into the component parts.
:returns: a list of strings
|
def split_reserved_name(name):
    """
    Separate a valid reserved name into the component parts.

    :param name: a name produced by :func:`get_reserved_name`
    :returns: a list of strings
    :raises ValueError: if ``name`` does not start with the reserved marker
    """
    if not name.startswith(RESERVED):
        raise ValueError('Invalid reserved name')
    # Drop the empty leading element produced by the leading marker.
    return name.split(RESERVED)[1:]
|
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
|
def remove_items(headers, condition):
    """
    Removes items from a dict whose keys satisfy
    the given condition.

    :param headers: a dict of headers
    :param condition: a function that will be passed the header key as a
                      single argument and should return True if the header
                      is to be removed.
    :returns: a dict, possibly empty, of headers that have been removed
    """
    # Collect matching keys first so we never mutate while iterating.
    matching = [key for key in headers if condition(key)]
    return dict((key, headers.pop(key)) for key in matching)
|
Will copy desired subset of headers from from_r to to_r.
:param from_r: a swob Request or Response
:param to_r: a swob Request or Response
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be copied.
|
def copy_header_subset(from_r, to_r, condition):
    """
    Will copy desired subset of headers from from_r to to_r.

    :param from_r: a swob Request or Response
    :param to_r: a swob Request or Response
    :param condition: a function that will be passed the header key as a
                      single argument and should return True if the header
                      is to be copied.
    """
    for header, value in from_r.headers.items():
        if condition(header):
            # Existing headers of the same name on to_r are overwritten.
            to_r.headers[header] = value
|
Validate that the value of path-like header is
well formatted. We assume the caller ensures that
specific header is present in req.headers.
:param req: HTTP request object
:param name: header name
:param length: length of path segment check
:param error_msg: error message for client
:returns: A tuple with path parts according to length
:raise: HTTPPreconditionFailed if header value
is not well formatted.
|
def check_path_header(req, name, length, error_msg):
    """
    Validate that the value of path-like header is
    well formatted. We assume the caller ensures that
    specific header is present in req.headers.

    :param req: HTTP request object
    :param name: header name
    :param length: length of path segment check
    :param error_msg: error message for client
    :returns: A tuple with path parts according to length
    :raise: HTTPPreconditionFailed if header value
            is not well formatted.
    """
    value = wsgi_unquote(req.headers.get(name))
    # Normalize to an absolute path before splitting.
    if not value.startswith('/'):
        value = '/' + value
    try:
        # Require exactly ``length`` segments.
        return split_path(value, length, length, True)
    except ValueError:
        raise HTTPPreconditionFailed(
            request=req,
            body=error_msg)
|
Takes a successful object-GET HTTP response and turns it into an
iterator of (first-byte, last-byte, length, headers, body-file)
5-tuples.
The response must either be a 200 or a 206; if you feed in a 204 or
something similar, this probably won't work.
:param response: HTTP response, like from bufferedhttp.http_connect(),
not a swob.Response.
|
def http_response_to_document_iters(response, read_chunk_size=4096):
    """
    Takes a successful object-GET HTTP response and turns it into an
    iterator of (first-byte, last-byte, length, headers, body-file)
    5-tuples.

    The response must either be a 200 or a 206; if you feed in a 204 or
    something similar, this probably won't work.

    :param response: HTTP response, like from bufferedhttp.http_connect(),
        not a swob.Response.
    :param read_chunk_size: chunk size used when parsing a
        multipart/byteranges body
    :returns: an iterator of (first-byte, last-byte, length, headers,
        body-file) 5-tuples; first-byte/last-byte/length are None when the
        response is chunked with unknown length
    """
    chunked = is_chunked(dict(response.getheaders()))
    if response.status == 200:
        if chunked:
            # Single "range" that's the whole object with an unknown length
            return iter([(0, None, None, response.getheaders(),
                          response)])

        # Single "range" that's the whole object
        content_length = int(response.getheader('Content-Length'))
        return iter([(0, content_length - 1, content_length,
                      response.getheaders(), response)])

    # Beyond this point the response is a 206; decide between a single
    # range (raw bytes) and multiple ranges (MIME document).
    content_type, params_list = parse_content_type(
        response.getheader('Content-Type'))
    if content_type != 'multipart/byteranges':
        # Single range; no MIME framing, just the bytes. The start and end
        # byte indices are in the Content-Range header.
        start, end, length = parse_content_range(
            response.getheader('Content-Range'))
        return iter([(start, end, length, response.getheaders(), response)])
    else:
        # Multiple ranges; the response body is a multipart/byteranges MIME
        # document, and we have to parse it using the MIME boundary
        # extracted from the Content-Type header.
        params = dict(params_list)
        return multipart_byteranges_to_document_iters(
            response, wsgi_to_bytes(params['boundary']), read_chunk_size)
|
Helper function to update an X-Backend-Etag-Is-At header whose value is a
list of alternative header names at which the actual object etag may be
found. This informs the object server where to look for the actual object
etag when processing conditional requests.
Since the proxy server and/or middleware may set alternative etag header
names, the value of X-Backend-Etag-Is-At is a comma separated list which
the object server inspects in order until it finds an etag value.
:param req: a swob Request
:param name: name of a sysmeta where alternative etag may be found
|
def update_etag_is_at_header(req, name):
    """
    Helper function to update an X-Backend-Etag-Is-At header whose value is a
    list of alternative header names at which the actual object etag may be
    found. This informs the object server where to look for the actual object
    etag when processing conditional requests.

    Since the proxy server and/or middleware may set alternative etag header
    names, the value of X-Backend-Etag-Is-At is a comma separated list which
    the object server inspects in order until it finds an etag value.

    :param req: a swob Request
    :param name: name of a sysmeta where alternative etag may be found
    :raises ValueError: if ``name`` contains a comma
    """
    if ',' in name:
        # HTTP header names should not have commas but we'll check anyway
        raise ValueError('Header name must not contain commas')
    # Append to any value set by earlier (left-most, higher-priority)
    # middleware rather than replacing it.
    req.headers["X-Backend-Etag-Is-At"] = csv_append(
        req.headers.get("X-Backend-Etag-Is-At"), name)
|
Helper function to resolve an alternative etag value that may be stored in
metadata under an alternate name.
The value of the request's X-Backend-Etag-Is-At header (if it exists) is a
comma separated list of alternate names in the metadata at which an
alternate etag value may be found. This list is processed in order until an
alternate etag is found.
The left most value in X-Backend-Etag-Is-At will have been set by the left
most middleware, or if no middleware, by ECObjectController, if an EC
policy is in use. The left most middleware is assumed to be the authority
on what the etag value of the object content is.
The resolver will work from left to right in the list until it finds a
value that is a name in the given metadata. So the left most wins, IF it
exists in the metadata.
By way of example, assume the encrypter middleware is installed. If an
object is *not* encrypted then the resolver will not find the encrypter
middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will
then find the EC alternate etag (if EC policy). But if the object *is*
encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is
correct because it should be preferred over X-Object-Sysmeta-Ec-Etag.
:param req: a swob Request
:param metadata: a dict containing object metadata
:return: an alternate etag value if any is found, otherwise None
|
def resolve_etag_is_at_header(req, metadata):
    """
    Helper function to resolve an alternative etag value that may be stored in
    metadata under an alternate name.

    The value of the request's X-Backend-Etag-Is-At header (if it exists) is a
    comma separated list of alternate names in the metadata at which an
    alternate etag value may be found. This list is processed in order until an
    alternate etag is found.

    The left most value in X-Backend-Etag-Is-At will have been set by the left
    most middleware, or if no middleware, by ECObjectController, if an EC
    policy is in use. The left most middleware is assumed to be the authority
    on what the etag value of the object content is.

    The resolver will work from left to right in the list until it finds a
    value that is a name in the given metadata. So the left most wins, IF it
    exists in the metadata.

    By way of example, assume the encrypter middleware is installed. If an
    object is *not* encrypted then the resolver will not find the encrypter
    middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will
    then find the EC alternate etag (if EC policy). But if the object *is*
    encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is
    correct because it should be preferred over X-Object-Sysmeta-Ec-Etag.

    :param req: a swob Request
    :param metadata: a dict containing object metadata
    :return: an alternate etag value if any is found, otherwise None
    """
    # Wrap in HeaderKeyDict for case-insensitive name matching.
    metadata = HeaderKeyDict(metadata)
    header = req.headers.get("X-Backend-Etag-Is-At")
    if header is None:
        return None
    # Left-most name present in the metadata wins.
    for name in list_from_csv(header):
        if name in metadata:
            return metadata[name]
    return None
|
Helper function to update an X-Backend-Ignore-Range-If-Metadata-Present
header whose value is a list of header names which, if any are present
on an object, mean the object server should respond with a 200 instead
of a 206 or 416.
:param req: a swob Request
:param name: name of a header which, if found, indicates the proxy will
want the whole object
|
def update_ignore_range_header(req, name):
    """
    Helper function to update an X-Backend-Ignore-Range-If-Metadata-Present
    header whose value is a list of header names which, if any are present
    on an object, mean the object server should respond with a 200 instead
    of a 206 or 416.

    :param req: a swob Request
    :param name: name of a header which, if found, indicates the proxy will
                 want the whole object
    :raises ValueError: if ``name`` contains a comma
    """
    if ',' in name:
        # HTTP header names should not have commas but we'll check anyway
        raise ValueError('Header name must not contain commas')
    header_name = 'X-Backend-Ignore-Range-If-Metadata-Present'
    # Append to any existing value rather than replacing it.
    req.headers[header_name] = csv_append(req.headers.get(header_name), name)
|
Helper function to remove Range header from request if metadata matching
the X-Backend-Ignore-Range-If-Metadata-Present header is found.
:param req: a swob Request
:param metadata: dictionary of object metadata
|
def resolve_ignore_range_header(req, metadata):
    """
    Helper function to remove Range header from request if metadata matching
    the X-Backend-Ignore-Range-If-Metadata-Present header is found.

    :param req: a swob Request
    :param metadata: dictionary of object metadata
    """
    header_value = req.headers.get(
        'X-Backend-Ignore-Range-If-Metadata-Present', '')
    # Case-insensitive comparison on both sides.
    ignored = set(h.strip().lower() for h in header_value.split(','))
    present = set(h.lower() for h in metadata)
    if ignored & present:
        req.headers.pop('Range', None)
|
Determine if replication network should be used.
:param headers: a dict of headers
:return: the value of the ``x-backend-use-replication-network`` item from
``headers``. If no ``headers`` are given or the item is not found then
False is returned.
|
def is_use_replication_network(headers=None):
    """
    Determine if replication network should be used.

    :param headers: a dict of headers
    :return: the value of the ``x-backend-use-replication-network`` item from
        ``headers``. If no ``headers`` are given or the item is not found then
        False is returned.
    """
    if not headers:
        return False
    for key, value in headers.items():
        # Header lookup must be case-insensitive.
        if key.lower() == USE_REPLICATION_NETWORK_HEADER:
            return config_true_value(value)
    return False
|
Get the ip address and port that should be used for the given ``node``.
The normal ip address and port are returned unless the ``node`` or
``headers`` indicate that the replication ip address and port should be
used.
If the ``headers`` dict has an item with key
``x-backend-use-replication-network`` and a truthy value then the
replication ip address and port are returned. Otherwise if the ``node``
dict has an item with key ``use_replication`` and truthy value then the
replication ip address and port are returned. Otherwise the normal ip
address and port are returned.
:param node: a dict describing a node
:param headers: a dict of headers
:return: a tuple of (ip address, port)
|
def get_ip_port(node, headers):
    """
    Get the ip address and port that should be used for the given ``node``.

    The replication ip address and port are returned when the ``headers``
    dict has a truthy ``x-backend-use-replication-network`` item, or
    (failing that) when the ``node`` dict has a truthy ``use_replication``
    item; otherwise the normal ip address and port are returned.

    :param node: a dict describing a node
    :param headers: a dict of headers
    :return: a tuple of (ip address, port)
    """
    use_replication = is_use_replication_network(headers)
    return select_ip_port(node, use_replication=use_replication)
|
Helper function to check if a request with the header 'x-open-expired'
can access an object that has not yet been reaped by the object-expirer
based on the allow_open_expired global config.
:param app: the application instance
:param req: request object
|
def is_open_expired(app, req):
    """
    Check if a request with the header 'x-open-expired' can access an
    object that has not yet been reaped by the object-expirer, based on
    the allow_open_expired global config.

    :param app: the application instance
    :param req: request object
    :return: True only when the config allows it AND the client asked
    """
    if not config_true_value(app.allow_open_expired):
        return False
    return config_true_value(req.headers.get('x-open-expired'))
|
Helper function to check if a request has either the headers
'x-backend-open-expired' or 'x-backend-replication' for the backend
to access expired objects.
:param request: request object
|
def is_backend_open_expired(request):
    """
    Check if a request carries either of the headers
    'x-backend-open-expired' or 'x-backend-replication', which allow the
    backend to access expired objects.

    :param request: request object
    :return: True if either header is truthy
    """
    for hdr in ('x-backend-open-expired', 'x-backend-replication'):
        if config_true_value(request.headers.get(hdr, 'false')):
            return True
    return False
|
Helper function to construct a string from a base and the policy.
Used to encode the policy index into either a file name or a
directory name by various modules.
:param base: the base string
:param policy_or_index: StoragePolicy instance, or an index
(string or int), if None the legacy
storage Policy-0 is assumed.
:returns: base name with policy index added
:raises PolicyError: if no policy exists with the given policy_index
|
def get_policy_string(base, policy_or_index):
    """
    Helper function to construct a string from a base and the policy.
    Used to encode the policy index into either a file name or a
    directory name by various modules.

    :param base: the base string
    :param policy_or_index: StoragePolicy instance, or an index
                            (string or int), if None the legacy
                            storage Policy-0 is assumed.
    :returns: base name with policy index added
    :raises PolicyError: if no policy exists with the given policy_index
    """
    policy = (policy_or_index
              if isinstance(policy_or_index, BaseStoragePolicy)
              else POLICIES.get_by_index(policy_or_index))
    if policy is None:
        raise PolicyError("Unknown policy", index=policy_or_index)
    return _get_policy_string(base, int(policy))
|
Helper function to convert a string representing a base and a
policy. Used to decode the policy from either a file name or
a directory name by various modules.
:param policy_string: base name with policy index added
:raises PolicyError: if given index does not map to a valid policy
:returns: a tuple, in the form (base, policy) where base is the base
string and policy is the StoragePolicy instance for the
index encoded in the policy_string.
|
def split_policy_string(policy_string):
    """
    Helper function to convert a string representing a base and a
    policy. Used to decode the policy from either a file name or
    a directory name by various modules.

    :param policy_string: base name with policy index added
    :raises PolicyError: if given index does not map to a valid policy
    :returns: a tuple, in the form (base, policy) where base is the base
              string and policy is the StoragePolicy instance for the
              index encoded in the policy_string.
    """
    base, sep, policy_index = policy_string.rpartition('-')
    if not sep:
        # no '-' at all: the whole string is the base, legacy policy 0
        base, policy_index = policy_string, None
    policy = POLICIES.get_by_index(policy_index)
    # Round-trip check: re-encoding must reproduce the input exactly,
    # otherwise the index was not a valid policy encoding.
    if get_policy_string(base, policy) != policy_string:
        raise PolicyError("Unknown policy", index=policy_index)
    return base, policy
|
Parse storage policies in ``swift.conf`` - note that validation
is done when the :class:`StoragePolicyCollection` is instantiated.
:param conf: ConfigParser parser object for swift.conf
|
def parse_storage_policies(conf):
    """
    Parse storage policies in ``swift.conf`` - note that validation
    is done when the :class:`StoragePolicyCollection` is instantiated.

    :param conf: ConfigParser parser object for swift.conf
    :returns: a :class:`StoragePolicyCollection` of the parsed policies
    """
    prefix = 'storage-policy:'
    policies = []
    for section in conf.sections():
        if not section.startswith(prefix):
            continue
        # everything after the prefix is the policy index
        policy_index = section[len(prefix):]
        options = dict(conf.items(section))
        # policy_type selects the concrete policy class; absent means the
        # default policy type
        policy_type = options.pop('policy_type', DEFAULT_POLICY_TYPE)
        cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type]
        policies.append(cls.from_config(policy_index, options))
    return StoragePolicyCollection(policies)
|
Reload POLICIES from ``swift.conf``.
|
def reload_storage_policies():
    """
    Reload POLICIES from ``swift.conf``.
    """
    global _POLICIES
    # Python 3 disallows section/option duplicates by default;
    # strict=False preserves the older (Python 2) behavior.
    parser_kwargs = {} if six.PY2 else {'strict': False}
    policy_conf = ConfigParser(**parser_kwargs)
    policy_conf.read(utils.SWIFT_CONF_FILE)
    try:
        _POLICIES = parse_storage_policies(policy_conf)
    except PolicyError as e:
        raise SystemExit('ERROR: Invalid Storage Policy Configuration '
                         'in %s (%s)' % (utils.SWIFT_CONF_FILE, e))
|
Set and retrieve the datetime value of self.headers[header]
(Used by both request and response)
The header is parsed on retrieval and a datetime object is returned.
The header can be set using a datetime, numeric value, or str.
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Content-Length"
|
def _datetime_property(header):
    """
    Build a property that reads and writes self.headers[header] as a
    datetime. (Used by both request and response)

    The header is parsed on retrieval and a datetime object is returned;
    a missing or unparseable value yields None. The header can be set
    using a datetime, numeric value, or str. If a value of None is
    given, the header is deleted.

    :param header: name of the header, e.g. "Content-Length"
    """
    def getter(self):
        raw = self.headers.get(header, None)
        if raw is None:
            return None
        try:
            # parsedate yields a 9-tuple; the first seven fields are
            # exactly the positional args datetime() wants
            return datetime(*(parsedate(raw)[:7] + (UTC,)))
        except Exception:
            # unparseable date strings are treated as absent
            return None

    def setter(self, value):
        if isinstance(value, (float,) + six.integer_types):
            value = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value))
        elif isinstance(value, datetime):
            value = value.strftime("%a, %d %b %Y %H:%M:%S GMT")
        self.headers[header] = value

    return property(getter, setter,
                    doc=("Retrieve and set the %s header as a datetime, "
                         "set it with a datetime, int, or str") % header)
|
Set and retrieve the value of self.headers[header]
(Used by both request and response)
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Transfer-Encoding"
|
def _header_property(header):
    """
    Build a property that proxies self.headers[header].
    (Used by both request and response)

    If a value of None is given, the header is deleted.

    :param header: name of the header, e.g. "Transfer-Encoding"
    """
    def fget(self):
        return self.headers.get(header, None)

    def fset(self, value):
        self.headers[header] = value

    return property(fget, fset,
                    doc="Retrieve and set the %s header" % header)
|
Set and retrieve the value of self.headers[header]
(Used by both request and response)
On retrieval, it converts values to integers.
If a value of None is given, the header is deleted.
:param header: name of the header, e.g. "Content-Length"
|
def _header_int_property(header):
    """
    Build a property that proxies self.headers[header] as an int.
    (Used by both request and response)

    On retrieval, the stored value is converted to an integer; a missing
    header yields None. If a value of None is given, the header is
    deleted.

    :param header: name of the header, e.g. "Content-Length"
    """
    def fget(self):
        raw = self.headers.get(header, None)
        return None if raw is None else int(raw)

    def fset(self, value):
        self.headers[header] = value

    return property(fget, fset,
                    doc="Retrieve and set the %s header as an int" % header)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.