python_code | repo_name | file_path
---|---|---|
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from __future__ import annotations # pylint: disable=unused-variable
import json
import logging
from pathlib import Path
import time
from typing import Dict, List, NewType, Iterable
import requests
from aistore.sdk.ais_source import AISSource
from aistore.sdk.etl_const import DEFAULT_ETL_TIMEOUT
from aistore.sdk.object_iterator import ObjectIterator
from aistore.sdk.const import (
ACT_COPY_BCK,
ACT_CREATE_BCK,
ACT_DESTROY_BCK,
ACT_ETL_BCK,
ACT_EVICT_REMOTE_BCK,
ACT_LIST,
ACT_MOVE_BCK,
ACT_SUMMARY_BCK,
HEADER_ACCEPT,
HEADER_BUCKET_PROPS,
HEADER_BUCKET_SUMM,
HTTP_METHOD_DELETE,
HTTP_METHOD_GET,
HTTP_METHOD_HEAD,
HTTP_METHOD_POST,
MSGPACK_CONTENT_TYPE,
PROVIDER_AIS,
QPARAM_BCK_TO,
QPARAM_COUNT_REMOTE_OBJS,
QPARAM_FLT_PRESENCE,
QPARAM_KEEP_REMOTE,
QPARAM_NAMESPACE,
QPARAM_PROVIDER,
URL_PATH_BUCKETS,
STATUS_ACCEPTED,
STATUS_OK,
)
from aistore.sdk.errors import (
InvalidBckProvider,
ErrBckAlreadyExists,
ErrBckNotFound,
UnexpectedHTTPStatusCode,
)
from aistore.sdk.multiobj import ObjectGroup, ObjectRange
from aistore.sdk.request_client import RequestClient
from aistore.sdk.object import Object
from aistore.sdk.types import (
ActionMsg,
BucketEntry,
BucketList,
BucketModel,
BsummCtrlMsg,
Namespace,
CopyBckMsg,
TransformBckMsg,
TCBckMsg,
ListObjectsMsg,
)
from aistore.sdk.list_object_flag import ListObjectFlag
from aistore.sdk.utils import validate_directory, get_file_size
Header = NewType("Header", requests.structures.CaseInsensitiveDict)
# pylint: disable=unused-variable,too-many-public-methods
class Bucket(AISSource):
"""
A class representing a bucket that contains user data.
Args:
client (RequestClient): Client for interfacing with AIS cluster
name (str): name of bucket
provider (str, optional): Provider of bucket (one of "ais", "aws", "gcp", ...), defaults to "ais"
namespace (Namespace, optional): Namespace of bucket, defaults to None
"""
def __init__(
self,
name: str,
client: RequestClient = None,
provider: str = PROVIDER_AIS,
namespace: Namespace = None,
):
self._client = client
self._name = name
self._provider = provider
self._namespace = namespace
self._qparam = {QPARAM_PROVIDER: provider}
if self.namespace:
self._qparam[QPARAM_NAMESPACE] = namespace.get_path()
@property
def client(self) -> RequestClient:
"""The client bound to this bucket."""
return self._client
@property
def qparam(self) -> Dict:
"""Default query parameters to use with API calls from this bucket."""
return self._qparam
@property
def provider(self) -> str:
"""The provider for this bucket."""
return self._provider
@property
def name(self) -> str:
"""The name of this bucket."""
return self._name
@property
def namespace(self) -> Namespace:
"""The namespace for this bucket."""
return self._namespace
def list_urls(self, prefix: str = "", etl_name: str = None) -> Iterable[str]:
"""
Get an iterator of full URLs to every object in this bucket matching the prefix
Args:
prefix (str, optional): Limit objects selected by a given string prefix
etl_name (str, optional): ETL to include in URLs
Returns:
Iterator of all object URLs matching the prefix
"""
for entry in self.list_objects_iter(prefix=prefix, props="name"):
yield self.object(entry.name).get_url(etl_name=etl_name)
def create(self, exist_ok=False):
"""
Creates a bucket in AIStore cluster.
Can only create a bucket for the AIS provider on the local cluster; remote cloud buckets do not support creation.
Args:
exist_ok (bool, optional): Ignore error if the cluster already contains this bucket
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
aistore.sdk.errors.InvalidBckProvider: Invalid bucket provider for requested operation
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
self._verify_ais_bucket()
try:
self.make_request(HTTP_METHOD_POST, ACT_CREATE_BCK)
except ErrBckAlreadyExists as err:
if not exist_ok:
raise err
return self
def delete(self, missing_ok=False):
"""
Destroys bucket in AIStore cluster.
In all cases removes both the bucket's content _and_ the bucket's metadata from the cluster.
Note: AIS will _not_ call the remote backend provider to delete the corresponding Cloud bucket
(iff the bucket in question is, in fact, a Cloud bucket).
Args:
missing_ok (bool, optional): Ignore error if bucket does not exist
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
aistore.sdk.errors.InvalidBckProvider: Invalid bucket provider for requested operation
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
self._verify_ais_bucket()
try:
self.make_request(HTTP_METHOD_DELETE, ACT_DESTROY_BCK)
except ErrBckNotFound as err:
if not missing_ok:
raise err
def rename(self, to_bck_name: str) -> str:
"""
Renames bucket in AIStore cluster.
Only works on AIS buckets. Returns job ID that can be used later to check the status of the asynchronous
operation.
Args:
to_bck_name (str): New bucket name for bucket to be renamed as
Returns:
Job ID (as str) that can be used to check the status of the operation
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
aistore.sdk.errors.InvalidBckProvider: Invalid bucket provider for requested operation
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
self._verify_ais_bucket()
params = self.qparam.copy()
params[QPARAM_BCK_TO] = Bucket(
name=to_bck_name, namespace=self.namespace
).get_path()
resp = self.make_request(HTTP_METHOD_POST, ACT_MOVE_BCK, params=params)
self._name = to_bck_name
return resp.text
def evict(self, keep_md: bool = False):
"""
Evicts bucket in AIStore cluster.
NOTE: only Cloud buckets can be evicted.
Args:
keep_md (bool, optional): If true, evicts objects but keeps the bucket's metadata (i.e., the bucket's name
and its properties)
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
aistore.sdk.errors.InvalidBckProvider: Invalid bucket provider for requested operation
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
self.verify_cloud_bucket()
params = self.qparam.copy()
params[QPARAM_KEEP_REMOTE] = str(keep_md)
self.make_request(HTTP_METHOD_DELETE, ACT_EVICT_REMOTE_BCK, params=params)
def head(self) -> Header:
"""
Requests bucket properties.
Returns:
Response header with the bucket properties
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
return self.client.request(
HTTP_METHOD_HEAD,
path=f"{URL_PATH_BUCKETS}/{self.name}",
params=self.qparam,
).headers
# pylint: disable=too-many-arguments
def summary(
self,
uuid: str = "",
prefix: str = "",
fast: bool = True,
cached: bool = True,
present: bool = True,
):
"""
Returns bucket summary (starts xaction job and polls for results).
Args:
uuid (str): Identifier for the bucket summary. Defaults to an empty string.
prefix (str): Prefix for objects to be included in the bucket summary.
Defaults to an empty string (all objects).
fast (bool): If True, performs and returns a quick summary. Defaults to True.
cached (bool): If True, summary entails cached entities. Defaults to True.
present (bool): If True, summary entails present entities. Defaults to True.
Raises:
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
aistore.sdk.errors.AISError: All other types of errors with AIStore
"""
bsumm_ctrl_msg = BsummCtrlMsg(
uuid=uuid, prefix=prefix, fast=fast, cached=cached, present=present
)
# Start the job and get the job ID
resp = self.make_request(
HTTP_METHOD_GET,
ACT_SUMMARY_BCK,
params=self.qparam,
value=bsumm_ctrl_msg.dict(),
)
# Initial response status code should be 202
if resp.status_code == STATUS_OK:
raise UnexpectedHTTPStatusCode([STATUS_ACCEPTED], resp.status_code)
job_id = resp.text.strip('"')
# Update the uuid in the control message
bsumm_ctrl_msg.uuid = job_id
# Sleep and request frequency in sec (starts at 200 ms)
sleep_time = 0.2
# Poll async task for http.StatusOK completion
while True:
resp = self.make_request(
HTTP_METHOD_GET,
ACT_SUMMARY_BCK,
params=self.qparam,
value=bsumm_ctrl_msg.dict(),
)
# If task completed successfully, break the loop
if resp.status_code == STATUS_OK:
break
# If task is still running, wait for some time and try again
if resp.status_code == STATUS_ACCEPTED:
time.sleep(sleep_time)
sleep_time = min(
10, sleep_time * 1.5
) # Increase sleep_time by 50%, but don't exceed 10 seconds
# Otherwise, if the status code received is neither STATUS_OK nor STATUS_ACCEPTED, raise an exception
else:
raise UnexpectedHTTPStatusCode(
[STATUS_OK, STATUS_ACCEPTED], resp.status_code
)
return json.loads(resp.content.decode("utf-8"))[0]
def info(self, flt_presence: int = 0, count_remote_objs: bool = True):
"""
Returns bucket summary and information/properties.
Args:
count_remote_objs (bool): If True, returned bucket info will entail remote objects as well
flt_presence (int): Describes the presence of buckets and objects with respect to their existence
or non-existence in the AIS cluster. Defaults to 0.
Expected values are:
0 - (object | bucket) exists inside and/or outside cluster
1 - same as 0 but no need to return summary
2 - bucket: is present | object: present and properly located
3 - same as 2 but no need to return summary
4 - objects: present anywhere/anyhow _in_ the cluster as: replica, ec-slices, misplaced
5 - not present - exists _outside_ cluster
Raises:
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
ValueError: `flt_presence` is not one of the expected values
aistore.sdk.errors.AISError: All other types of errors with AIStore
"""
if flt_presence not in range(6):
raise ValueError("`flt_presence` must be one of 0, 1, 2, 3, 4, or 5.")
params = self.qparam.copy()
params.update({QPARAM_FLT_PRESENCE: flt_presence})
if count_remote_objs:
params.update({QPARAM_COUNT_REMOTE_OBJS: count_remote_objs})
response = self.client.request(
HTTP_METHOD_HEAD,
path=f"{URL_PATH_BUCKETS}/{self.name}",
params=params,
)
bucket_props = json.loads(response.headers.get(HEADER_BUCKET_PROPS, "{}"))
bucket_summ = json.loads(response.headers.get(HEADER_BUCKET_SUMM, "{}"))
return bucket_props, bucket_summ
# pylint: disable=too-many-arguments
def copy(
self,
to_bck: Bucket,
prefix_filter: str = "",
prepend: str = "",
dry_run: bool = False,
force: bool = False,
) -> str:
"""
Returns job ID that can be used later to check the status of the asynchronous operation.
Args:
to_bck (Bucket): Destination bucket
prefix_filter (str, optional): Only copy objects with names starting with this prefix
prepend (str, optional): Value to prepend to the name of copied objects
dry_run (bool, optional): Determines if the copy should actually
happen or not
force (bool, optional): Override existing destination bucket
Returns:
Job ID (as str) that can be used to check the status of the operation
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
value = CopyBckMsg(
prefix=prefix_filter, prepend=prepend, dry_run=dry_run, force=force
).as_dict()
params = self.qparam.copy()
params[QPARAM_BCK_TO] = to_bck.get_path()
return self.make_request(
HTTP_METHOD_POST, ACT_COPY_BCK, value=value, params=params
).text
# pylint: disable=too-many-arguments
def list_objects(
self,
prefix: str = "",
props: str = "",
page_size: int = 0,
uuid: str = "",
continuation_token: str = "",
flags: List[ListObjectFlag] = None,
target: str = "",
) -> BucketList:
"""
Returns a structure that contains a page of objects, job ID, and continuation token (to read the next page, if
available).
Args:
prefix (str, optional): Return only objects that start with the prefix
props (str, optional): Comma-separated list of object properties to return. Default value is "name,size".
Properties: "name", "size", "atime", "version", "checksum", "cached", "target_url", "status", "copies",
"ec", "custom", "node".
page_size (int, optional): Return at most "page_size" objects.
The maximum number of objects in a response depends on the bucket backend. E.g., an AWS bucket cannot return
more than 5,000 objects in a single page.
NOTE: If "page_size" is greater than a backend maximum, the backend maximum number of objects is returned.
Defaults to "0" - return the maximum number of objects.
uuid (str, optional): Job ID, required to get the next page of objects
continuation_token (str, optional): Marks the object to start reading the next page
flags (List[ListObjectFlag], optional): Optional list of ListObjectFlag enums to include as flags in the
request
target(str, optional): Only list objects on this specific target node
Returns:
BucketList: the page of objects in the bucket and the continuation token to get the next page
Empty continuation token marks the final page of the object list
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
value = ListObjectsMsg(
prefix=prefix,
page_size=page_size,
uuid=uuid,
props=props,
continuation_token=continuation_token,
flags=[] if flags is None else flags,
target=target,
).as_dict()
action = ActionMsg(action=ACT_LIST, value=value).dict()
bucket_list = self.client.request_deserialize(
HTTP_METHOD_GET,
path=f"{URL_PATH_BUCKETS}/{ self.name }",
headers={HEADER_ACCEPT: MSGPACK_CONTENT_TYPE},
res_model=BucketList,
json=action,
params=self.qparam,
)
for entry in bucket_list.entries:
entry.object = self.object(entry.name)
return bucket_list
def list_objects_iter(
self,
prefix: str = "",
props: str = "",
page_size: int = 0,
flags: List[ListObjectFlag] = None,
target: str = "",
) -> ObjectIterator:
"""
Returns an iterator for all objects in bucket
Args:
prefix (str, optional): Return only objects that start with the prefix
props (str, optional): Comma-separated list of object properties to return. Default value is "name,size".
Properties: "name", "size", "atime", "version", "checksum", "cached", "target_url", "status", "copies",
"ec", "custom", "node".
page_size (int, optional): return at most "page_size" objects
The maximum number of objects in a response depends on the bucket backend. E.g., an AWS bucket cannot return
more than 5,000 objects in a single page.
NOTE: If "page_size" is greater than a backend maximum, the backend maximum number of objects is returned.
Defaults to "0" - return the maximum number of objects
flags (List[ListObjectFlag], optional): Optional list of ListObjectFlag enums to include as flags in the
request
target(str, optional): Only list objects on this specific target node
Returns:
ObjectIterator: object iterator
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
def fetch_objects(uuid, token):
return self.list_objects(
prefix,
props,
page_size,
uuid=uuid,
continuation_token=token,
flags=flags,
target=target,
)
return ObjectIterator(fetch_objects)
def list_all_objects(
self,
prefix: str = "",
props: str = "",
page_size: int = 0,
flags: List[ListObjectFlag] = None,
target: str = "",
) -> List[BucketEntry]:
"""
Returns a list of all objects in bucket
Args:
prefix (str, optional): return only objects that start with the prefix
props (str, optional): comma-separated list of object properties to return. Default value is "name,size".
Properties: "name", "size", "atime", "version", "checksum", "cached", "target_url", "status", "copies",
"ec", "custom", "node".
page_size (int, optional): return at most "page_size" objects
The maximum number of objects in a response depends on the bucket backend. E.g., an AWS bucket cannot return
more than 5,000 objects in a single page.
NOTE: If "page_size" is greater than a backend maximum, the backend maximum number of objects is returned.
Defaults to "0" - return the maximum number of objects
flags (List[ListObjectFlag], optional): Optional list of ListObjectFlag enums to include as flags in the
request
target(str, optional): Only list objects on this specific target node
Returns:
List[BucketEntry]: list of objects in bucket
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
"""
uuid = ""
continuation_token = ""
obj_list = None
while True:
resp = self.list_objects(
prefix=prefix,
props=props,
page_size=page_size,
uuid=uuid,
continuation_token=continuation_token,
flags=flags,
target=target,
)
if obj_list:
obj_list = obj_list + resp.entries
obj_list = obj_list or resp.entries
if resp.continuation_token == "":
break
continuation_token = resp.continuation_token
uuid = resp.uuid
return obj_list
# pylint: disable=too-many-arguments
def transform(
self,
etl_name: str,
to_bck: Bucket,
timeout: str = DEFAULT_ETL_TIMEOUT,
prefix_filter: str = "",
prepend: str = "",
ext: Dict[str, str] = None,
force: bool = False,
dry_run: bool = False,
) -> str:
"""
Visits all selected objects in the source bucket and for each object, puts the transformed
result to the destination bucket
Args:
etl_name (str): name of etl to be used for transformations
to_bck (str): destination bucket for transformations
timeout (str, optional): Timeout of the ETL job (e.g. 5m for 5 minutes)
prefix_filter (str, optional): Only transform objects with names starting with this prefix
prepend (str, optional): Value to prepend to the name of resulting transformed objects
ext (Dict[str, str], optional): dict of new extension followed by extension to be replaced
(i.e. {"jpg": "txt"})
dry_run (bool, optional): determines if the copy should actually happen or not
force (bool, optional): override existing destination bucket
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
value = TCBckMsg(
ext=ext,
transform_msg=TransformBckMsg(etl_name=etl_name, timeout=timeout),
copy_msg=CopyBckMsg(
prefix=prefix_filter, prepend=prepend, force=force, dry_run=dry_run
),
).as_dict()
params = self.qparam.copy()
params[QPARAM_BCK_TO] = to_bck.get_path()
return self.make_request(
HTTP_METHOD_POST, ACT_ETL_BCK, value=value, params=params
).text
def put_files(
self,
path: str,
prefix_filter: str = "",
pattern: str = "*",
basename: bool = False,
prepend: str = None,
recursive: bool = False,
dry_run: bool = False,
verbose: bool = True,
) -> List[str]:
"""
Puts files found in a given filepath as objects to a bucket in AIS storage.
Args:
path (str): Local filepath, can be relative or absolute
prefix_filter (str, optional): Only put files with names starting with this prefix
pattern (str, optional): Regex pattern to filter files
basename (bool, optional): Whether to use the file names only as object names and omit the path information
prepend (str, optional): Optional string to use as a prefix in the object name for all objects uploaded
No delimiter ("/", "-", etc.) is automatically applied between the prepend value and the object name
recursive (bool, optional): Whether to recurse through the provided path directories
dry_run (bool, optional): Option to only show expected behavior without an actual put operation
verbose (bool, optional): Whether to print upload info to standard output
Returns:
List of object names put to a bucket in AIS
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
ValueError: The path provided is not a valid directory
"""
validate_directory(path)
file_iterator = (
Path(path).rglob(pattern) if recursive else Path(path).glob(pattern)
)
obj_names = []
dry_run_prefix = "Dry-run enabled. Proposed action:" if dry_run else ""
logger = logging.getLogger(f"{__name__}.put_files")
logger.disabled = not verbose
for file in file_iterator:
if not file.is_file() or not str(file.name).startswith(prefix_filter):
continue
obj_name = self._get_uploaded_obj_name(file, path, basename, prepend)
if not dry_run:
self.object(obj_name).put_file(str(file))
logger.info(
"%s File '%s' uploaded as object '%s' with size %s",
dry_run_prefix,
file,
obj_name,
get_file_size(file),
)
obj_names.append(obj_name)
logger.info(
"%s Specified files from %s uploaded to bucket %s",
dry_run_prefix,
path,
f"{self.provider}://{self.name}",
)
return obj_names
@staticmethod
def _get_uploaded_obj_name(file, root_path, basename, prepend):
obj_name = str(file.relative_to(root_path)) if not basename else file.name
if prepend:
return prepend + obj_name
return obj_name
def object(self, obj_name: str) -> Object:
"""
Factory constructor for an object in this bucket.
Does not make any HTTP request, only instantiates an object in a bucket owned by the client.
Args:
obj_name (str): Name of object
Returns:
The object created.
"""
return Object(
bucket=self,
name=obj_name,
)
def objects(
self,
obj_names: list = None,
obj_range: ObjectRange = None,
obj_template: str = None,
) -> ObjectGroup:
"""
Factory constructor for multiple objects belonging to this bucket.
Args:
obj_names (list): Names of objects to include in the group
obj_range (ObjectRange): Range of objects to include in the group
obj_template (str): String template defining objects to include in the group
Returns:
The ObjectGroup created
"""
return ObjectGroup(
bck=self,
obj_names=obj_names,
obj_range=obj_range,
obj_template=obj_template,
)
def make_request(
self,
method: str,
action: str,
value: dict = None,
params: dict = None,
) -> requests.Response:
"""
Use the bucket's client to make a request to the bucket endpoint on the AIS server
Args:
method (str): HTTP method to use, e.g. POST/GET/DELETE
action (str): Action string used to create an ActionMsg to pass to the server
value (dict): Additional value parameter to pass in the ActionMsg
params (dict, optional): Optional parameters to pass in the request
Returns:
Response from the server
"""
if self._client is None:
raise ValueError(
"Bucket requires a client to use functions. Try defining a client and accessing this bucket with "
"client.bucket()"
)
json_val = ActionMsg(action=action, value=value).dict()
return self._client.request(
method,
path=f"{URL_PATH_BUCKETS}/{self.name}",
json=json_val,
params=params if params else self.qparam,
)
def _verify_ais_bucket(self):
"""
Verify the bucket provider is AIS
"""
if self.provider != PROVIDER_AIS:
raise InvalidBckProvider(self.provider)
def verify_cloud_bucket(self):
"""
Verify the bucket provider is a cloud provider
"""
if self.provider == PROVIDER_AIS:
raise InvalidBckProvider(self.provider)
def get_path(self) -> str:
"""
Get the path representation of this bucket
"""
namespace_path = self.namespace.get_path() if self.namespace else "@#"
return f"{ self.provider }/{ namespace_path }/{ self.name }/"
def as_model(self) -> BucketModel:
"""
Return a data-model of the bucket
Returns:
BucketModel representation
"""
return BucketModel(
name=self.name, namespace=self.namespace, provider=self.provider
)
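# --- Illustrative usage sketch (not part of the original file). ---
# A minimal walk-through of the Bucket API defined above. It assumes a local
# AIStore cluster at "http://localhost:8080"; the endpoint and bucket names
# are hypothetical placeholders.
if __name__ == "__main__":
    from aistore.sdk.client import Client

    client = Client("http://localhost:8080")
    bck = client.bucket("my-ais-bucket").create(exist_ok=True)

    # Upload one object, then page through the bucket contents.
    bck.object("hello.txt").put_content(b"hello world")
    for entry in bck.list_objects_iter(prefix="hello", props="name,size"):
        print(entry.name, entry.size)

    # Bucket-to-bucket copy is asynchronous and returns a job ID.
    dest = client.bucket("my-copy-bucket").create(exist_ok=True)
    job_id = bck.copy(to_bck=dest, prepend="copied-")
    client.job(job_id=job_id).wait(timeout=120)

    bck.delete(missing_ok=True)
    dest.delete(missing_ok=True)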
| aistore-master | python/aistore/sdk/bucket.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from io import BufferedWriter
from typing import NewType, Iterable
import requests
from aistore.sdk.ais_source import AISSource
from aistore.sdk.const import (
DEFAULT_CHUNK_SIZE,
HTTP_METHOD_DELETE,
HTTP_METHOD_GET,
HTTP_METHOD_HEAD,
HTTP_METHOD_PUT,
QPARAM_ARCHPATH,
QPARAM_ETL_NAME,
ACT_PROMOTE,
HTTP_METHOD_POST,
URL_PATH_OBJECTS,
)
from aistore.sdk.object_reader import ObjectReader
from aistore.sdk.types import ActionMsg, PromoteAPIArgs
from aistore.sdk.utils import read_file_bytes, validate_file
Header = NewType("Header", requests.structures.CaseInsensitiveDict)
# pylint: disable=consider-using-with,unused-variable
class Object(AISSource):
"""
A class representing an object of a bucket bound to a client.
Args:
bucket (Bucket): Bucket to which this object belongs
name (str): name of object
"""
def __init__(self, bucket: "Bucket", name: str):
self._bucket = bucket
self._client = bucket.client
self._bck_name = bucket.name
self._qparams = bucket.qparam
self._name = name
self._object_path = f"{URL_PATH_OBJECTS}/{ self._bck_name}/{ self.name }"
@property
def bucket(self):
"""Bucket containing this object"""
return self._bucket
@property
def name(self):
"""Name of this object"""
return self._name
def list_urls(self, prefix: str = "", etl_name: str = None) -> Iterable[str]:
yield self.get_url(etl_name=etl_name)
def head(self) -> Header:
"""
Requests object properties.
Returns:
Response header with the object properties.
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
requests.exceptions.HTTPError(404): The object does not exist
"""
return self._client.request(
HTTP_METHOD_HEAD,
path=self._object_path,
params=self._qparams,
).headers
def get(
self,
archpath: str = "",
chunk_size: int = DEFAULT_CHUNK_SIZE,
etl_name: str = None,
writer: BufferedWriter = None,
) -> ObjectReader:
"""
Reads an object
Args:
archpath (str, optional): If the object is an archive, use `archpath` to extract a single file
from the archive
chunk_size (int, optional): chunk_size to use while reading from stream
etl_name (str, optional): Transforms an object based on ETL with etl_name
writer (BufferedWriter, optional): User-provided writer for writing content output.
User is responsible for closing the writer
Returns:
The stream of bytes to read an object or a file inside an archive.
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
params = self._qparams.copy()
params[QPARAM_ARCHPATH] = archpath
if etl_name:
params[QPARAM_ETL_NAME] = etl_name
resp = self._client.request(
HTTP_METHOD_GET,
path=self._object_path,
params=params,
stream=True,
)
obj_reader = ObjectReader(
stream=resp,
response_headers=resp.headers,
chunk_size=chunk_size,
)
if writer:
writer.writelines(obj_reader)
return obj_reader
def get_url(self, archpath: str = "", etl_name: str = None):
"""
Get the full url to the object including base url and any query parameters
Args:
archpath (str, optional): If the object is an archive, use `archpath` to extract a single file
from the archive
etl_name (str, optional): Transforms an object based on ETL with etl_name
Returns:
Full URL to get object
"""
params = self._qparams.copy()
if archpath:
params[QPARAM_ARCHPATH] = archpath
if etl_name:
params[QPARAM_ETL_NAME] = etl_name
return self._client.get_full_url(self._object_path, params)
def put_content(self, content: bytes) -> Header:
"""
Puts bytes as an object to a bucket in AIS storage.
Args:
content (bytes): Bytes to put as an object.
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
self._put_data(self.name, content)
def put_file(self, path: str = None):
"""
Puts a local file as an object to a bucket in AIS storage.
Args:
path (str): Path to local file
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
ValueError: The path provided is not a valid file
"""
validate_file(path)
self._put_data(self.name, read_file_bytes(path))
def _put_data(self, obj_name: str, data: bytes):
url = f"{URL_PATH_OBJECTS}/{ self._bck_name }/{ obj_name }"
self._client.request(
HTTP_METHOD_PUT,
path=url,
params=self._qparams,
data=data,
)
# pylint: disable=too-many-arguments
def promote(
self,
path: str,
target_id: str = "",
recursive: bool = False,
overwrite_dest: bool = False,
delete_source: bool = False,
src_not_file_share: bool = False,
) -> Header:
"""
Promotes a file or folder that an AIS target can access to a bucket in AIS storage.
These files can be either on the physical disk of an AIS target itself or on a network file system
the cluster can access.
See more info here: https://aiatscale.org/blog/2022/03/17/promote
Args:
path (str): Path to file or folder the AIS cluster can reach
target_id (str, optional): Promote files from a specific target node
recursive (bool, optional): Recursively promote objects from files in directories inside the path
overwrite_dest (bool, optional): Overwrite objects already on AIS
delete_source (bool, optional): Delete the source files when done promoting
src_not_file_share (bool, optional): Optimize if the source is guaranteed to not be on a file share
Returns:
Object properties
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
AISError: Path does not exist on the AIS cluster storage
"""
url = f"{URL_PATH_OBJECTS}/{ self._bck_name }"
value = PromoteAPIArgs(
source_path=path,
object_name=self.name,
target_id=target_id,
recursive=recursive,
overwrite_dest=overwrite_dest,
delete_source=delete_source,
src_not_file_share=src_not_file_share,
).as_dict()
json_val = ActionMsg(action=ACT_PROMOTE, name=path, value=value).dict()
return self._client.request(
HTTP_METHOD_POST, path=url, params=self._qparams, json=json_val
).headers
def delete(self):
"""
Delete an object from a bucket.
Returns:
None
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
requests.exceptions.HTTPError(404): The object does not exist
"""
self._client.request(
HTTP_METHOD_DELETE,
path=self._object_path,
params=self._qparams,
)
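# --- Illustrative usage sketch (not part of the original file). ---
# Shows the Object read/write calls defined above; the endpoint, bucket, and
# object names are hypothetical placeholders.
if __name__ == "__main__":
    from aistore.sdk.client import Client

    client = Client("http://localhost:8080")
    obj = client.bucket("my-ais-bucket").object("data/sample.bin")

    obj.put_content(b"\x00\x01\x02\x03")
    print(obj.head())  # object properties returned as response headers

    # ObjectReader is iterable (see writer.writelines(obj_reader) above), so the
    # streamed chunks can simply be joined into one bytes value.
    payload = b"".join(obj.get(chunk_size=1024))
    print(len(payload))

    obj.delete()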
| aistore-master | python/aistore/sdk/object.py |
from typing import List
from pydantic import BaseModel, Field
from aistore.sdk.types import BucketModel
# See ext/dsort/metric.go for cluster-side type definitions
# pylint: disable=too-few-public-methods
class TimeStats(BaseModel):
"""
Statistics for time spent on tasks
"""
total_ms: str
count: str
min_ms: str
max_ms: str
avg_ms: str
class ThroughputStats(BaseModel):
"""
Statistics on task throughput
"""
total: str
count: str
min_throughput: str
max_throughput: str
avg_throughput: str
class DetailedStats(TimeStats, ThroughputStats):
"""
Include fields from both time and throughput stats
"""
class PhaseInfo(BaseModel):
"""
Stats for a specific dSort phase
"""
started_time: str
end_time: str
elapsed: str
running: bool
finished: bool
class LocalExtraction(PhaseInfo):
"""
Metrics for first phase of dSort
"""
total_count: str
extracted_count: str
extracted_size: str
extracted_record_count: str
extracted_to_disk_count: str
extracted_to_disk_size: str
single_shard_stats: DetailedStats = None
class MetaSorting(PhaseInfo):
"""
Metrics for second phase of dSort
"""
sent_stats: TimeStats = None
recv_stats: TimeStats = None
class ShardCreation(PhaseInfo):
"""
Metrics for final phase of dSort
"""
to_create: str
created_count: str
moved_shard_count: str
req_stats: TimeStats = None
resp_stats: TimeStats = None
local_send_stats: DetailedStats = None
local_recv_stats: DetailedStats = None
single_shard_stats: DetailedStats = None
class DsortMetrics(BaseModel):
"""
All stats for a dSort run
"""
local_extraction: LocalExtraction
meta_sorting: MetaSorting
shard_creation: ShardCreation
aborted: bool = None
archived: bool = None
description: str = None
warnings: List[str] = None
errors: List[str] = None
extended: bool = None
class JobInfo(BaseModel):
"""
Info about a dsort Job, including metrics
"""
id: str
src_bck: BucketModel = Field(alias="src-bck")
dst_bck: BucketModel = Field(alias="dst-bck")
started_time: str = None
finish_time: str = None
extracted_duration: str = Field(alias="started_meta_sorting", default=None)
sorting_duration: str = Field(alias="started_shard_creation", default=None)
creation_duration: str = Field(alias="finished_shard_creation", default=None)
objects: int = Field(alias="loc-objs")
bytes: int = Field(alias="loc-bytes")
metrics: DsortMetrics = Field(alias="Metrics")
aborted: bool
archived: bool
# pylint: disable=missing-class-docstring
class Config:
allow_population_by_field_name = True
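# --- Illustrative usage sketch (not part of the original file). ---
# Shows how the alias-based fields above map server-side JSON keys
# ("src-bck", "loc-objs", "Metrics", ...) onto JobInfo; all values are made up.
if __name__ == "__main__":
    phase = {"started_time": "", "end_time": "", "elapsed": "", "running": False, "finished": True}
    sample = {
        "id": "dsort-123",
        "src-bck": {"name": "src-bck", "provider": "ais"},
        "dst-bck": {"name": "dst-bck", "provider": "ais"},
        "loc-objs": 10,
        "loc-bytes": 1024,
        "Metrics": {
            "local_extraction": {**phase, "total_count": "10", "extracted_count": "10",
                                 "extracted_size": "1024", "extracted_record_count": "10",
                                 "extracted_to_disk_count": "0", "extracted_to_disk_size": "0"},
            "meta_sorting": dict(phase),
            "shard_creation": {**phase, "to_create": "1", "created_count": "1", "moved_shard_count": "0"},
        },
        "aborted": False,
        "archived": True,
    }
    info = JobInfo.parse_obj(sample)
    print(info.objects, info.metrics.shard_creation.finished)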
| aistore-master | python/aistore/sdk/dsort_types.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from __future__ import annotations # pylint: disable=unused-variable
import itertools
import logging
from datetime import datetime
from typing import List, Dict
import time
from aistore.sdk.bucket import Bucket
from aistore.sdk.const import (
HTTP_METHOD_GET,
HTTP_METHOD_PUT,
QPARAM_WHAT,
QPARAM_FORCE,
DEFAULT_JOB_WAIT_TIMEOUT,
WHAT_ONE_XACT_STATUS,
URL_PATH_CLUSTER,
ACT_START,
WHAT_QUERY_XACT_STATS,
)
from aistore.sdk.errors import Timeout, JobInfoNotFound
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import JobStatus, JobArgs, ActionMsg, JobSnapshot, BucketModel
from aistore.sdk.utils import probing_frequency
# pylint: disable=unused-variable
class Job:
"""
A class containing job-related functions.
Args:
client (RequestClient): Client for interfacing with AIS cluster
job_id (str, optional): ID of a specific job, empty for all jobs
job_kind (str, optional): Specific kind of job, empty for all kinds
"""
# pylint: disable=duplicate-code
def __init__(self, client: RequestClient, job_id: str = "", job_kind: str = ""):
self._client = client
self._job_id = job_id
self._job_kind = job_kind
@property
def job_id(self):
"""
Return job id
"""
return self._job_id
@property
def job_kind(self):
"""
Return job kind
"""
return self._job_kind
def status(
self,
) -> JobStatus:
"""
Return status of a job
Returns:
The job status including id, finish time, and error info.
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
if not self._job_id:
raise ValueError("Cannot query status on a job without an assigned ID")
return self._client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=JobStatus,
json=JobArgs(id=self._job_id, kind=self._job_kind).as_dict(),
params={QPARAM_WHAT: WHAT_ONE_XACT_STATUS},
)
def wait(
self,
timeout: int = DEFAULT_JOB_WAIT_TIMEOUT,
verbose: bool = True,
):
"""
Wait for a job to finish
Args:
timeout (int, optional): The maximum time to wait for the job, in seconds. Default timeout is 5 minutes.
verbose (bool, optional): Whether to log wait status to standard output
Returns:
None
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
errors.Timeout: Timeout while waiting for the job to finish
"""
logger = logging.getLogger(f"{__name__}.wait")
logger.disabled = not verbose
passed = 0
sleep_time = probing_frequency(timeout)
while True:
if passed > timeout:
raise Timeout("job to finish")
status = self.status()
if status.end_time == 0:
time.sleep(sleep_time)
passed += sleep_time
logger.info("Waiting on job '%s'...", status.uuid)
continue
end_time = datetime.fromtimestamp(status.end_time / 1e9).time()
if status.err:
logger.error(
"Job '%s' failed at time '%s' with error: %s",
status.uuid,
end_time,
status.err,
)
elif status.aborted:
logger.error("Job '%s' aborted at time '%s'", status.uuid, end_time)
else:
logger.info("Job '%s' finished at time '%s'", status.uuid, end_time)
break
def wait_for_idle(
self,
timeout: int = DEFAULT_JOB_WAIT_TIMEOUT,
verbose: bool = True,
):
"""
Wait for a job to reach an idle state
Args:
timeout (int, optional): The maximum time to wait for the job, in seconds. Default timeout is 5 minutes.
verbose (bool, optional): Whether to log wait status to standard output
Returns:
None
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
errors.Timeout: Timeout while waiting for the job to finish
"""
action = f"job '{self._job_id}' to reach idle state"
logger = logging.getLogger(f"{__name__}.wait_for_idle")
logger.disabled = not verbose
passed = 0
sleep_time = probing_frequency(timeout)
while True:
snapshot_lists = self._query_job_snapshots().values()
snapshots = list(itertools.chain.from_iterable(snapshot_lists))
job_info_found = True
try:
if self._check_job_idle(snapshots):
logger.info("Job '%s' reached idle state", self._job_id)
return
except JobInfoNotFound:
logger.info("No information found for job %s, retrying", self._job_id)
job_info_found = False
if passed > timeout:
if len(snapshots) == 0:
raise Timeout(action, "No job information found.")
if not job_info_found:
raise Timeout(
action, f"No information found for job {self._job_id}."
)
raise Timeout(action)
time.sleep(sleep_time)
passed += sleep_time
logger.info("Waiting for %s", action)
def _check_job_idle(self, snapshots):
job_found = False
for snap in snapshots:
if snap.id != self._job_id:
continue
# If any targets are reporting the job not idle, continue waiting
if not snap.is_idle:
return False
job_found = True
if not job_found:
raise JobInfoNotFound(f"No info found for job {self._job_id}")
return True
def start(
self,
daemon_id: str = "",
force: bool = False,
buckets: List[Bucket] = None,
) -> str:
"""
Start a job and return its ID.
Args:
daemon_id (str, optional): For running a job that must run on a specific target node (e.g. resilvering).
force (bool, optional): Override existing restrictions for a bucket (e.g., run LRU eviction even if the
bucket has LRU disabled).
buckets (List[Bucket], optional): List of one or more buckets; applicable only for jobs that have bucket
scope (for details on job types, see `Table` in xact/api.go).
Returns:
The running job ID.
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
job_args = JobArgs(kind=self._job_kind, daemon_id=daemon_id)
if buckets and len(buckets) > 0:
bucket_models = [
BucketModel(
name=bck.name, provider=bck.provider, namespace=bck.namespace
)
for bck in buckets
]
if len(bucket_models) == 1:
job_args.bucket = bucket_models[0]
else:
job_args.buckets = bucket_models
params = {QPARAM_FORCE: "true"} if force else {}
action = ActionMsg(action=ACT_START, value=job_args.as_dict()).dict()
resp = self._client.request(
HTTP_METHOD_PUT, path=URL_PATH_CLUSTER, json=action, params=params
)
return resp.text
def _query_job_snapshots(self) -> Dict[str, List[JobSnapshot]]:
value = JobArgs(id=self._job_id, kind=self._job_kind).as_dict()
params = {QPARAM_WHAT: WHAT_QUERY_XACT_STATS}
return self._client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
json=value,
params=params,
res_model=Dict[str, List[JobSnapshot]],
)
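# --- Illustrative usage sketch (not part of the original file). ---
# Starts a bucket-scoped job and waits for it to finish; the endpoint, job
# kind, and bucket name are hypothetical placeholders.
if __name__ == "__main__":
    from aistore.sdk.client import Client

    client = Client("http://localhost:8080")
    bck = client.bucket("my-ais-bucket")

    # "lru" is one example of a bucket-scoped job kind (see xact/api.go).
    job_id = client.job(job_kind="lru").start(buckets=[bck], force=True)

    # Track the started job by the ID it returned.
    client.job(job_id=job_id).wait(timeout=60, verbose=True)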
| aistore-master | python/aistore/sdk/job.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
from __future__ import annotations # pylint: disable=unused-variable
from aistore.sdk.bucket import Bucket
from aistore.sdk.const import (
PROVIDER_AIS,
)
from aistore.sdk.cluster import Cluster
from aistore.sdk.dsort import Dsort
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import Namespace
from aistore.sdk.job import Job
from aistore.sdk.etl import Etl
# pylint: disable=unused-variable
class Client:
"""
AIStore client for managing buckets, objects, ETL jobs
Args:
endpoint (str): AIStore endpoint
"""
def __init__(self, endpoint: str):
self._request_client = RequestClient(endpoint)
def bucket(
self, bck_name: str, provider: str = PROVIDER_AIS, namespace: Namespace = None
):
"""
Factory constructor for bucket object.
Does not make any HTTP request, only instantiates a bucket object.
Args:
bck_name (str): Name of bucket
provider (str): Provider of bucket, one of "ais", "aws", "gcp", ... (optional, defaults to ais)
namespace (Namespace): Namespace of bucket (optional, defaults to None)
Returns:
The bucket object created.
"""
return Bucket(
client=self._request_client,
name=bck_name,
provider=provider,
namespace=namespace,
)
def cluster(self):
"""
Factory constructor for cluster object.
Does not make any HTTP request, only instantiates a cluster object.
Returns:
The cluster object created.
"""
return Cluster(client=self._request_client)
def job(self, job_id: str = "", job_kind: str = ""):
"""
Factory constructor for job object, which contains job-related functions.
Does not make any HTTP request, only instantiates a job object.
Args:
job_id (str, optional): Optional ID for interacting with a specific job
job_kind (str, optional): Optional specific type of job empty for all kinds
Returns:
The job object created.
"""
return Job(client=self._request_client, job_id=job_id, job_kind=job_kind)
def etl(self, etl_name: str):
"""
Factory constructor for ETL object.
Contains APIs related to AIStore ETL operations.
Does not make any HTTP request, only instantiates an ETL object.
Args:
etl_name (str): Name of the ETL
Returns:
The ETL object created.
"""
return Etl(client=self._request_client, name=etl_name)
def dsort(self, dsort_id: str = ""):
"""
Factory constructor for dSort object.
Contains APIs related to AIStore dSort operations.
Does not make any HTTP request, only instantiates a dSort object.
Args:
dsort_id: ID of the dSort job
Returns:
dSort object created
"""
return Dsort(client=self._request_client, dsort_id=dsort_id)
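# --- Illustrative usage sketch (not part of the original file). ---
# The Client is a thin factory: each call below only instantiates an object
# bound to the shared RequestClient; no HTTP request is made until one of that
# object's methods is invoked. The endpoint and names are hypothetical.
if __name__ == "__main__":
    client = Client("http://localhost:8080")
    bucket = client.bucket("my-ais-bucket")   # Bucket handle
    job = client.job(job_kind="rebalance")    # Job handle (no specific ID yet)
    etl = client.etl("my-etl")                # ETL handle
    dsort = client.dsort("dsort-job-id")      # dSort handle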
| aistore-master | python/aistore/sdk/client.py |
from aistore.sdk.client import Client
from aistore.sdk.list_object_flag import ListObjectFlag
from aistore.sdk.bucket import Bucket
from aistore.sdk.namespace import Namespace
| aistore-master | python/aistore/sdk/__init__.py |
from __future__ import annotations
from enum import Enum
from typing import List
class ListObjectFlag(Enum):
"""
Flags to pass when listing objects in a bucket.
See api/apc/lsmsg.go
"""
CACHED = 0
ALL = 1
DELETED = 2
ARCH_DIR = 3
NAME_ONLY = 4
NAME_SIZE = 5
DONT_HEAD_REMOTE = 6
TRY_HEAD_REMOTE = 7
DONT_ADD_REMOTE = 8
USE_CACHE = 9
ONLY_REMOTE_PROPS = 10
@staticmethod
def join_flags(flags: List[ListObjectFlag]) -> int:
"""
Take a list of ListObjectFlag enums and return the integer value of the combined flags
Args:
flags: List of ListObjectFlag enums
Returns:
A single bit string with each digit corresponding to the flag's value from the right.
E.g. USE_CACHE = 9 and NAME_ONLY = 4 so if both flags are passed, the result will be 2^9 + 2^4 = 528
"""
res = 0
for flag in flags:
res = res ^ 2**flag.value
return res
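# --- Illustrative usage sketch (not part of the original file). ---
# join_flags packs each enum value into its own bit, matching the docstring
# example above: 2**9 + 2**4 == 528.
if __name__ == "__main__":
    flags = [ListObjectFlag.USE_CACHE, ListObjectFlag.NAME_ONLY]
    print(ListObjectFlag.join_flags(flags))  # -> 528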
| aistore-master | python/aistore/sdk/list_object_flag.py |
# Defaults
DEFAULT_ETL_COMM = "hpush"
DEFAULT_ETL_TIMEOUT = "5m"
DEFAULT_ETL_RUNTIME = "python3.8v2"
# ETL comm types
# ext/etl/api.go Hpush
ETL_COMM_HPUSH = "hpush"
# ext/etl/api.go Hpull
ETL_COMM_HPULL = "hpull"
# ext/etl/api.go Hrev
ETL_COMM_HREV = "hrev"
# ext/etl/api.go HpushStdin
ETL_COMM_IO = "io"
ETL_COMM_CODE = [ETL_COMM_IO, ETL_COMM_HPUSH, ETL_COMM_HREV, ETL_COMM_HPULL]
ETL_COMM_SPEC = [ETL_COMM_HPUSH, ETL_COMM_HREV, ETL_COMM_HPULL]
ETL_SUPPORTED_PYTHON_VERSIONS = ["3.10", "3.11"]
# templates for ETL
CODE_TEMPLATE = """
import pickle
import base64
import importlib
for mod in {}:
importlib.import_module(mod)
transform = pickle.loads(base64.b64decode('{}'))
{}
"""
| aistore-master | python/aistore/sdk/etl_const.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
from __future__ import annotations # pylint: disable=unused-variable
import base64
from typing import Any, Mapping, List, Optional, Dict
import msgspec
from pydantic import BaseModel, validator
from aistore.sdk.namespace import Namespace
from aistore.sdk.const import PROVIDER_AIS
from aistore.sdk.list_object_flag import ListObjectFlag
# pylint: disable=too-few-public-methods,unused-variable,missing-function-docstring
class ActionMsg(BaseModel):
"""
Represents the action message passed by the client via json
"""
action: str
name: str = ""
value: Any = None
class NetInfo(BaseModel):
"""
Represents a set of network-related info
"""
node_hostname: str = ""
daemon_port: str = ""
direct_url: str = ""
class Snode(BaseModel):
"""
Represents a system node
"""
daemon_id: str
daemon_type: str
public_net: NetInfo = None
intra_control_net: NetInfo = None
intra_data_net: NetInfo = None
flags: int = 0
class Smap(BaseModel):
"""
Represents a system map
"""
tmap: Mapping[str, Snode]
pmap: Mapping[str, Snode]
proxy_si: Snode
version: int = 0
uuid: str = ""
creation_time: str = ""
class BucketEntry(msgspec.Struct):
"""
Represents a single entry in a bucket -- an object
See cmn/objlist.go/LsoEntry
"""
n: str
cs: str = ""
a: str = ""
v: str = ""
t: str = ""
s: int = 0
c: int = 0
f: int = 0
object: Any = None
@property
def name(self):
return self.n
@property
def checksum(self):
return self.cs
@property
def atime(self):
return self.a
@property
def version(self):
return self.v
@property
def location(self):
return self.t
@property
def size(self):
return self.s
@property
def copies(self):
return self.c
@property
def flags(self):
return self.f
def is_cached(self):
return (self.flags & (1 << 6)) != 0
def is_ok(self):
return (self.flags & ((1 << 5) - 1)) == 0
class BucketList(msgspec.Struct):
"""
Represents the response when getting a list of bucket items, containing a list of BucketEntry objects
"""
UUID: str
ContinuationToken: str
Flags: int
Entries: List[BucketEntry] = None
@property
def uuid(self):
return self.UUID
@property
def continuation_token(self):
return self.ContinuationToken
@property
def flags(self):
return self.Flags
@property
def entries(self):
return [] if self.Entries is None else self.Entries
def get_entries(self):
"""
Deprecated -- use entries property
"""
return self.entries
class BucketModel(BaseModel):
"""
Represents the response from the API containing bucket info
"""
name: str
provider: str = PROVIDER_AIS
namespace: Namespace = None
def as_dict(self):
dict_rep = {"name": self.name, "provider": self.provider}
if self.namespace:
dict_rep["namespace"] = self.namespace
return dict_rep
class BsummCtrlMsg(BaseModel):
"""
Represents the bucket summary control message
"""
uuid: str
prefix: str
fast: bool
cached: bool
present: bool
class JobArgs(BaseModel):
"""
Represents the set of args to pass when making a job-related request
"""
id: str = ""
kind: str = ""
daemon_id: str = ""
bucket: BucketModel = None
buckets: List[BucketModel] = None
def as_dict(self):
return {
"ID": self.id,
"Kind": self.kind,
"DaemonID": self.daemon_id,
"Bck": self.bucket,
"Buckets": self.buckets,
}
class JobQuery(BaseModel):
"""
Structure to send the API when querying the cluster for multiple jobs
"""
active: bool = False
kind: str = ""
target: str = ""
def as_dict(self):
return {
"kind": self.kind,
"node": self.target,
"show_active": self.active,
}
class JobStatus(BaseModel):
"""
Represents the response of an API query to fetch job status
"""
uuid: str = ""
err: str = ""
end_time: int = 0
aborted: bool = False
class ETLInfo(BaseModel): # pylint: disable=too-few-public-methods,unused-variable
"""
Represents the API response when querying an ETL
"""
id: str = ""
xaction_id: str = ""
obj_count: int = 0
in_bytes: int = 0
out_bytes: int = 0
class ETLDetails(BaseModel):
"""
Represents the API response of queries on single ETL details
"""
id: str
communication: str
timeout: str
code: Optional[bytes]
spec: Optional[str]
dependencies: Optional[str]
runtime: Optional[str] # see ext/etl/runtime/all.go
chunk_size: int = 0
argument: str = ""
@validator("code")
def set_code(cls, code): # pylint: disable=no-self-argument
if code is not None:
code = base64.b64decode(code)
return code
@validator("spec")
def set_spec(cls, spec): # pylint: disable=no-self-argument
if spec is not None:
spec = base64.b64decode(spec)
return spec
class InitETLArgs(BaseModel):
"""
Represents the args shared by ETL initialization with code or spec
"""
etl_name: str
communication_type: str
timeout: str
arg_type: str = ""
class InitSpecETLArgs(InitETLArgs):
"""
Represents the set of args the sdk will pass to AIStore when making a request to initialize an ETL with a spec
"""
spec: str
def as_dict(self):
return {
"id": self.etl_name,
"timeout": self.timeout,
"communication": f"{self.communication_type}://",
"spec": self.spec,
"argument": self.arg_type,
}
class InitCodeETLArgs(InitETLArgs):
"""
Represents the set of args the sdk will pass to AIStore when making a request to initialize an ETL with code
"""
runtime: str
dependencies: str
functions: Dict[str, str]
code: str
chunk_size: int = None
def as_dict(self):
dict_rep = {
"id": self.etl_name,
"runtime": self.runtime,
"communication": f"{self.communication_type}://",
"timeout": self.timeout,
"funcs": self.functions,
"code": self.code,
"dependencies": self.dependencies,
"argument": self.arg_type,
}
if self.chunk_size:
dict_rep["chunk_size"] = self.chunk_size
return dict_rep
class PromoteAPIArgs(BaseModel):
"""
Represents the set of args the sdk will pass to AIStore when making a promote request and
provides conversion to the expected json format
"""
target_id: str = ""
source_path: str = ""
object_name: str = ""
recursive: bool = False
overwrite_dest: bool = False
delete_source: bool = False
src_not_file_share: bool = False
def as_dict(self):
return {
"tid": self.target_id,
"src": self.source_path,
"obj": self.object_name,
"rcr": self.recursive,
"ovw": self.overwrite_dest,
"dls": self.delete_source,
"notshr": self.src_not_file_share,
}
class JobStats(BaseModel):
"""
Structure for job statistics
"""
objects: int = 0
bytes: int = 0
out_objects: int = 0
out_bytes: int = 0
in_objects: int = 0
in_bytes: int = 0
class JobSnapshot(BaseModel):
"""
Structure for the data returned when querying a single job on a single target node
"""
id: str = ""
kind: str = ""
start_time: str = ""
end_time: str = ""
bucket: BucketModel = None
source_bck: str = ""
dest_bck: str = ""
rebalance_id: str = ""
stats: JobStats = None
aborted: bool = False
is_idle: bool = False
class CopyBckMsg(BaseModel):
"""
API message structure for copying a bucket
"""
prefix: str = ""
prepend: str
dry_run: bool
force: bool
def as_dict(self):
return {
"prefix": self.prefix,
"prepend": self.prepend,
"dry_run": self.dry_run,
"force": self.force,
}
class ListObjectsMsg(BaseModel):
"""
API message structure for listing objects in a bucket
"""
prefix: str
page_size: int
uuid: str
props: str
continuation_token: str
flags: List[ListObjectFlag]
target: str
def as_dict(self):
return {
"prefix": self.prefix,
"pagesize": self.page_size,
"uuid": self.uuid,
"props": self.props,
"continuation_token": self.continuation_token,
"flags": str(ListObjectFlag.join_flags(self.flags)),
"target": self.target,
}
class TransformBckMsg(BaseModel):
"""
API message structure for requesting an etl transform on a bucket
"""
etl_name: str
timeout: str
def as_dict(self):
return {"id": self.etl_name, "request_timeout": self.timeout}
class TCBckMsg(BaseModel):
"""
API message structure for transforming or copying between buckets.
Can be used on its own for an entire bucket or encapsulated in TCMultiObj to apply only to a selection of objects
"""
ext: Dict[str, str] = None
copy_msg: CopyBckMsg = None
transform_msg: TransformBckMsg = None
def as_dict(self):
dict_rep = {}
if self.ext:
dict_rep["ext"] = self.ext
if self.copy_msg:
for key, val in self.copy_msg.as_dict().items():
dict_rep[key] = val
if self.transform_msg:
for key, val in self.transform_msg.as_dict().items():
dict_rep[key] = val
return dict_rep
class TCMultiObj(BaseModel):
"""
API message structure for transforming or copying multiple objects between buckets
"""
to_bck: BucketModel
tc_msg: TCBckMsg = None
continue_on_err: bool
object_selection: dict
def as_dict(self):
dict_rep = self.object_selection
if self.tc_msg:
for key, val in self.tc_msg.as_dict().items():
dict_rep[key] = val
dict_rep["tobck"] = self.to_bck.as_dict()
dict_rep["coer"] = self.continue_on_err
return dict_rep
class ArchiveMultiObj(BaseModel):
"""
API message structure for multi-object archive requests
"""
archive_name: str
to_bck: BucketModel
mime: str = None
include_source_name = False
allow_append = False
continue_on_err = False
object_selection: dict
def as_dict(self):
dict_rep = self.object_selection
dict_rep["archname"] = self.archive_name
dict_rep["isbn"] = self.include_source_name
dict_rep["aate"] = self.allow_append
dict_rep["coer"] = self.continue_on_err
if self.mime:
dict_rep["mime"] = self.mime
if self.to_bck:
dict_rep["tobck"] = self.to_bck.as_dict()
return dict_rep
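# --- Illustrative usage sketch (not part of the original file). ---
# Builds the same list-objects payload that Bucket.list_objects sends. The two
# flags combine as 2**0 + 2**5 == 33, so as_dict() renders "flags": "33".
if __name__ == "__main__":
    msg = ListObjectsMsg(
        prefix="shard-",
        page_size=1000,
        uuid="",
        props="name,size",
        continuation_token="",
        flags=[ListObjectFlag.CACHED, ListObjectFlag.NAME_SIZE],
        target="",
    )
    print(msg.as_dict())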
| aistore-master | python/aistore/sdk/types.py |
import json
import logging
import time
from typing import Dict
from aistore.sdk.const import (
HTTP_METHOD_POST,
URL_PATH_DSORT,
HTTP_METHOD_GET,
DEFAULT_DSORT_WAIT_TIMEOUT,
HTTP_METHOD_DELETE,
DSORT_ABORT,
DSORT_UUID,
)
from aistore.sdk.dsort_types import JobInfo
from aistore.sdk.errors import Timeout
from aistore.sdk.utils import validate_file, probing_frequency
class Dsort:
"""
Class for managing jobs for the dSort extension: https://github.com/NVIDIA/aistore/blob/master/docs/cli/dsort.md
"""
def __init__(self, client: "Client", dsort_id: str = ""):
self._client = client
self._dsort_id = dsort_id
@property
def dsort_id(self) -> str:
"""
Return dSort job id
"""
return self._dsort_id
def start(self, spec_file: str) -> str:
"""
Start a dSort job with a provided spec file location
Returns:
dSort job ID
"""
validate_file(spec_file)
with open(spec_file, "r", encoding="utf-8") as file_data:
spec = json.load(file_data)
self._dsort_id = self._client.request(
HTTP_METHOD_POST, path=URL_PATH_DSORT, json=spec
).text
return self._dsort_id
def abort(self):
"""
Abort a dSort job
"""
qparam = {DSORT_UUID: [self._dsort_id]}
self._client.request(
HTTP_METHOD_DELETE, path=f"{URL_PATH_DSORT}/{DSORT_ABORT}", params=qparam
)
def get_job_info(self) -> Dict[str, JobInfo]:
"""
Get info for a dsort job
Returns:
Dictionary of job info for all jobs associated with this dsort
"""
qparam = {DSORT_UUID: [self._dsort_id]}
return self._client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_DSORT,
res_model=Dict[str, JobInfo],
params=qparam,
)
def wait(
self,
timeout: int = DEFAULT_DSORT_WAIT_TIMEOUT,
verbose: bool = True,
):
"""
Wait for a dSort job to finish
Args:
timeout (int, optional): The maximum time to wait for the job, in seconds. Default timeout is 5 minutes.
verbose (bool, optional): Whether to log wait status to standard output
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
errors.Timeout: Timeout while waiting for the job to finish
"""
logger = logging.getLogger(f"{__name__}.wait")
logger.disabled = not verbose
passed = 0
sleep_time = probing_frequency(timeout)
while True:
if passed > timeout:
raise Timeout("dsort job to finish")
finished = True
for job_info in self.get_job_info().values():
if job_info.metrics.aborted:
logger.info("DSort job '%s' aborted", self._dsort_id)
return
# Shard creation is the last phase, so check if it's finished
finished = job_info.metrics.shard_creation.finished and finished
if finished:
logger.info("DSort job '%s' finished", self._dsort_id)
return
logger.info("Waiting on dsort job '%s'...", self._dsort_id)
time.sleep(sleep_time)
passed += sleep_time
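# Illustrative sketch (not part of the original file): a typical start/wait/abort flow.
# The request client and the spec file path are assumptions; any object exposing
# request()/request_deserialize() as used above would work here.
def _example_dsort_flow(request_client, spec_path="/tmp/dsort_spec.json"):
    dsort = Dsort(client=request_client)
    job_id = dsort.start(spec_path)  # POST the spec, returns the dSort job id
    try:
        dsort.wait(timeout=600)  # poll until the shard-creation phase finishes
    except Timeout:
        dsort.abort()  # give up on the job if it exceeds the timeout
    return job_id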
| aistore-master | python/aistore/sdk/dsort.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
from abc import ABC, abstractmethod
from typing import Iterable
# pylint: disable=too-few-public-methods
class AISSource(ABC):
"""
Interface for all AIS class types providing access to AIS objects via URLs
"""
@abstractmethod
def list_urls(self, prefix: str = "", etl_name: str = None) -> Iterable[str]:
"""
Get an iterable of urls to reference the objects contained in this source (bucket, group, etc.)
Args:
prefix (str, optional): Only include objects with names matching this prefix
etl_name (str, optional): Apply an ETL when retrieving object contents
Returns:
Iterable over selected object URLS
"""
| aistore-master | python/aistore/sdk/ais_source.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from pathlib import Path
from typing import Iterator, Type, TypeVar
import braceexpand
import humanize
from msgspec import msgpack
import pydantic.tools
import requests
from pydantic import BaseModel, parse_raw_as
from requests import Response
from aistore.sdk.const import UTF_ENCODING, HEADER_CONTENT_TYPE, MSGPACK_CONTENT_TYPE
from aistore.sdk.errors import (
AISError,
ErrBckNotFound,
ErrRemoteBckNotFound,
ErrBckAlreadyExists,
ErrETLAlreadyExists,
)
T = TypeVar("T")
class HttpError(BaseModel):
"""
Represents the errors returned by the API
"""
status: int
message: str = ""
method: str = ""
url_path: str = ""
remote_addr: str = ""
caller: str = ""
node: str = ""
def _raise_error(text: str):
    err = pydantic.tools.parse_raw_as(HttpError, text)
    if 400 <= err.status < 500:
if "does not exist" in err.message:
if "cloud bucket" in err.message or "remote bucket" in err.message:
raise ErrRemoteBckNotFound(err.status, err.message)
if "bucket" in err.message:
raise ErrBckNotFound(err.status, err.message)
if "already exists" in err.message:
if "bucket" in err.message:
raise ErrBckAlreadyExists(err.status, err.message)
if "etl" in err.message:
raise ErrETLAlreadyExists(err.status, err.message)
raise AISError(err.status, err.message)
# pylint: disable=unused-variable
def handle_errors(resp: requests.Response):
"""
Error handling for requests made to the AIS Client
Args:
resp: requests.Response = Response received from the request
"""
error_text = resp.text
if isinstance(resp.text, bytes):
try:
error_text = error_text.decode(UTF_ENCODING)
except UnicodeDecodeError:
error_text = error_text.decode("iso-8859-1")
if error_text != "":
_raise_error(error_text)
resp.raise_for_status()
def probing_frequency(dur: int) -> float:
"""
Given a timeout, return an interval to wait between retries
Args:
dur: Duration of timeout
Returns:
Frequency to probe
"""
freq = min(dur / 8.0, 1.0)
freq = max(dur / 64.0, freq)
return max(freq, 0.1)
def read_file_bytes(filepath: str):
"""
Given a filepath, read the content as bytes
Args:
filepath: Existing local filepath
Returns: Raw bytes
"""
with open(filepath, "rb") as reader:
return reader.read()
def _check_path_exists(path: str):
if not Path(path).exists():
raise ValueError(f"Path: {path} does not exist")
def validate_file(path: str):
"""
Validate that a file exists and is a file
Args:
path: Path to validate
Raises:
ValueError: If path does not exist or is not a file
"""
_check_path_exists(path)
if not Path(path).is_file():
raise ValueError(f"Path: {path} is a directory, not a file")
def validate_directory(path: str):
"""
Validate that a directory exists and is a directory
Args:
path: Path to validate
Raises:
ValueError: If path does not exist or is not a directory
"""
_check_path_exists(path)
if not Path(path).is_dir():
raise ValueError(f"Path: {path} is a file, not a directory")
def get_file_size(file: Path) -> str:
"""
Get the size of a file and return it in human-readable format
Args:
file: File to read
Returns:
Size of file as human-readable string
"""
return (
humanize.naturalsize(file.stat().st_size) if file.stat().st_size else "unknown"
)
def expand_braces(template: str) -> Iterator[str]:
"""
Given a string template, apply bash-style brace expansion to return a list of strings
Args:
template: Valid brace expansion input, e.g. prefix-{0..10..2}-gap-{11..15}-suffix
Returns:
Iterator of brace expansion output
"""
# pylint: disable = fixme
# TODO Build custom expansion to validate consistent with cmn/cos/template.go TemplateRange
return braceexpand.braceexpand(template)
def decode_response(
res_model: Type[T],
resp: Response,
) -> T:
"""
Parse response content from the cluster into a Python class,
decoding with msgpack depending on content type in header
Args:
res_model (Type[T]): Resulting type to which the response should be deserialized
resp (Response): Response from the AIS cluster
"""
if resp.headers.get(HEADER_CONTENT_TYPE) == MSGPACK_CONTENT_TYPE:
return msgpack.decode(resp.content, type=res_model)
return parse_raw_as(res_model, resp.text)
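# Illustrative sketch (not part of the original file): brace expansion and the derived
# probing interval, as used by the SDK's wait loops elsewhere in this package.
def _example_utils_usage():
    names = list(expand_braces("shard-{0..4..2}.tar"))  # ['shard-0.tar', 'shard-2.tar', 'shard-4.tar']
    interval = probing_frequency(300)  # seconds to sleep between status checks for a 300s timeout
    return names, interval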
| aistore-master | python/aistore/sdk/utils.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from __future__ import annotations # pylint: disable=unused-variable
from typing import List, Optional
from aistore.sdk.const import (
HTTP_METHOD_GET,
ACT_LIST,
PROVIDER_AIS,
QPARAM_WHAT,
QPARAM_PRIMARY_READY_REB,
QPARAM_PROVIDER,
WHAT_SMAP,
URL_PATH_BUCKETS,
URL_PATH_HEALTH,
URL_PATH_DAEMON,
URL_PATH_CLUSTER,
WHAT_ALL_XACT_STATUS,
WHAT_ALL_RUNNING_STATUS,
URL_PATH_ETL,
)
from aistore.sdk.types import BucketModel, JobStatus, JobQuery, ETLInfo
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import ActionMsg, Smap
# pylint: disable=unused-variable
class Cluster:
"""
A class representing a cluster bound to an AIS client.
"""
# pylint: disable=duplicate-code
def __init__(self, client: RequestClient):
self._client = client
@property
def client(self):
"""Client this cluster uses to make requests"""
return self._client
def get_info(self) -> Smap:
"""
Returns state of AIS cluster, including the detailed information about its nodes.
Returns:
aistore.sdk.types.Smap: Smap containing cluster information
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
return self.client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_DAEMON,
res_model=Smap,
params={QPARAM_WHAT: WHAT_SMAP},
)
def list_buckets(self, provider: str = PROVIDER_AIS):
"""
Returns list of buckets in AIStore cluster.
Args:
provider (str, optional): Name of bucket provider, one of "ais", "aws", "gcp", "az", "hdfs" or "ht".
Defaults to "ais". Empty provider returns buckets of all providers.
Returns:
List[BucketModel]: A list of buckets
Raises:
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.ReadTimeout: Timed out waiting response from AIStore
"""
params = {QPARAM_PROVIDER: provider}
action = ActionMsg(action=ACT_LIST).dict()
return self.client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_BUCKETS,
res_model=List[BucketModel],
json=action,
params=params,
)
def list_jobs_status(self, job_kind="", target_id="") -> List[JobStatus]:
"""
List the status of jobs on the cluster
Args:
job_kind (str, optional): Only show jobs of a particular type
target_id (str, optional): Limit to jobs on a specific target node
Returns:
List of JobStatus objects
"""
res = self._client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=Optional[List[JobStatus]],
json=JobQuery(kind=job_kind, target=target_id).as_dict(),
params={QPARAM_WHAT: WHAT_ALL_XACT_STATUS},
)
if res is None:
return []
return res
def list_running_jobs(self, job_kind="", target_id="") -> List[str]:
"""
List the currently running jobs on the cluster
Args:
job_kind (str, optional): Only show jobs of a particular type
target_id (str, optional): Limit to jobs on a specific target node
Returns:
List of jobs in the format job_kind[job_id]
"""
return self._client.request_deserialize(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=List[str],
json=JobQuery(kind=job_kind, target=target_id, active=True).as_dict(),
params={QPARAM_WHAT: WHAT_ALL_RUNNING_STATUS},
)
def list_running_etls(self) -> List[ETLInfo]:
"""
Lists all running ETLs.
Note: Does not list ETLs that have been stopped or deleted.
Returns:
List[ETLInfo]: A list of details on running ETLs
"""
return self._client.request_deserialize(
HTTP_METHOD_GET, path=URL_PATH_ETL, res_model=List[ETLInfo]
)
def is_aistore_running(self) -> bool:
"""
Checks if cluster is ready or still setting up.
Returns:
bool: True if cluster is ready, or false if cluster is still setting up
"""
# compare with AIS Go API (api/cluster.go) for additional supported options
params = {QPARAM_PRIMARY_READY_REB: "true"}
try:
resp = self.client.request(
HTTP_METHOD_GET, path=URL_PATH_HEALTH, params=params
)
return resp.ok
except Exception:
return False
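# Illustrative sketch (not part of the original file): querying cluster state. The
# RequestClient construction is assumed to happen elsewhere (the SDK's Client wires
# one up and passes it in).
def _example_cluster_queries(request_client: RequestClient):
    cluster = Cluster(request_client)
    if not cluster.is_aistore_running():
        return None
    smap = cluster.get_info()  # cluster map, including node details
    ais_buckets = cluster.list_buckets()  # defaults to the "ais" provider
    running = cluster.list_running_jobs()  # e.g. ["rebalance[g1]", ...]
    return smap, ais_buckets, running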
| aistore-master | python/aistore/sdk/cluster.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from typing import Optional, List, Callable
from aistore.sdk.types import BucketEntry
# pylint: disable=unused-variable
class ObjectIterator:
"""
Represents an iterable that will fetch all objects from a bucket, querying as needed with the specified function
Args:
list_objects (Callable): Function returning a BucketList from an AIS cluster
"""
    def __init__(self, list_objects: Callable):
        self._list_objects = list_objects
        # Initialize paging state per instance to avoid sharing a mutable class-level list
        self._fetched: Optional[List[BucketEntry]] = []
        self._token: str = ""
        self._uuid: str = ""
def __iter__(self):
return self
def __next__(self) -> BucketEntry:
# Iterator is exhausted.
if len(self._fetched) == 0 and self._token == "" and self._uuid != "":
raise StopIteration
# Read the next page of objects.
if len(self._fetched) == 0:
resp = self._list_objects(uuid=self._uuid, token=self._token)
self._fetched = resp.entries
self._uuid = resp.uuid
self._token = resp.continuation_token
# Empty page and token mean no more objects left.
if len(self._fetched) == 0 and self._token == "":
raise StopIteration
return self._fetched.pop(0)
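# Illustrative sketch (not part of the original file): driving ObjectIterator with a
# stubbed paging callable. A real list_objects function would query the cluster and
# return a BucketList; a stand-in with the same attributes is enough here.
def _example_object_iterator():
    from types import SimpleNamespace
    pages = [
        SimpleNamespace(
            entries=[SimpleNamespace(name="a"), SimpleNamespace(name="b")],
            uuid="job-1",
            continuation_token="next",
        ),
        SimpleNamespace(entries=[SimpleNamespace(name="c")], uuid="job-1", continuation_token=""),
    ]
    def fake_list_objects(uuid="", token=""):
        return pages.pop(0)
    return [entry.name for entry in ObjectIterator(fake_list_objects)]  # ['a', 'b', 'c']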
| aistore-master | python/aistore/sdk/object_iterator.py |
from typing import Dict
from requests.structures import CaseInsensitiveDict
from aistore.sdk.const import (
HEADER_CONTENT_LENGTH,
AIS_CHECKSUM_TYPE,
AIS_CHECKSUM_VALUE,
AIS_ACCESS_TIME,
AIS_VERSION,
AIS_CUSTOM_MD,
)
# pylint: disable=too-few-public-methods
class ObjectAttributes:
"""
Represents the attributes parsed from the response headers returned from an API call to get an object
Args:
response_headers (CaseInsensitiveDict): Response header dict containing object attributes
"""
def __init__(self, response_headers: CaseInsensitiveDict):
self.response_headers = response_headers
@property
def size(self) -> int:
"""
Size of object content
"""
return int(self.response_headers.get(HEADER_CONTENT_LENGTH, 0))
@property
def checksum_type(self) -> str:
"""
Type of checksum, e.g. xxhash or md5
"""
return self.response_headers.get(AIS_CHECKSUM_TYPE, "")
@property
def checksum_value(self) -> str:
"""
Checksum value
"""
return self.response_headers.get(AIS_CHECKSUM_VALUE, "")
@property
def access_time(self) -> str:
"""
Time this object was accessed
"""
return self.response_headers.get(AIS_ACCESS_TIME, "")
@property
def obj_version(self) -> str:
"""
Object version
"""
return self.response_headers.get(AIS_VERSION, "")
@property
def custom_metadata(self) -> Dict[str, str]:
"""
Dictionary of custom metadata
"""
custom_md_header = self.response_headers.get(AIS_CUSTOM_MD, "")
if len(custom_md_header) > 0:
return self._parse_custom(custom_md_header)
return {}
@staticmethod
def _parse_custom(custom_md_header) -> Dict[str, str]:
"""
Parse the comma-separated list of optional custom metadata from the custom metadata header
Args:
custom_md_header: Header containing metadata csv
Returns:
Dictionary of custom metadata
"""
custom_metadata = {}
for entry in custom_md_header.split(","):
try:
assert isinstance(entry, str)
entry_list = entry.strip().split("=")
assert len(entry_list) == 2
custom_metadata[entry_list[0]] = entry_list[1]
except AssertionError:
continue
return custom_metadata
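# Illustrative sketch (not part of the original file): parsing attributes from a
# hand-built header dict with the same shape requests returns for a GET-object call.
def _example_object_attributes():
    headers = CaseInsensitiveDict(
        {
            "Content-Length": "1024",
            "ais-checksum-type": "xxhash",
            "ais-checksum-value": "deadbeef",
            "ais-custom-md": "color=red, owner=alice",
        }
    )
    attrs = ObjectAttributes(headers)
    # -> (1024, 'xxhash', {'color': 'red', 'owner': 'alice'})
    return attrs.size, attrs.checksum_type, attrs.custom_metadata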
| aistore-master | python/aistore/sdk/object_attributes.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
class AISError(Exception):
"""
Raised when an error is encountered from a query to the AIS cluster
"""
def __init__(self, status_code: int, message: str):
self.status_code = status_code
self.message = message
super().__init__(f"STATUS:{status_code}, MESSAGE:{message}")
# pylint: disable=unused-variable
class InvalidBckProvider(Exception):
"""
Raised when the bucket provider is invalid for the requested operation
"""
def __init__(self, provider):
super().__init__(f"Invalid bucket provider {provider}")
# pylint: disable=unused-variable
class ErrRemoteBckNotFound(AISError):
"""
    Raised when a remote bucket is required and missing for the requested operation
"""
def __init__(self, status_code, message):
super().__init__(status_code=status_code, message=message)
# pylint: disable=unused-variable
class ErrBckNotFound(AISError):
"""
Raised when a bucket is expected and not found
"""
def __init__(self, status_code, message):
super().__init__(status_code=status_code, message=message)
# pylint: disable=unused-variable
class ErrBckAlreadyExists(AISError):
"""
Raised when a bucket is created but already exists in AIS
"""
def __init__(self, status_code, message):
super().__init__(status_code=status_code, message=message)
# pylint: disable=unused-variable
class ErrETLAlreadyExists(AISError):
"""
Raised when an ETL is created but already exists in AIS
"""
def __init__(self, status_code, message):
super().__init__(status_code=status_code, message=message)
# pylint: disable=unused-variable
class Timeout(Exception):
"""
Raised when an operation takes too long to complete
"""
def __init__(self, action, message=""):
super().__init__(f"Timed out while waiting for {action}. {message}")
# pylint: disable=unused-variable
class InvalidObjectRangeIndex(Exception):
"""
Raised when incorrect range parameters are passed when creating an ObjectRange
"""
def __init__(self, message):
super().__init__(f"Invalid argument provided for object range index: {message}")
class JobInfoNotFound(Exception):
"""
Raised when information on a job's status could not be found on the AIS cluster
"""
def __init__(self, message):
super().__init__(f"Job information not found on the cluster: {message}")
class UnexpectedHTTPStatusCode(Exception):
"""
Raised when the status code from a response is not what's expected.
"""
def __init__(self, expected_status_codes, received_status_code):
expected_codes = ", ".join(str(code) for code in expected_status_codes)
super().__init__(
(
f"Unexpected status code received. "
f"Expected one of the following: {expected_codes}, "
f"but received: {received_status_code}"
)
)
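# Illustrative sketch (not part of the original file): the hierarchy above lets callers
# distinguish "bucket missing" from any other AIS failure. The bucket.head() call is a
# hypothetical SDK call used only for illustration.
def _example_error_handling(bucket):
    try:
        return bucket.head()
    except ErrBckNotFound:
        return None  # the bucket simply is not there
    except AISError as err:
        raise RuntimeError(f"AIS request failed with status {err.status_code}") from err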
| aistore-master | python/aistore/sdk/errors.py |
from pydantic import BaseModel
class Namespace(BaseModel):
"""
A bucket namespace defined by the uuid of the cluster and a name
"""
uuid: str = ""
name: str = ""
def get_path(self) -> str:
"""
Get the AIS-style path representation of the string -- @uuid#name
Returns:
Formatted namespace string
"""
return f"@{self.uuid}#{self.name}"
| aistore-master | python/aistore/sdk/namespace.py |
from typing import Iterator
import requests
from requests.structures import CaseInsensitiveDict
from aistore.sdk.const import DEFAULT_CHUNK_SIZE
from aistore.sdk.object_attributes import ObjectAttributes
class ObjectReader:
"""
Represents the data returned by the API when getting an object, including access to the content stream and object
attributes
"""
def __init__(
self,
response_headers: CaseInsensitiveDict,
stream: requests.Response,
chunk_size: int = DEFAULT_CHUNK_SIZE,
):
self._chunk_size = chunk_size
self._stream = stream
self._attributes = ObjectAttributes(response_headers)
@property
def attributes(self) -> ObjectAttributes:
"""
Object metadata attributes
Returns:
Object attributes parsed from the headers returned by AIS
"""
return self._attributes
def read_all(self) -> bytes:
"""
Read all byte data from the object content stream.
This uses a bytes cast which makes it slightly slower and requires all object content to fit in memory at once
Returns:
Object content as bytes
"""
obj_arr = bytearray()
for chunk in self:
obj_arr.extend(chunk)
return bytes(obj_arr)
def raw(self) -> bytes:
"""
Returns: Raw byte stream of object content
"""
return self._stream.raw
def __iter__(self) -> Iterator[bytes]:
"""
Creates a generator to read the stream content in chunks
Returns:
An iterator with access to the next chunk of bytes
"""
try:
for chunk in self._stream.iter_content(chunk_size=self._chunk_size):
yield chunk
finally:
self._stream.close()
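# Illustrative sketch (not part of the original file): streaming an object to a local
# file without holding it all in memory. The URL is a placeholder for a GET-object URL
# requested with stream=True.
def _example_stream_to_file(url: str, dest_path: str) -> int:
    resp = requests.get(url, stream=True)
    reader = ObjectReader(response_headers=resp.headers, stream=resp)
    with open(dest_path, "wb") as writer:
        for chunk in reader:  # yields DEFAULT_CHUNK_SIZE pieces and closes the stream at the end
            writer.write(chunk)
    return reader.attributes.size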
| aistore-master | python/aistore/sdk/object_reader.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
HEADERS_KW = "headers"
# Standard Header Keys
HEADER_ACCEPT = "Accept"
HEADER_USER_AGENT = "User-Agent"
HEADER_CONTENT_TYPE = "Content-Type"
HEADER_CONTENT_LENGTH = "Content-Length"
# Standard Header Values
USER_AGENT_BASE = "ais/python"
JSON_CONTENT_TYPE = "application/json"
MSGPACK_CONTENT_TYPE = "application/msgpack"
# AIS Headers
AIS_CHECKSUM_TYPE = "ais-checksum-type"
AIS_CHECKSUM_VALUE = "ais-checksum-value"
AIS_ACCESS_TIME = "ais-atime"
AIS_VERSION = "ais-version"
AIS_CUSTOM_MD = "ais-custom-md"
# Bucket Props Header keys
HEADER_PREFIX = "ais-"
HEADER_BUCKET_PROPS = HEADER_PREFIX + "bucket-props"
HEADER_BUCKET_SUMM = HEADER_PREFIX + "bucket-summ"
# URL Params
# See api/apc/query.go
QPARAM_WHAT = "what"
QPARAM_ETL_NAME = "etl_name"
QPARAM_PROVIDER = "provider"
QPARAM_BCK_TO = "bck_to"
QPARAM_FLT_PRESENCE = "presence"
QPARAM_COUNT_REMOTE_OBJS = "count_remote_objs"
QPARAM_KEEP_REMOTE = "keep_bck_md"
QPARAM_ARCHPATH = "archpath"
QPARAM_FORCE = "frc"
QPARAM_PRIMARY_READY_REB = "prr"
QPARAM_NAMESPACE = "namespace"
DSORT_UUID = "uuid"
# URL Param values
# See api/apc/query.go
WHAT_SMAP = "smap"
WHAT_ONE_XACT_STATUS = "status"
WHAT_ALL_XACT_STATUS = "status_all"
WHAT_ALL_RUNNING_STATUS = "running_all"
WHAT_QUERY_XACT_STATS = "qryxstats"
# URL paths
URL_PATH_CLUSTER = "cluster"
URL_PATH_BUCKETS = "buckets"
URL_PATH_OBJECTS = "objects"
URL_PATH_HEALTH = "health"
URL_PATH_DAEMON = "daemon"
URL_PATH_ETL = "etl"
URL_PATH_DSORT = "sort"
DSORT_ABORT = "abort"
# Bucket providers
# See api/apc/provider.go
PROVIDER_AIS = "ais"
PROVIDER_AMAZON = "aws"
PROVIDER_AZURE = "azure"
PROVIDER_GOOGLE = "gcp"
PROVIDER_HDFS = "hdfs"
PROVIDER_HTTP = "ht"
# HTTP Methods
HTTP_METHOD_GET = "get"
HTTP_METHOD_POST = "post"
HTTP_METHOD_DELETE = "delete"
HTTP_METHOD_PUT = "put"
HTTP_METHOD_HEAD = "head"
# Actions
# See api/apc/actmsg.go
ACT_CREATE_BCK = "create-bck"
ACT_DESTROY_BCK = "destroy-bck"
ACT_COPY_BCK = "copy-bck"
ACT_ETL_BCK = "etl-bck"
ACT_EVICT_REMOTE_BCK = "evict-remote-bck"
ACT_LIST = "list"
ACT_MOVE_BCK = "move-bck"
ACT_PROMOTE = "promote"
ACT_SUMMARY_BCK = "summary-bck"
# Multi-object actions
ACT_DELETE_OBJECTS = "delete-listrange"
ACT_EVICT_OBJECTS = "evict-listrange"
ACT_PREFETCH_OBJECTS = "prefetch-listrange"
ACT_COPY_OBJECTS = "copy-listrange"
ACT_TRANSFORM_OBJECTS = "etl-listrange"
ACT_ARCHIVE_OBJECTS = "archive"
# Job actions
ACT_START = "start"
# Defaults
DEFAULT_CHUNK_SIZE = 32768
DEFAULT_JOB_WAIT_TIMEOUT = 300
DEFAULT_DSORT_WAIT_TIMEOUT = 300
# ENCODING
UTF_ENCODING = "utf-8"
# Status Codes
STATUS_ACCEPTED = 202
STATUS_OK = 200
STATUS_BAD_REQUEST = 400
| aistore-master | python/aistore/sdk/const.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
from abc import abstractmethod, ABC
from typing import Iterator, Dict
class ObjectCollection(ABC):
"""
Abstract class for collections of object names
"""
@abstractmethod
def get_value(self) -> Dict[str, any]:
"""
Get the json representation of the names to send to the API
Returns:
Dictionary of request entry to name representation
"""
@abstractmethod
def __iter__(self) -> Iterator[str]:
pass
| aistore-master | python/aistore/sdk/multiobj/object_collection.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import logging
from typing import List, Iterable
from aistore.sdk.ais_source import AISSource
from aistore.sdk.const import (
HTTP_METHOD_DELETE,
HTTP_METHOD_POST,
HTTP_METHOD_PUT,
ACT_DELETE_OBJECTS,
ACT_PREFETCH_OBJECTS,
ACT_EVICT_OBJECTS,
ACT_COPY_OBJECTS,
ACT_TRANSFORM_OBJECTS,
ACT_ARCHIVE_OBJECTS,
)
from aistore.sdk.etl_const import DEFAULT_ETL_TIMEOUT
from aistore.sdk.multiobj.object_names import ObjectNames
from aistore.sdk.multiobj.object_range import ObjectRange
from aistore.sdk.multiobj.object_template import ObjectTemplate
from aistore.sdk.types import (
TCMultiObj,
CopyBckMsg,
TransformBckMsg,
TCBckMsg,
ArchiveMultiObj,
)
# pylint: disable=unused-variable
class ObjectGroup(AISSource):
"""
A class representing multiple objects within the same bucket. Only one of obj_names, obj_range, or obj_template
should be provided.
Args:
bck (Bucket): Bucket the objects belong to
obj_names (list[str], optional): List of object names to include in this collection
obj_range (ObjectRange, optional): Range defining which object names in the bucket should be included
obj_template (str, optional): String argument to pass as template value directly to api
"""
def __init__(
self,
bck: "Bucket",
obj_names: list = None,
obj_range: ObjectRange = None,
obj_template: str = None,
):
self.bck = bck
num_args = sum(
1 if x is not None else 0 for x in [obj_names, obj_range, obj_template]
)
if num_args != 1:
raise ValueError(
"ObjectGroup accepts one and only one of: obj_names, obj_range, or obj_template"
)
if obj_range and not isinstance(obj_range, ObjectRange):
raise TypeError("obj_range must be of type ObjectRange")
if obj_range:
self._obj_collection = obj_range
elif obj_names:
self._obj_collection = ObjectNames(obj_names)
else:
self._obj_collection = ObjectTemplate(obj_template)
def list_urls(self, prefix: str = "", etl_name: str = None) -> Iterable[str]:
"""
Get an iterator of the full URL for every object in this group
Args:
prefix (str, optional): Limit objects selected by a given string prefix
etl_name (str, optional): ETL to include in URLs
Returns:
Iterator of all object URLs in the group
"""
for obj_name in self._obj_collection:
yield self.bck.object(obj_name).get_url(etl_name=etl_name)
def delete(self):
"""
Deletes a list or range of objects in a bucket
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
return self.bck.make_request(
HTTP_METHOD_DELETE,
ACT_DELETE_OBJECTS,
value=self._obj_collection.get_value(),
).text
def evict(self):
"""
Evicts a list or range of objects in a bucket so that they are no longer cached in AIS
NOTE: only Cloud buckets can be evicted.
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
self.bck.verify_cloud_bucket()
return self.bck.make_request(
HTTP_METHOD_DELETE,
ACT_EVICT_OBJECTS,
value=self._obj_collection.get_value(),
).text
def prefetch(self):
"""
Prefetches a list or range of objects in a bucket so that they are cached in AIS
NOTE: only Cloud buckets can be prefetched.
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
self.bck.verify_cloud_bucket()
return self.bck.make_request(
HTTP_METHOD_POST,
ACT_PREFETCH_OBJECTS,
value=self._obj_collection.get_value(),
).text
# pylint: disable=too-many-arguments
def copy(
self,
to_bck: "Bucket",
prepend: str = "",
continue_on_error: bool = False,
dry_run: bool = False,
force: bool = False,
):
"""
Copies a list or range of objects in a bucket
Args:
to_bck (Bucket): Destination bucket
prepend (str, optional): Value to prepend to the name of copied objects
continue_on_error (bool, optional): Whether to continue if there is an error copying a single object
dry_run (bool, optional): Skip performing the copy and just log the intended actions
force (bool, optional): Force this job to run over others in case it conflicts
(see "limited coexistence" and xact/xreg/xreg.go)
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
if dry_run:
logger = logging.getLogger(f"{__name__}.copy")
logger.info(
"Copy dry-run. Running with dry_run=False will copy the following objects from bucket '%s' to '%s': %s",
f"{self.bck.get_path()}",
f"{to_bck.get_path()}",
list(self._obj_collection),
)
copy_msg = CopyBckMsg(prepend=prepend, dry_run=dry_run, force=force)
value = TCMultiObj(
to_bck=to_bck.as_model(),
tc_msg=TCBckMsg(copy_msg=copy_msg),
object_selection=self._obj_collection.get_value(),
continue_on_err=continue_on_error,
).as_dict()
return self.bck.make_request(
HTTP_METHOD_POST, ACT_COPY_OBJECTS, value=value
).text
# pylint: disable=too-many-arguments
def transform(
self,
to_bck: "Bucket",
etl_name: str,
timeout: str = DEFAULT_ETL_TIMEOUT,
prepend: str = "",
continue_on_error: bool = False,
dry_run: bool = False,
force: bool = False,
):
"""
Performs ETL operation on a list or range of objects in a bucket, placing the results in the destination bucket
Args:
to_bck (Bucket): Destination bucket
etl_name (str): Name of existing ETL to apply
timeout (str): Timeout of the ETL job (e.g. 5m for 5 minutes)
prepend (str, optional): Value to prepend to the name of resulting transformed objects
continue_on_error (bool, optional): Whether to continue if there is an error transforming a single object
dry_run (bool, optional): Skip performing the transform and just log the intended actions
force (bool, optional): Force this job to run over others in case it conflicts
(see "limited coexistence" and xact/xreg/xreg.go)
Raises:
aistore.sdk.errors.AISError: All other types of errors with AIStore
requests.ConnectionError: Connection error
requests.ConnectionTimeout: Timed out connecting to AIStore
requests.exceptions.HTTPError: Service unavailable
requests.RequestException: "There was an ambiguous exception that occurred while handling..."
requests.ReadTimeout: Timed out receiving response from AIStore
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
if dry_run:
logger = logging.getLogger(f"{__name__}.transform")
logger.info(
"Transform dry-run. Running with dry_run=False will apply ETL '%s' to objects %s",
etl_name,
list(self._obj_collection),
)
copy_msg = CopyBckMsg(prepend=prepend, dry_run=dry_run, force=force)
transform_msg = TransformBckMsg(etl_name=etl_name, timeout=timeout)
value = TCMultiObj(
to_bck=to_bck.as_model(),
tc_msg=TCBckMsg(transform_msg=transform_msg, copy_msg=copy_msg),
object_selection=self._obj_collection.get_value(),
continue_on_err=continue_on_error,
).as_dict()
return self.bck.make_request(
HTTP_METHOD_POST, ACT_TRANSFORM_OBJECTS, value=value
).text
def archive(
self,
archive_name: str,
mime: str = "",
to_bck: "Bucket" = None,
include_source_name: bool = False,
allow_append: bool = False,
continue_on_err: bool = False,
):
"""
Create or append to an archive
Args:
archive_name (str): Name of archive to create or append
mime (str, optional): MIME type of the content
to_bck (Bucket, optional): Destination bucket, defaults to current bucket
include_source_name (bool, optional): Include the source bucket name in the archived objects' names
allow_append (bool, optional): Allow appending to an existing archive
continue_on_err (bool, optional): Whether to continue if there is an error archiving a single object
Returns:
Job ID (as str) that can be used to check the status of the operation
"""
val = ArchiveMultiObj(
object_selection=self._obj_collection.get_value(),
archive_name=archive_name,
mime=mime,
to_bck=to_bck.as_model() if to_bck else self.bck.as_model(),
include_source_name=include_source_name,
allow_append=allow_append,
continue_on_err=continue_on_err,
).as_dict()
return self.bck.make_request(
HTTP_METHOD_PUT, ACT_ARCHIVE_OBJECTS, value=val
).text
def list_names(self) -> List[str]:
"""
List all the object names included in this group of objects
Returns:
List of object names
"""
return list(self._obj_collection)
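# Illustrative sketch (not part of the original file): archiving a range of shards from
# an existing bucket. The bucket arguments are assumed to be aistore Bucket instances;
# names and indices are placeholders.
def _example_archive_shards(bucket, dest_bucket):
    shard_range = ObjectRange(prefix="shard-", min_index=0, max_index=99, pad_width=3, suffix=".tar")
    group = ObjectGroup(bck=bucket, obj_range=shard_range)
    # Creates "all-shards.tar" in dest_bucket from the 100 selected objects
    return group.archive(archive_name="all-shards.tar", to_bck=dest_bucket)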
| aistore-master | python/aistore/sdk/multiobj/object_group.py |
from aistore.sdk.multiobj.object_collection import ObjectCollection
from aistore.sdk.multiobj.object_template import ObjectTemplate
from aistore.sdk.multiobj.object_range import ObjectRange
from aistore.sdk.multiobj.object_names import ObjectNames
from aistore.sdk.multiobj.object_group import ObjectGroup
| aistore-master | python/aistore/sdk/multiobj/__init__.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
from typing import List, Iterator, Dict
from aistore.sdk.multiobj.object_collection import ObjectCollection
class ObjectNames(ObjectCollection):
"""
A collection of object names, provided as a list of strings
Args:
names (List[str]): A list of object names
"""
def __init__(self, names: List[str]):
self._names = names
def get_value(self) -> Dict[str, any]:
return {"objnames": self._names}
def __iter__(self) -> Iterator[str]:
return iter(self._names)
| aistore-master | python/aistore/sdk/multiobj/object_names.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
from typing import Dict, Iterator
from aistore.sdk import utils
from aistore.sdk.multiobj.object_collection import ObjectCollection
class ObjectTemplate(ObjectCollection):
"""
A collection of object names specified by a template in the bash brace expansion format
Args:
template (str): A string template that defines the names of objects to include in the collection
"""
def __init__(self, template: str):
self._template = template
def __iter__(self) -> Iterator[str]:
return utils.expand_braces(self._template)
def get_value(self) -> Dict[str, str]:
return {"template": self._template}
| aistore-master | python/aistore/sdk/multiobj/object_template.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
from typing import Iterator
from aistore.sdk import utils
from aistore.sdk.errors import InvalidObjectRangeIndex
from aistore.sdk.multiobj.object_collection import ObjectCollection
# pylint: disable=unused-variable,too-few-public-methods
class ObjectRange(ObjectCollection):
"""
Class representing a range of object names
Args:
prefix (str): Prefix contained in all names of objects
min_index (int): Starting index in the name of objects
max_index (int): Last index in the name of all objects
pad_width (int, optional): Left-pad indices with zeros up to the width provided, e.g. pad_width = 3 will
transform 1 to 001
step (int, optional): Size of iterator steps between each item
suffix (str, optional): Suffix at the end of all object names
"""
# pylint: disable=too-many-arguments
def __init__(
self,
prefix: str,
min_index: int = None,
max_index: int = None,
pad_width: int = 0,
step: int = 1,
suffix: str = "",
):
self._prefix = prefix
self._step = step
self._suffix = suffix
self._validate_indices(min_index, max_index, pad_width)
min_set = isinstance(min_index, int)
if self._suffix and not min_set:
raise ValueError("Suffix cannot be used without indices")
self._min_index = str(min_index).zfill(pad_width) if min_set else None
        self._max_index = (
            str(max_index).zfill(pad_width) if isinstance(max_index, int) else None
        )
@staticmethod
def _validate_indices(min_index, max_index, pad_width):
"""
Validate the indices passed to create a range: min_index < max_index and pad_width (if set) can fit the indices
provided.
Raises:
InvalidObjectRangeIndex: If the indices passed to the range are not valid
"""
indices = [isinstance(min_index, int), isinstance(max_index, int)]
if not all(indices):
if any(indices):
raise InvalidObjectRangeIndex(
"Provide both min_index and max_index or neither"
)
return
if min_index >= max_index:
raise InvalidObjectRangeIndex(f"{min_index} must be less than {max_index}")
if pad_width != 0 and len(str(min_index)) > pad_width:
raise InvalidObjectRangeIndex(
f"Number of digits in min index {min_index} must not be greater than pad width {pad_width}"
)
def _indices_set(self) -> bool:
if all([self._min_index, self._max_index]):
return True
return False
def __str__(self) -> str:
if self._indices_set():
return f"{self._prefix}{{{self._min_index}..{self._max_index}..{self._step}}}{self._suffix}"
return f"{self._prefix}"
def __iter__(self) -> Iterator[str]:
if not self._indices_set():
raise RuntimeError("Cannot iterate over object range with no indices")
return utils.expand_braces(str(self))
def get_value(self):
return {"template": str(self)}
| aistore-master | python/aistore/sdk/multiobj/object_range.py |
from aistore.pytorch.aisio import (
AISFileListerIterDataPipe as AISFileLister,
AISFileLoaderIterDataPipe as AISFileLoader,
AISSourceLister,
)
from aistore.pytorch.dataset import AISDataset
| aistore-master | python/aistore/pytorch/__init__.py |
"""
AIS IO Datapipe
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
from typing import Iterator, Tuple, List
from torch.utils.data.dataset import T_co
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from aistore.sdk.ais_source import AISSource
try:
from aistore.sdk import Client
from aistore.pytorch.utils import parse_url, unparse_url
HAS_AIS = True
except ImportError:
HAS_AIS = False
def _assert_aistore() -> None:
if not HAS_AIS:
raise ModuleNotFoundError(
"Package `aistore` is required to be installed to use this datapipe."
"Please run `pip install aistore` or `conda install aistore` to install the package"
"For more info visit: https://github.com/NVIDIA/aistore/blob/master/python/aistore/"
)
# pylint: disable=unused-variable
# pylint: disable=W0223
@functional_datapipe("ais_list_files")
class AISFileListerIterDataPipe(IterDataPipe[str]):
"""
Iterable Datapipe that lists files from the AIStore backends with the given URL prefixes.
(functional name: ``list_files_by_ais``).
    Acceptable prefixes include, but are not limited to, `ais://bucket-name` and `ais://bucket-name/`
Note:
- This function also supports files from multiple backends (`aws://..`, `gcp://..`, `hdfs://..`, etc)
- Input must be a list and direct URLs are not supported.
        - length is -1 by default; in that case calls to len() raise a TypeError,
          since the number of items is not known until iteration completes.
- This internally uses AIStore Python SDK.
Args:
source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL
prefixes to objects on AIS
length(int): length of the datapipe
url(str): AIStore endpoint
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister
        >>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws://bucket-name/folder/',
>>> 'ais://bucket-name/folder/', ...])
>>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes)
>>> for url in dp_ais_urls:
... pass
>>> # Functional API
>>> dp_ais_urls = ais_prefixes.list_files_by_ais(url='localhost:8080')
>>> for url in dp_ais_urls:
... pass
"""
def __init__(
self, source_datapipe: IterDataPipe[str], url: str, length: int = -1
) -> None:
_assert_aistore()
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length: int = length
self.client = Client(url)
def __iter__(self) -> Iterator[str]:
for prefix in self.source_datapipe:
provider, bck_name, prefix = parse_url(prefix)
obj_iter = self.client.bucket(bck_name, provider).list_objects_iter(
prefix=prefix
)
for entry in obj_iter:
yield unparse_url(
provider=provider, bck_name=bck_name, obj_name=entry.name
)
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
# pylint: disable=unused-variable
# pylint: disable=W0223
@functional_datapipe("ais_load_files")
class AISFileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
"""
Iterable DataPipe that loads files from AIStore with the given URLs (functional name: ``load_files_by_ais``).
Iterates all files in BytesIO format and returns a tuple (url, BytesIO).
Note:
- This function also supports files from multiple backends (`aws://..`, `gcp://..`, etc)
- Input must be a list and direct URLs are not supported.
- This internally uses AIStore Python SDK.
- An `etl_name` can be provided to run an existing ETL on the AIS cluster.
See https://github.com/NVIDIA/aistore/blob/master/docs/etl.md for more info on AIStore ETL.
Args:
source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL prefixes to objects
length(int): length of the datapipe
url(str): AIStore endpoint
etl_name (str, optional): Optional etl on the AIS cluster to apply to each object
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister,AISFileLoader
        >>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws://bucket-name/folder/',
>>> 'ais://bucket-name/folder/', ...])
>>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes)
>>> dp_cloud_files = AISFileLoader(url='localhost:8080', source_datapipe=dp_ais_urls)
>>> for url, file in dp_cloud_files:
... pass
>>> # Functional API
>>> dp_cloud_files = dp_ais_urls.load_files_by_ais(url='localhost:8080')
>>> for url, file in dp_cloud_files:
... pass
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
url: str,
length: int = -1,
etl_name: str = None,
) -> None:
_assert_aistore()
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length = length
self.client = Client(url)
self.etl_name = etl_name
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
provider, bck_name, obj_name = parse_url(url)
yield url, StreamWrapper(
self.client.bucket(bck_name=bck_name, provider=provider)
.object(obj_name=obj_name)
.get(etl_name=self.etl_name)
.raw()
)
def __len__(self) -> int:
return len(self.source_datapipe)
@functional_datapipe("ais_list_sources")
class AISSourceLister(IterDataPipe[str]):
def __init__(self, ais_sources: List[AISSource], prefix="", etl_name=None):
"""
Iterable DataPipe over the full URLs for each of the provided AIS source object types
Args:
ais_sources (List[AISSource]): List of types implementing the AISSource interface: Bucket, ObjectGroup,
Object, etc.
prefix (str, optional): Filter results to only include objects with names starting with this prefix
etl_name (str, optional): Pre-existing ETL on AIS to apply to all selected objects on the cluster side
"""
_assert_aistore()
self.sources = ais_sources
self.prefix = prefix
self.etl_name = etl_name
def __getitem__(self, index) -> T_co:
raise NotImplementedError
def __iter__(self) -> Iterator[T_co]:
for source in self.sources:
for url in source.list_urls(prefix=self.prefix, etl_name=self.etl_name):
yield url
| aistore-master | python/aistore/pytorch/aisio.py |
"""
AIS Plugin for PyTorch
PyTorch Dataset and DataLoader for AIS.
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
from typing import List, Union
from torch.utils.data import Dataset
from aistore.sdk import Client
from aistore.pytorch.utils import list_objects_info
# pylint: disable=too-few-public-methods
class AISBaseClass:
"""
A base class for creating AIS Datasets for PyTorch
Args:
client_url(str): AIS endpoint URL
urls_list(str or List[str]): single or list of url prefixes to load data
"""
def __init__(self, client_url: str, urls_list: Union[str, List[str]]) -> None:
self.client = Client(client_url)
if isinstance(urls_list, str):
urls_list = [urls_list]
self._object_info = {}
self._object_info = list_objects_info(self.client, urls_list)
# pylint: disable=unused-variable
class AISDataset(AISBaseClass, Dataset):
"""
A map-style dataset for objects in AIS.
If `etl_name` is provided, that ETL must already exist on the AIStore cluster
Args:
client_url (str): AIS endpoint URL
urls_list (str or List[str]): single or list of url prefixes to load data
etl_name (str, optional): Optional etl on the AIS cluster to apply to each object
"""
def __init__(
self, client_url: str, urls_list: Union[str, List[str]], etl_name=None
):
AISBaseClass.__init__(self, client_url, urls_list)
self.etl_name = etl_name
def __len__(self):
return len(self._object_info)
def __getitem__(self, index: int):
object_name = self._object_info[index]["object"]
obj = (
self.client.bucket(
bck_name=self._object_info[index]["bck_name"],
provider=self._object_info[index]["provider"],
)
.object(obj_name=object_name)
.get(etl_name=self.etl_name)
.read_all()
)
return object_name, obj
| aistore-master | python/aistore/pytorch/dataset.py |
"""
Utils for AIS PyTorch Plugin
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
from typing import List, Mapping, Tuple
from urllib.parse import urlparse, urlunparse
from aistore.sdk import Client
def parse_url(url: str) -> Tuple[str, str, str]:
"""
Parse AIS urls for bucket and object names
Args:
url (str): Complete URL of the object (eg. "ais://bucket1/file.txt")
Returns:
provider (str): AIS Backend
bck_name (str): Bucket name identifier
obj_name (str): Object name with extension
"""
parsed_url = urlparse(url)
path = parsed_url.path
if len(path) > 0 and path.startswith("/"):
path = path[1:]
# returns provider, bck_name, path
return parsed_url.scheme, parsed_url.netloc, path
# pylint: disable=unused-variable
def list_objects_info(client: Client, urls_list: List[str]) -> List[Mapping[str, str]]:
"""
Create list of [bucket_name, object_name] from all the object urls
Args:
client (Client): AIStore client object of the calling method
urls_list (List[str]): list of urls
Returns:
List[samples](List[Mapping[str, str]]): list of {provider, bucket, path to the object}
"""
samples = []
for url in urls_list:
provider, bck_name, path = parse_url(url)
objects = client.bucket(bck_name=bck_name, provider=provider).list_objects(
prefix=path
)
for obj_info in objects.entries:
samples.append(
{"provider": provider, "bck_name": bck_name, "object": obj_info.name}
)
return samples
def unparse_url(provider: str, bck_name: str, obj_name: str) -> str:
"""
To generate URL based on provider, bck_name and object name
Args:
provider(str): Provider name ('ais', 'gcp', etc)
bck_name(str): Bucket name
obj_name(str): Object name with extension.
Returns:
unparsed_url(str): Unparsed url (complete url)
"""
return urlunparse([provider, bck_name, obj_name, "", "", ""])
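# Illustrative sketch (not part of the original file): round-tripping a URL through
# parse_url and unparse_url.
def _example_url_roundtrip() -> str:
    provider, bck_name, obj_name = parse_url("gcp://my-bucket/images/cat.jpg")
    # ('gcp', 'my-bucket', 'images/cat.jpg')
    return unparse_url(provider=provider, bck_name=bck_name, obj_name=obj_name)
    # -> 'gcp://my-bucket/images/cat.jpg'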
| aistore-master | python/aistore/pytorch/utils.py |
from setuptools import setup, find_packages
setup(
name="pyaisloader",
version="0.0.1",
entry_points={
"console_scripts": [
"pyaisloader=pyaisloader.main:main",
],
},
packages=find_packages(include=["pyaisloader", "pyaisloader.*"]),
install_requires=[
"colorama>=0.4.6",
"humanfriendly>=10.0",
"pendulum>=2.1.2",
"tabulate>=0.9.0",
"aistore>=1.3.0",
],
)
| aistore-master | python/pyaisloader/setup.py |
import itertools
import random
import time
from aistore.sdk.const import PROVIDER_AIS
from pyaisloader.utils.bucket_utils import (
add_one_object,
bucket_exists,
bucket_obj_count,
bucket_size,
)
from pyaisloader.utils.cli_utils import (
bold,
confirm_continue,
print_caution,
print_in_progress,
print_sep,
print_success,
terminate,
underline,
)
from pyaisloader.utils.concurrency_utils import multiworker_deploy
from pyaisloader.utils.parse_utils import format_size, format_time
from pyaisloader.utils.random_utils import generate_bytes, generate_random_str
from pyaisloader.utils.stat_utils import combine_results, print_results
class BenchmarkStats:
def __init__(self):
self.total_op_bytes = 0
self.total_op_time = 0
self.total_ops = 0
self.latencies = []
self.objs_created = []
def update(self, size, latency, obj_name=None):
self.total_ops += 1
self.total_op_bytes += size
self.latencies.append(latency)
self.total_op_time += latency
if obj_name:
self.objs_created.append(obj_name)
def produce_stats(self):
self.latencies = self.latencies or [0] # To avoid division by zero
self.result = {
"ops": self.total_ops,
"bytes": self.total_op_bytes,
"time": self.total_op_time,
"throughput": self.total_op_bytes / self.total_op_time
if self.total_op_time != 0
else 0,
"latency_min": min(self.latencies),
"latency_avg": sum(self.latencies) / len(self.latencies),
"latency_max": max(self.latencies),
}
class Benchmark:
"""Abstract class for all benchmarks"""
def __init__(self, bucket, workers, cleanup):
self.bucket = bucket
self.workers = workers
self.cleanup = cleanup
self.objs_created = []
# Track for intelligent clean-up (deletes bucket if bucket was created by benchmark, otherwise only deletes objects in bucket created by benchmark)
self.bck_created = False
self.setup()
def run(self, *args, **kwargs):
raise NotImplementedError("This method should be implemented by subclasses.")
def setup(self):
if self.bucket.provider != PROVIDER_AIS: # Cloud Bucket
print_caution("You are currently operating on a cloud storage bucket.")
confirm_continue()
if not bucket_exists(
self.bucket
): # Cloud buckets that don't exist are not permitted
terminate(
"Cloud bucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
+ " does not exist and AIStore Python SDK does not yet support cloud bucket creation (re-run with existing cloud bucket)."
)
else:
if bucket_exists(self.bucket):
print_caution(
"The bucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
+ " already exists."
)
confirm_continue()
else:
print_in_progress(
"Creating bucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
)
self.bucket.create()
self.bck_created = True
print_success(
"Created bucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
)
def prepopulate(self, type_list=False):
prefix = (
"PREPOP-" + generate_random_str() + "-"
) # Each worker with unique prefix
objs_created = []
prepopulated_bytes = 0
if type_list:
for suffix in range(self.target):
_, objs_created = self.__prepopulate_h(objs_created, prefix, suffix)
else:
suffix = 0
while prepopulated_bytes < self.target:
size, objs_created = self.__prepopulate_h(objs_created, prefix, suffix)
prepopulated_bytes += size
suffix += 1
return objs_created
def __prepopulate_h(self, objs_created, prefix, suffix):
content, size = generate_bytes(self.minsize, self.maxsize)
obj = self.bucket.object(prefix + (str(suffix)))
obj.put_content(content)
objs_created.append(obj.name)
return size, objs_created
def clean_up(self, new=True):
if new:
print_in_progress("Cleaning up", "\U0001F9F9")
if not self.bck_created and not self.objs_created:
print_caution("Nothing to delete! Skipping clean-up...")
return
if self.bck_created:
msg = (
"bucket " + bold(f"{self.bucket.provider}://{self.bucket.name}") + " ? "
)
else:
msg = (
bold(f"{len(self.objs_created)}")
+ " objects created by the benchmark (and pre-population) in "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
+ " ? "
)
decision = input(
"\n"
+ "Would you like to proceed w/ deletion of "
+ msg
+ bold("(Y/N)")
+ ": "
)
if decision.lower() in ["n", "no"]:
print_caution("Skipping clean-up...")
return
if decision.lower() in ["y", "yes"]:
if self.bck_created:
self.bucket.delete()
else:
self.bucket.objects(obj_names=self.objs_created).delete()
print_success("Completed clean-up")
else:
self.clean_up(False)
class PutGetMixedBenchmark(Benchmark):
def __init__(
self,
put_pct,
minsize=None,
maxsize=None,
duration=None,
totalsize=None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.put_pct = put_pct
self.duration = duration
self.totalsize = totalsize
self.minsize = minsize
self.maxsize = maxsize
def run(self):
if self.put_pct == 100:
self.__run_put()
elif self.put_pct == 0:
if self.totalsize is not None:
self.__run_prepopulate()
self.__run_get()
else:
self.__run_mixed()
def __run_put(self):
totalsize = None if self.totalsize is None else (self.totalsize // self.workers)
print_in_progress("Performing PUT benchmark")
results = multiworker_deploy(
self, self.put_benchmark, (self.duration, totalsize)
)
print_success("Completed PUT benchmark")
result = []
for worker_result, worker_objs_created in results:
result.append(worker_result)
self.objs_created.extend(worker_objs_created)
result = combine_results(result, self.workers)
if self.cleanup:
self.clean_up()
print_sep()
print("\n" + underline(bold("Benchmark Results (100% PUT):")))
print_results(result)
def __run_get(self):
if bucket_obj_count(self.bucket) == 0:
add_one_object(self)
self.get_objs_queue = self.bucket.list_all_objects()
print_in_progress("Performing GET benchmark")
result = multiworker_deploy(self, self.get_benchmark, (self.duration,))
print_success("Completed GET benchmark")
result = combine_results(result, self.workers)
if self.cleanup:
self.clean_up()
print_sep()
print("\n" + underline(bold("Benchmark Results (100% GET):")))
print_results(result)
def __run_mixed(self):
if bucket_obj_count(self.bucket) == 0:
add_one_object(self)
print_in_progress("Performing MIXED benchmark")
result = multiworker_deploy(self, self.mixed_benchmark, (self.duration,))
print_success("Completed MIXED benchmark")
workers_objs_created = [
obj for worker_result in result for obj in worker_result[2]
]
self.objs_created.extend(workers_objs_created)
results_put = [res[0] for res in result]
results_get = [res[1] for res in result]
result_put = combine_results(results_put, self.workers)
result_get = combine_results(results_get, self.workers)
if self.cleanup:
self.clean_up()
print_sep()
print("\n" + underline(bold("Benchmark Results for PUT operations:")))
print_results(result_put)
print("\n" + underline(bold("Benchmark Results for GET operations:")))
print_results(result_get)
def __run_prepopulate(self):
print_in_progress("Starting Pre-Population")
curr_bck_size = bucket_size(self.bucket)
if curr_bck_size < self.totalsize:
self.target = ((self.totalsize) - curr_bck_size) // self.workers
result = multiworker_deploy(
self,
self.prepopulate,
(False,),
)
self.objs_created.extend(list(itertools.chain(*result)))
remaining_bytes = ((self.totalsize) - curr_bck_size) % self.workers
            if remaining_bytes != 0:  # handle any remainder that did not split evenly across workers
self.target = remaining_bytes
objs_created = self.prepopulate(type_list=False)
self.objs_created.extend(objs_created)
print_success("Completed Pre-Population")
else:
print(
"\nBucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
+ f" currently has a total size of "
+ bold(f"{format_size(curr_bck_size)}")
+ f", which already meets the specified total size of "
+ bold(f"{format_size(self.totalsize)}")
+ ". "
)
print_success("Skipped Pre-Population")
def put_benchmark(self, duration, totalsize): # Done
prefix = generate_random_str() # Each worker with unique prefix
pstats = BenchmarkStats()
if duration and totalsize: # Time/Size Based
while pstats.total_op_time < duration and pstats.total_op_bytes < totalsize:
self.__put_benchmark_h(pstats, prefix, pstats.total_ops)
elif duration: # Time Based
while pstats.total_op_time < duration:
self.__put_benchmark_h(pstats, prefix, pstats.total_ops)
        elif totalsize:  # Size Based
            while pstats.total_op_bytes < totalsize:
                # __put_benchmark_h already records size, latency, and object name in pstats
                self.__put_benchmark_h(pstats, prefix, pstats.total_ops)
pstats.produce_stats()
return pstats.result, pstats.objs_created
def __put_benchmark_h(self, stats, prefix, suffix): # Done
content, size = generate_bytes(self.minsize, self.maxsize)
obj = self.bucket.object(prefix + str(suffix))
op_start = time.time()
obj.put_content(content)
op_end = time.time()
latency = op_end - op_start
        stats.update(size, latency, obj.name)  # update() also records obj.name in objs_created
return obj
def get_benchmark(self, duration): # Done
gstats = BenchmarkStats()
while gstats.total_op_time < duration:
self.__get_benchmark_h(gstats, self.get_objs_queue)
gstats.produce_stats()
return gstats.result
def __get_benchmark_h(self, stats, objs): # Done
op_start = time.time()
content = self.bucket.object(random.choice(objs).name).get()
content.read_all()
op_end = time.time()
latency = op_end - op_start
stats.update(content.attributes.size, latency)
def mixed_benchmark(self, duration): # Done
prefix = generate_random_str() # Each worker with unique prefix
gstats = BenchmarkStats()
pstats = BenchmarkStats()
objs = [obj.object for obj in self.bucket.list_all_objects()]
while pstats.total_op_time + gstats.total_op_time < duration:
# Choose whether to perform a PUT or a GET operation
if random.randint(0, 100) < self.put_pct:
obj = self.__put_benchmark_h(pstats, prefix, pstats.total_ops)
objs.append(obj)
else:
self.__get_benchmark_h(gstats, objs)
gstats.produce_stats()
pstats.produce_stats()
return pstats.result, gstats.result, pstats.objs_created
class ListBenchmark(Benchmark):
def __init__(self, num_objects=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_objects = num_objects
self.minsize = 1000
self.maxsize = 1000
# Pre-Population
if self.num_objects:
print_in_progress("Pre-Populating Bucket")
curr_bck_count = bucket_obj_count(self.bucket)
if self.num_objects > curr_bck_count:
self.target = (self.num_objects - curr_bck_count) // self.workers
result = multiworker_deploy(
self,
self.prepopulate,
(True,),
)
self.objs_created.extend(list(itertools.chain(*result)))
if ((self.num_objects - curr_bck_count) % self.workers) != 0:
self.target = (self.num_objects - curr_bck_count) % self.workers
objs_created = self.prepopulate(type_list=True)
self.objs_created.extend(objs_created)
print_success("Completed Pre-Population")
else:
print(
"\nBucket "
+ bold(f"{self.bucket.provider}://{self.bucket.name}")
+ f" currently has "
+ bold(f"{curr_bck_count}")
+ f" objects, which already meets the specified total count of "
+ bold(f"{self.num_objects}")
+ ". "
)
print_success("Skipped Pre-Population")
def __print_results(self):
num_listed_objs = len(self.listed_objs)
if num_listed_objs != 0:
print_sep()
print(
"\n"
+ underline(
bold(f"Benchmark Results (LIST {num_listed_objs} Objects):")
)
)
print(
"\n"
+ "The benchmark took approximately "
+ bold(format_time(self.benchmark_time))
+ " and listed "
+ bold(str(num_listed_objs))
+ " objects.\n"
)
else:
terminate(
f"The bucket {self.bucket.provider}://{self.bucket.name} is empty. Please populate the bucket before running the benchmark or use the option --num-objects (or -n)."
)
def run(self):
self.listed_objs = []
# Start benchmark
start_time = time.time()
print_in_progress("Performing LIST benchmark")
self.listed_objs = self.bucket.list_all_objects()
print_success("Completed LIST benchmark")
end_time = time.time()
self.benchmark_time = end_time - start_time
if self.cleanup:
self.clean_up()
self.__print_results()
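# A minimal usage sketch mirroring how main.py wires these classes together. It assumes
# AIS_ENDPOINT is exported and a cluster is reachable; the bucket name, sizes, duration, and
# worker count below are illustrative placeholders only.
if __name__ == "__main__":
    from pyaisloader.client_config import client
    demo_bck = client.bucket("pyaisloader-demo")
    PutGetMixedBenchmark(
        put_pct=50,
        minsize=1000,
        maxsize=10000,
        duration=30,
        bucket=demo_bck,
        workers=2,
        cleanup=True,
    ).run()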
| aistore-master | python/pyaisloader/pyaisloader/benchmark.py |
| aistore-master | python/pyaisloader/pyaisloader/__init__.py |
import os
from aistore import Client
ENDPOINT = os.environ["AIS_ENDPOINT"]
client = Client(ENDPOINT)
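# Usage note: export AIS_ENDPOINT (e.g. http://localhost:8080 for a local cluster) before
# importing this module; every pyaisloader module then shares this single `client` instance.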
| aistore-master | python/pyaisloader/pyaisloader/client_config.py |
import argparse
import pkg_resources
from pyaisloader.benchmark import PutGetMixedBenchmark, ListBenchmark
from pyaisloader.const import PROVIDERS
from pyaisloader.client_config import client
from pyaisloader.utils.parse_utils import parse_size, parse_time
from pyaisloader.utils.print_utils import bold
VERSION = pkg_resources.require("pyaisloader")[0].version
def prepend_default_arguments(parser):
parser.add_argument(
"-b",
"--bucket",
type=str,
required=True,
help="Bucket (e.g. ais://mybck, s3://mybck, gs://mybck)",
)
return parser
def append_default_arguments(parser):
parser.add_argument(
"-c",
"--cleanup",
action="store_true",
default=False,
help="Whether bucket should be destroyed or not upon benchmark completion",
)
parser.add_argument(
"-w", "--workers", type=int, required=True, help="Number of workers"
)
return parser
def main():
"""Parses the command line arguments and instantiates the correct benchmark."""
parser = argparse.ArgumentParser(description="CLI for running benchmarks.")
parser.add_argument(
"--version",
action="version",
version=f"pyaisloader {VERSION}",
help="Show version number and exit",
)
subparsers = parser.add_subparsers(
dest="type",
title="types",
description=(
'Choose a benchmark type. Type "PUT -h", "GET -h", '
'"MIXED -h", or "LIST -h" for more information about the specific benchmark.'
),
)
put_parser = subparsers.add_parser(
"PUT",
aliases=["put", "P", "p"],
help="100% PUT benchmark",
description="This command runs a 100% PUT benchmark.",
)
get_parser = subparsers.add_parser(
"GET",
aliases=["get", "G", "g"],
help="100% GET benchmark",
description="This command runs a 100% GET benchmark.",
)
mixed_parser = subparsers.add_parser(
"MIXED",
aliases=["mixed", "M", "m"],
help="MIXED benchmark",
description="This command runs a MIXED benchmark, with a customizable balance of PUT and GET operations.",
)
list_parser = subparsers.add_parser(
"LIST",
aliases=["list", "L", "l"],
help="LIST objects benchmark",
description="This command runs a LIST benchmark.",
)
put_parser = prepend_default_arguments(put_parser)
get_parser = prepend_default_arguments(get_parser)
mixed_parser = prepend_default_arguments(mixed_parser)
list_parser = prepend_default_arguments(list_parser)
put_parser.add_argument(
"-min",
"--minsize",
type=parse_size,
required=True,
help="Minimum size of objects to be PUT in bucket during the benchmark",
)
put_parser.add_argument(
"-max",
"--maxsize",
type=parse_size,
required=True,
help="Maximum size of objects to be PUT in bucket during the benchmark",
)
put_parser.add_argument(
"-s",
"--totalsize",
type=parse_size,
required=False,
help=(
"Total size to PUT during the benchmark "
"(if duration is not satisfied first)"
),
)
put_parser.add_argument(
"-d",
"--duration",
type=parse_time,
required=False,
help="Duration for which benchmark should be run",
)
get_parser.add_argument(
"-min",
"--minsize",
type=parse_size,
required=False,
help="Minimum size of objects to be PUT in bucket (if bucket is smaller than total size)",
)
get_parser.add_argument(
"-max",
"--maxsize",
type=parse_size,
required=False,
help="Maximum size of objects to be PUT in bucket (if bucket is smaller than total size)",
)
get_parser.add_argument(
"-s",
"--totalsize",
type=parse_size,
required=False,
help="Total size to which the bucket should be filled prior to start",
)
get_parser.add_argument(
"-d",
"--duration",
type=parse_time,
required=True,
help="Duration for which benchmark should be run",
)
mixed_parser.add_argument(
"-p",
"--putpct",
type=int,
default=50,
help="Percentage for PUT operations in MIXED benchmark",
)
mixed_parser.add_argument(
"-min",
"--minsize",
type=parse_size,
required=True,
help=("Minimum size of objects to be PUT in bucket during the benchmark "),
)
mixed_parser.add_argument(
"-max",
"--maxsize",
type=parse_size,
required=True,
help=("Maximum size of objects to be PUT in bucket during the benchmark "),
)
mixed_parser.add_argument(
"-d",
"--duration",
type=parse_time,
required=True,
help="Duration for which benchmark should be run",
)
list_parser.add_argument(
"-o",
"--objects",
type=int,
help="Number of objects bucket should contain prior to benchmark start",
)
put_parser = append_default_arguments(put_parser)
get_parser = append_default_arguments(get_parser)
mixed_parser = append_default_arguments(mixed_parser)
list_parser = append_default_arguments(list_parser)
args = parser.parse_args()
if args.type is None:
print(
f"\nWelcome to {bold('pyaisloader')}, a CLI for running benchmarks that leverage the AIStore Python SDK. \n\n"
"Available benchmark types include: PUT, GET, MIXED, and LIST. \n\n"
"For more details about each benchmark type, use 'pyaisloader [benchmark_type] -h' \nor 'pyaisloader [benchmark_type] --help' "
"(e.g. for more information about the PUT \nbenchmark, run 'pyaisloader PUT -h' or 'pyaisloader PUT --help').\n"
)
return
# Require that PUT benchmark specifies at least one of --totalsize or --duration
if args.type.lower() in ["put", "p"]:
if args.totalsize is None and args.duration is None:
parser.error("At least one of --totalsize or --duration must be provided.")
if args.type.lower() in ["get", "g"]:
if args.totalsize:
if args.minsize is None or args.maxsize is None:
parser.error(
"If pre-populating bucket, --totalsize, --minsize, and --maxsize are all required."
)
# Instantiate client and bucket object
provider, bck_name = args.bucket.split("://")
bucket = client.bucket(bck_name, provider=PROVIDERS[provider])
benchmark_type = args.type.lower()
if benchmark_type in ["put", "get", "mixed", "p", "g", "m"]:
if benchmark_type in ["put", "p"]:
benchmark = PutGetMixedBenchmark(
put_pct=100,
minsize=args.minsize,
maxsize=args.maxsize,
duration=args.duration,
totalsize=args.totalsize,
bucket=bucket,
workers=args.workers,
cleanup=args.cleanup,
)
elif benchmark_type in ["get", "g"]:
benchmark = PutGetMixedBenchmark(
put_pct=0,
minsize=args.minsize,
maxsize=args.maxsize,
duration=args.duration,
totalsize=args.totalsize,
bucket=bucket,
workers=args.workers,
cleanup=args.cleanup,
)
else:
benchmark = PutGetMixedBenchmark(
put_pct=args.putpct,
minsize=args.minsize,
maxsize=args.maxsize,
duration=args.duration,
bucket=bucket,
workers=args.workers,
cleanup=args.cleanup,
)
benchmark.run()
elif benchmark_type in ["list", "l"]:
benchmark = ListBenchmark(
num_objects=args.objects,
bucket=bucket,
workers=args.workers,
cleanup=args.cleanup,
)
benchmark.run()
if __name__ == "__main__":
main()
| aistore-master | python/pyaisloader/pyaisloader/main.py |
from aistore.sdk.const import PROVIDER_AIS, PROVIDER_AMAZON, PROVIDER_GOOGLE
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
PROVIDERS = {
"ais": PROVIDER_AIS,
"aws": PROVIDER_AMAZON,
"gcp": PROVIDER_GOOGLE,
"gs": PROVIDER_GOOGLE,
"s3": PROVIDER_AMAZON,
}
BOOLEAN_VALUES = {"false": False, "f": False, "true": True, "t": True}
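# Minimal sketch of how PROVIDERS is used when parsing CLI bucket URLs such as "ais://mybck"
# (mirrors main.py); the bucket names below are illustrative only.
if __name__ == "__main__":
    for url in ("ais://mybck", "s3://mybck", "gs://mybck"):
        provider, bck_name = url.split("://")
        print(url, "->", PROVIDERS[provider], bck_name)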
| aistore-master | python/pyaisloader/pyaisloader/const.py |
import sys
from tabulate import tabulate
from colorama import Back, Fore, Style
from pyaisloader.const import BOLD, END, UNDERLINE
from pyaisloader.utils.parse_utils import format_size, format_time
def bold(msg):
return f"{BOLD}{msg}{END}"
def underline(msg):
return f"{UNDERLINE}{msg}{END}"
def print_sep():
print("\n" + "=" * 101)
def print_in_progress(msg, icon="\U0001F552"):
print(
"\n"
+ Back.LIGHTBLACK_EX
+ Fore.BLACK
+ bold("IN PROGRESS")
+ Style.RESET_ALL
+ bold(": ")
+ msg
+ f" {icon}"
)
def print_caution(msg):
print(
"\n"
+ Back.LIGHTYELLOW_EX
+ Fore.BLACK
+ bold("CAUTION")
+ Style.RESET_ALL
+ bold(": ")
+ msg
)
def print_success(msg):
print(
"\n"
+ Back.LIGHTGREEN_EX
+ Fore.BLACK
+ bold("SUCCESS")
+ Style.RESET_ALL
+ bold(": ")
+ msg
+ Fore.GREEN
+ " \u2714"
+ Style.RESET_ALL
)
def print_results(result):
headers_values = [
("# Ops Completed", result["ops"]),
("Total Size", format_size(result["bytes"])),
("Throughput", f"{format_size(result['throughput'])}/s"),
("Latency Min", format_time(result["latency_min"])),
("Latency Avg", format_time(result["latency_avg"])),
("Latency Max", format_time(result["latency_max"])),
]
table = [
[f"{BOLD}{name}{END}" for name, _ in headers_values],
[value for _, value in headers_values],
]
print("\n" + tabulate(table, tablefmt="simple_grid") + "\n")
def confirm_continue():
decision = input("\n" + "Would you like to proceed? " + bold("(Y/N)") + ": ")
if decision.lower() in ["n", "no"]:
terminate()
elif decision.lower() in ["y", "yes"]:
return
else:
confirm_continue()
def terminate(msg=None):
if msg:
print(
"\n"
+ Back.LIGHTRED_EX
+ Fore.BLACK
+ bold("TERMINATING")
+ Style.RESET_ALL
+ bold(": ")
+ msg
+ "\n"
)
sys.exit()
else:
print(
"\n"
+ Back.LIGHTRED_EX
+ Fore.BLACK
+ bold("TERMINATING")
+ Style.RESET_ALL
+ bold("...")
+ "\n"
)
sys.exit()
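# Quick visual check of the console helpers above; the message text and numbers are
# illustrative placeholders only.
if __name__ == "__main__":
    print_in_progress("Running a demo step")
    print_caution("This is only a formatting demo")
    print_results(
        {
            "ops": 10,
            "bytes": 10000,
            "throughput": 5000.0,
            "latency_min": 0.001,
            "latency_avg": 0.002,
            "latency_max": 0.004,
        }
    )
    print_success("Demo complete")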
| aistore-master | python/pyaisloader/pyaisloader/utils/cli_utils.py |
import os
import random
import string
def generate_random_str():
chars = string.ascii_letters + string.digits
return "".join(random.choice(chars) for _ in range(5))
def generate_bytes(min_size, max_size):
size = random.randint(min_size, max_size)
content = os.urandom(size)
return content, size
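# Quick sketch of the helpers above: a 5-character object-name suffix and a random payload
# between 1 KiB and 2 KiB (the size range is an arbitrary example).
if __name__ == "__main__":
    print("suffix:", generate_random_str())
    content, size = generate_bytes(1024, 2048)
    print("payload size:", size)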
| aistore-master | python/pyaisloader/pyaisloader/utils/random_utils.py |
from colorama import Back, Fore, Style
from pyaisloader.const import BOLD, END, UNDERLINE
from pyaisloader.utils.cli_utils import terminate
def bold(msg):
return f"{BOLD}{msg}{END}"
def underline(msg):
return f"{UNDERLINE}{msg}{END}"
def print_sep():
print("\n" + "=" * 101)
def confirm_continue():
decision = input("\n" + "Would you like to proceed? " + bold("(Y/N)") + ": ")
if decision.lower() in ["n", "no"]:
terminate()
elif decision.lower() in ["y", "yes"]:
return
else:
confirm_continue()
def print_in_progress(msg, icon="\U0001F552"):
print(
"\n"
+ Back.LIGHTBLACK_EX
+ Fore.BLACK
+ bold("IN PROGRESS")
+ Style.RESET_ALL
+ bold(": ")
+ msg
+ f" {icon}"
)
def print_caution(msg):
print(
"\n"
+ Back.LIGHTYELLOW_EX
+ Fore.BLACK
+ bold("CAUTION")
+ Style.RESET_ALL
+ bold(": ")
+ msg
)
def print_success(msg):
print(
"\n"
+ Back.LIGHTGREEN_EX
+ Fore.BLACK
+ bold("SUCCESS")
+ Style.RESET_ALL
+ bold(": ")
+ msg
+ Fore.GREEN
+ " \u2714"
+ Style.RESET_ALL
)
| aistore-master | python/pyaisloader/pyaisloader/utils/print_utils.py |
import humanfriendly
import pendulum
from pyaisloader.const import BOOLEAN_VALUES
def parse_time(time_str):
return humanfriendly.parse_timespan(time_str)
def parse_size(size_str):
return humanfriendly.parse_size(size_str)
def format_time(duration):
d = pendulum.duration(seconds=duration)
if d.minutes > 0:
return d.in_words()
if d.seconds > 0:
return f"{d.seconds} seconds"
if d.microseconds > 0:
if d.microseconds >= 1e6:
return f"{d.microseconds/1e6} seconds"
if d.microseconds >= 1e3:
return f"{d.microseconds/1e3} milliseconds"
return f"{d.microseconds} microseconds"
return "0 seconds"
def format_size(byte_count):
return humanfriendly.format_size(byte_count)
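# Illustrative round trip through the parse/format helpers; the exact strings returned may
# vary slightly across humanfriendly/pendulum versions.
if __name__ == "__main__":
    print(parse_size("1KB"))     # -> 1000 (decimal units)
    print(parse_time("5m"))      # -> 300.0 (seconds)
    print(format_size(1500000))  # e.g. "1.5 MB"
    print(format_time(0.25))     # e.g. "250.0 milliseconds"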
| aistore-master | python/pyaisloader/pyaisloader/utils/parse_utils.py |
from requests.exceptions import HTTPError
from pyaisloader.utils.print_utils import (
print_caution,
print_in_progress,
print_success,
)
from pyaisloader.utils.random_utils import generate_bytes
def bucket_exists(bucket):
try:
bucket.head()
return True
except HTTPError:
return False
def bucket_size(bucket):
_, bsumm = bucket.info()
present_size = int(bsumm["TotalSize"]["size_all_present_objs"])
remote_size = int(bsumm["TotalSize"]["size_all_remote_objs"])
if remote_size > 0:
return remote_size
else:
return present_size
def bucket_obj_count(bucket):
_, bsumm = bucket.info()
return int(bsumm["ObjCount"]["obj_count_present"]) + int(
bsumm["ObjCount"]["obj_count_remote"]
)
def cleanup(benchmark):
benchmark.bucket.objects(obj_names=benchmark.objs_created).delete()
def add_one_object(benchmark):
print_caution("Bucket is empty!")
print_in_progress("Adding one object")
content, _ = generate_bytes(1000, 1000)
obj_name = "initial-object"
benchmark.bucket.object(obj_name).put_content(content)
benchmark.objs_created.append(obj_name)
print_success("Added one object")
| aistore-master | python/pyaisloader/pyaisloader/utils/bucket_utils.py |
| aistore-master | python/pyaisloader/pyaisloader/utils/__init__.py |
import concurrent.futures
from requests.exceptions import HTTPError
def bucket_exists(bucket):
try:
bucket.head()
return True
except HTTPError:
return False
def bucket_size(bck):
return int(bck.summary()["TotalSize"]["size_on_disk"])
def bucket_obj_count(bck):
summary = bck.summary()
return int(summary["ObjCount"]["obj_count_present"]) + int(
summary["ObjCount"]["obj_count_remote"]
)
def cleanup(benchmark):
benchmark.bucket.objects(obj_names=benchmark.objs_created).delete()
def multiworker_deploy(benchmark, worker_function, worker_args=None):
with concurrent.futures.ProcessPoolExecutor(
max_workers=benchmark.workers
) as executor:
# Prepare a list of argument tuples for the workers.
worker_args = [worker_args for _ in range(benchmark.workers)]
result = list(executor.map(worker_function, *zip(*worker_args)))
return result
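# Minimal sketch of multiworker_deploy: any object exposing a `workers` attribute can stand in
# for a benchmark, and the worker function only needs to be picklable (the built-in len is used
# here purely as a stand-in).
if __name__ == "__main__":
    from types import SimpleNamespace
    fake_benchmark = SimpleNamespace(workers=2)
    print(multiworker_deploy(fake_benchmark, len, ("demo",)))  # -> [4, 4]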
| aistore-master | python/pyaisloader/pyaisloader/utils/concurrency_utils.py |
from tabulate import tabulate
from pyaisloader.const import BOLD, END
from pyaisloader.utils.parse_utils import format_size, format_time
def combine_results(results, num_workers):
result = {
"ops": sum(r["ops"] for r in results),
"bytes": sum(r["bytes"] for r in results),
"time": sum(r["time"] for r in results),
"throughput": sum(r["throughput"] for r in results),
"latency_min": min(r["latency_min"] for r in results),
"latency_avg": sum(r["latency_avg"] for r in results) / num_workers,
"latency_max": max(r["latency_max"] for r in results),
}
return result
def print_results(result):
headers_values = [
("# Ops Completed", result["ops"]),
("Total Size", format_size(result["bytes"])),
("Throughput", f"{format_size(result['throughput'])}/s"),
("Latency Min", format_time(result["latency_min"])),
("Latency Avg", format_time(result["latency_avg"])),
("Latency Max", format_time(result["latency_max"])),
]
table = [
[f"{BOLD}{name}{END}" for name, _ in headers_values],
[value for _, value in headers_values],
]
print("\n" + tabulate(table, tablefmt="simple_grid") + "\n")
| aistore-master | python/pyaisloader/pyaisloader/utils/stat_utils.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring
import io
import logging
import unittest
import boto3
from moto import mock_s3
from botocore.exceptions import ClientError
from aistore.sdk.const import UTF_ENCODING
from tests import (
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
AWS_SESSION_TOKEN,
AWS_DEFAULT_REGION,
)
from tests.utils import random_string
from tests.unit.botocore_patch import mock_s3_redirect
# pylint: disable=too-many-instance-attributes,missing-function-docstring,invalid-name,unused-variable
class BotocoreBaseTest(unittest.TestCase):
"""
Common test group for the botocore monkey patch;
Runs a small set of S3 operations.
We run this over and over, varying whether redirects
are issued, and whether our monkey patch is loaded
to handle them.
If botocore has been monkeypatched, it should
not get upset when redirected.
If botocore has not, it should get upset every time.
    For unit tests, we use moto to mock an S3 instance; to
control whether redirects are issued we use a
decorator (see mock_s3_redirect.py).
To control botocore's expected behavior we use the
redirect_errors_expected property.
"""
__test__ = False
mock_s3 = mock_s3()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.enable_redirects = False
self.redirect_errors_expected = False
# Use moto to mock S3 by default.
self.use_moto = True
        # AIStore endpoint URL to use iff we're not using moto.
self.endpoint_url = kwargs.get("endpoint_url", "http://localhost:8080/s3")
def setUp(self):
self.control_bucket = random_string()
self.control_object = random_string()
self.another_bucket = random_string()
self.another_object = random_string()
if self.use_moto:
logging.debug("Using moto for S3 services")
# Disable any redirections until we're ready.
mock_s3_redirect.redirections_enabled = False
self.mock_s3.start()
self.s3 = boto3.client(
"s3", region_name="us-east-1"
) # pylint: disable=invalid-name
else:
logging.debug("Using aistore for S3 services")
self.s3 = boto3.client(
"s3",
region_name=AWS_DEFAULT_REGION,
endpoint_url=self.endpoint_url,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
aws_session_token=AWS_SESSION_TOKEN,
)
self.s3.create_bucket(Bucket=self.control_bucket)
self.s3.upload_fileobj(
io.BytesIO(b"Hello, world!"), self.control_bucket, self.control_object
)
if self.use_moto:
# Enable redirections if we've been asked to do so.
mock_s3_redirect.redirections_enabled = self.enable_redirects
def tearDown(self):
if self.use_moto:
self.mock_s3.stop()
else:
try:
self.s3.delete_object(
Bucket=self.control_bucket, Key=self.control_object
)
except Exception:
pass
try:
self.s3.delete_bucket(Bucket=self.control_bucket)
except Exception:
pass
try:
self.s3.delete_object(
Bucket=self.control_bucket, Key=self.another_object
)
except Exception:
pass
try:
self.s3.delete_bucket(Bucket=self.another_bucket)
except Exception:
pass
def test_bucket_create(self):
# When integration testing against a real aistore, this won't redirect.
redirect_errors_expected = (
False if not self.use_moto else self.redirect_errors_expected
)
with MightRedirect(redirect_errors_expected, operation="_bucket_response_put"):
logging.warning("Creating bucket %s", self.another_bucket)
self.s3.create_bucket(Bucket=self.another_bucket)
def test_bucket_list(self):
# Our redirect mock can't intercept bucket listing operations;
# so, always expect success
self.assertIn(
self.control_bucket, [b["Name"] for b in self.s3.list_buckets()["Buckets"]]
)
def test_object_create(self):
with MightRedirect(self.redirect_errors_expected):
self.s3.upload_fileobj(
io.BytesIO(b"Hello, world!"), self.control_bucket, self.another_object
)
def test_object_list(self):
# Our redirect mock can't intercept object listing operations;
# so, always expect success
self.assertEqual(
[
b["Key"]
for b in self.s3.list_objects(Bucket=self.control_bucket)["Contents"]
],
[self.control_object],
)
def test_object_get(self):
with MightRedirect(self.redirect_errors_expected):
stream_str = io.BytesIO()
self.s3.download_fileobj(
self.control_bucket, self.control_object, stream_str
)
self.assertEqual(
stream_str.getvalue().decode(UTF_ENCODING), "Hello, world!"
)
def test_caching(self):
with MightRedirect(self.redirect_errors_expected):
stream_str = io.BytesIO()
self.s3.download_fileobj(
self.control_bucket, self.control_object, stream_str
)
self.assertEqual(
stream_str.getvalue().decode(UTF_ENCODING), "Hello, world!"
)
self.s3.download_fileobj(
self.control_bucket, self.control_object, stream_str
)
self.assertEqual(
stream_str.getvalue().decode(UTF_ENCODING), "Hello, world!"
)
def test_object_delete(self):
with MightRedirect(self.redirect_errors_expected):
self.s3.delete_object(Bucket=self.control_bucket, Key=self.control_object)
def test_bucket_delete(self):
with MightRedirect(self.redirect_errors_expected):
self.s3.delete_object(Bucket=self.control_bucket, Key=self.control_object)
self.s3.delete_bucket(Bucket=self.control_bucket)
class MightRedirect:
"""
Context manager to handle botocore errors.
Some test sets expect botocore to issue errors
when it encounters redirects. Others expect
the opposite.
This allows us to control the expected behavior.
"""
max_retries = 3
def __init__(self, redirect_errors_expected=False, operation=None):
self.redirect_errors_expected = redirect_errors_expected
self.operation = operation
def __enter__(self):
return self
def __exit__(self, exc, value, traceback):
if self.redirect_errors_expected:
try:
if exc and value:
raise value
except ClientError as ex:
# Some operations don't pass through redirect errors directly
if self.operation in ["_bucket_response_put"]:
return True
if int(ex.response["Error"]["Code"]) in [302, 307]:
return True
instead = "No error"
if value:
instead = value
raise Exception(
"A ClientError with a redirect code was expected, "
+ "but didn't happen. Instead: "
                + str(instead)
)
return False
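# Minimal usage sketch of MightRedirect on its own (not executed here): when redirect errors
# are expected, a ClientError carrying a 302/307 is swallowed; any other outcome raises.
#
#   with MightRedirect(redirect_errors_expected=True):
#       s3.download_fileobj(bucket, key, stream)  # some boto3 call that may be redirected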
| aistore-master | python/tests/botocore_common.py |
import os
AWS_SESSION_TOKEN = os.environ.get("AWS_SESSION_TOKEN", "testing")
AWS_DEFAULT_REGION = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "testing")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "testing")
AWS_SECURITY_TOKEN = os.environ.get("AWS_SECURITY_TOKEN", "testing")
| aistore-master | python/tests/__init__.py |
import os
import random
import shutil
import string
import tempfile
from pathlib import Path
from aistore.sdk import Client
from aistore.sdk.const import UTF_ENCODING
from aistore.sdk.errors import ErrBckNotFound
# pylint: disable=unused-variable
def random_string(length: int = 10):
return "".join(random.choices(string.ascii_lowercase, k=length))
# pylint: disable=unused-variable
def create_and_put_object(
client: Client,
bck_name: str,
obj_name: str,
provider: str = "ais",
obj_size: int = 0,
):
obj_size = obj_size if obj_size else random.randrange(10, 20)
obj_body = "".join(random.choices(string.ascii_letters, k=obj_size))
content = obj_body.encode(UTF_ENCODING)
temp_file = Path(tempfile.gettempdir()).joinpath(os.urandom(24).hex())
with open(temp_file, "wb") as file:
file.write(content)
file.flush()
client.bucket(bck_name, provider=provider).object(obj_name).put_file(file.name)
return content
def destroy_bucket(client: Client, bck_name: str):
try:
client.bucket(bck_name).delete()
except ErrBckNotFound:
pass
def cleanup_local(path: str):
try:
shutil.rmtree(path)
except FileNotFoundError:
pass
def create_and_put_objects(client, bucket, prefix, suffix, num_obj):
obj_names = [prefix + str(i) + suffix for i in range(num_obj)]
for obj_name in obj_names:
create_and_put_object(
client,
bck_name=bucket.name,
provider=bucket.provider,
obj_name=obj_name,
)
return obj_names
| aistore-master | python/tests/utils.py |
| aistore-master | python/tests/unit/__init__.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring,import-outside-toplevel,unused-import
from tests.botocore_common import BotocoreBaseTest
# pylint: disable=unused-variable
class UnpatchedTestCase(BotocoreBaseTest):
"""
Our control case.
When botocore is unpatched, and S3 issues no redirects,
we should not see any client errors.
"""
__test__ = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.enable_redirects = False
self.redirect_errors_expected = False
| aistore-master | python/tests/unit/botocore_patch/test_botocore_noredirects_unpatched.py |
| aistore-master | python/tests/unit/botocore_patch/__init__.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring
import logging
import wrapt
# Patch moto - an S3 stubbing library - to
# issue redirects for us.
# When running tests, fake HTTP redirects
# for the following.
#
# Not all response operations found in moto's
# S3Response are found below; they don't all
# provide us with sufficient information to
# patch successfully.
test_ops = [
"_bucket_response_put",
"_bucket_response_post",
"_bucket_response_delete_keys",
"_key_response_get",
"_key_response_put",
"_key_response_delete",
"_key_response_post",
]
redirected_ops = test_ops
redirections_enabled = True # pylint: disable=invalid-name
def s3_response_wrapper(
wrapped, instance, args, kwargs
): # pylint: disable=unused-argument
"""
Patch various internal S3Response methods in moto to issue
redirects.
args and kwargs are somewhat inconsistent and vary per HTTP method.
in particular, HEAD doesn't have enough scrapable context for us to
redirect, so we don't bother.
"""
url = None
operation = wrapped.__name__
should_redirect = operation in redirected_ops and redirections_enabled
ret = wrapped(*args, **kwargs)
if not should_redirect:
return ret
logging.debug("s3_response_wrapper: intercepted %s", wrapped.__name__)
    method = None
    attempt = None
try:
method = operation.split("_").pop().upper()
if method == "GET":
bucket = args[0]
url = f"https://s3.amazonaws.com/{bucket}"
key = args[-1]
if isinstance(key, str):
url += f"/{key}"
attempt = int(
kwargs["headers"]["amz-sdk-request"].split(";")[0].split("=")[1]
)
elif method == "PUT":
url = args[0].__dict__["url"]
attempt = int(
args[0]
.__dict__["headers"]["amz-sdk-request"]
.split(";")[0]
.split("=")[1]
)
elif method == "DELETE":
bucket = args[1]
url = f"https://s3.amazonaws.com/{bucket}"
key = args[-1]
if isinstance(key, str):
url += f"/{key}"
attempt = int(args[0]["amz-sdk-request"].split(";")[0].split("=")[1])
elif method == "HEAD":
url = None
attempt = None
elif method == "POST":
url = None
attempt = None
logging.debug(
"s3_response_wrapper: parsed operation %s, method %s, url %s, attempt %s",
operation,
method,
url,
attempt,
)
except Exception: # pylint: disable=broad-except
pass
logging.debug("s3_response_wrapper: redirecting operation %s", operation)
    if attempt is not None and attempt < 5:
ret = list(ret)
ret[0] = 307
ret[1]["Location"] = url + "?andthenanotherthing"
ret[1]["Server"] = "AIStore"
ret = tuple(ret)
return ret
@wrapt.when_imported("moto.s3.responses")
def patch_moto(module): # pylint: disable=unused-variable
"""
Meta-mock our moto mocks to make them send redirects on occasion.
Bucket delete won't play nicely. Others are more helpful.
"""
for func in test_ops:
logging.debug("Patching S3Response.%s", func)
wrapt.wrap_function_wrapper(module, "S3Response." + func, s3_response_wrapper)
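# Minimal sketch of how the test harness drives this module (see botocore_common.py): moto is
# started with redirections disabled while fixtures are created, then redirects are switched on.
#
#   from tests.unit.botocore_patch import mock_s3_redirect
#   mock_s3_redirect.redirections_enabled = False  # quiet setup phase
#   ...create test buckets/objects...
#   mock_s3_redirect.redirections_enabled = True   # patched S3Response ops now 307-redirect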
| aistore-master | python/tests/unit/botocore_patch/mock_s3_redirect.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring,import-outside-toplevel,unused-import
from tests.botocore_common import BotocoreBaseTest
# pylint: disable=unused-variable
class PatchedRedirectingTestCase(BotocoreBaseTest):
"""
This directly tests our monkey patch.
When botocore is patched, and S3 issues redirects,
we should not see any client errors.
"""
__test__ = True
def __init__(self, *args, **kwargs):
from aistore.botocore_patch import botocore
super().__init__(*args, **kwargs)
self.enable_redirects = True
self.redirect_errors_expected = False
| aistore-master | python/tests/unit/botocore_patch/test_botocore_redirects_patched.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring,import-outside-toplevel,unused-import
from tests.botocore_common import BotocoreBaseTest
# pylint: disable=unused-variable
class UnpatchedRedirectingTestCase(BotocoreBaseTest):
"""
Another control case.
When botocore is unpatched, and S3 issues redirects,
we should see some client errors.
"""
__test__ = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.enable_redirects = True
self.redirect_errors_expected = True
| aistore-master | python/tests/unit/botocore_patch/test_botocore_redirects_unpatched.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring,import-outside-toplevel,unused-import
from tests.botocore_common import BotocoreBaseTest
# pylint: disable=unused-variable
class PatchedTestCase(BotocoreBaseTest):
"""
A passthrough test to check we're not breaking botocore
simply by being imported.
When botocore is patched, and S3 issues no redirects,
we should not see any client errors.
"""
__test__ = True
def __init__(self, *args, **kwargs):
from aistore.botocore_patch import botocore
super().__init__(*args, **kwargs)
self.enable_redirects = False
self.redirect_errors_expected = False
| aistore-master | python/tests/unit/botocore_patch/test_botocore_noredirects_patched.py |
import unittest
from typing import Dict, List
from unittest.mock import Mock, patch, call
from aistore.sdk.const import (
QPARAM_WHAT,
QPARAM_FORCE,
HTTP_METHOD_GET,
HTTP_METHOD_PUT,
WHAT_ONE_XACT_STATUS,
URL_PATH_CLUSTER,
ACT_START,
WHAT_QUERY_XACT_STATS,
)
from aistore.sdk.errors import Timeout
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import JobStatus, JobArgs, BucketModel, ActionMsg, JobSnapshot
from aistore.sdk.utils import probing_frequency
from aistore.sdk.job import Job
# pylint: disable=unused-variable
class TestJob(unittest.TestCase):
def setUp(self):
self.mock_client = Mock()
self.job_id = "1234"
self.job_kind = "test job"
self.default_job = Job(self.mock_client)
self.job = Job(self.mock_client, self.job_id, self.job_kind)
def test_properties(self):
self.assertEqual(self.job_id, self.job.job_id)
self.assertEqual(self.job_kind, self.job.job_kind)
def test_job_status(self):
expected_request_val = JobArgs(id=self.job_id, kind=self.job_kind).as_dict()
returned_status = JobStatus()
self.mock_client.request_deserialize.return_value = returned_status
res = self.job.status()
self.assertEqual(returned_status, res)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=JobStatus,
json=expected_request_val,
params={QPARAM_WHAT: WHAT_ONE_XACT_STATUS},
)
def test_job_status_no_id(self):
job_no_id = Job(self.mock_client)
with self.assertRaises(ValueError):
job_no_id.status()
@patch("aistore.sdk.job.time.sleep")
@patch("aistore.sdk.job.Job.status")
def test_wait_default_params(self, mock_status, mock_sleep):
timeout = 300
frequency = probing_frequency(timeout)
expected_status_calls = [
call(),
call(),
call(),
]
expected_sleep_calls = [call(frequency), call(frequency)]
self.wait_exec_assert(
self.default_job,
mock_status,
mock_sleep,
expected_status_calls,
expected_sleep_calls,
)
@patch("aistore.sdk.job.time.sleep")
@patch("aistore.sdk.job.Job.status")
def test_wait(self, mock_status, mock_sleep):
timeout = 20
frequency = probing_frequency(timeout)
expected_status_calls = [call(), call(), call()]
expected_sleep_calls = [call(frequency), call(frequency)]
self.wait_exec_assert(
self.job,
mock_status,
mock_sleep,
expected_status_calls,
expected_sleep_calls,
timeout=timeout,
)
@patch("aistore.sdk.job.time.sleep")
@patch("aistore.sdk.job.Job.status")
# pylint: disable=unused-argument
def test_wait_timeout(self, mock_status, mock_sleep):
mock_status.return_value = JobStatus(end_time=0)
self.assertRaises(Timeout, self.job.wait)
# pylint: disable=too-many-arguments
def wait_exec_assert(
self,
job,
mock_status,
mock_sleep,
expected_status_calls,
expected_sleep_calls,
**kwargs
):
mock_status.side_effect = [
JobStatus(end_time=0),
JobStatus(end_time=0),
JobStatus(end_time=1),
]
job.wait(**kwargs)
mock_status.assert_has_calls(expected_status_calls)
mock_sleep.assert_has_calls(expected_sleep_calls)
self.assertEqual(3, mock_status.call_count)
self.assertEqual(2, mock_sleep.call_count)
@patch("aistore.sdk.job.time.sleep")
def test_wait_for_idle(self, mock_sleep):
snap_other_job_idle = JobSnapshot(id="other_id", is_idle=True)
snap_job_running = JobSnapshot(id=self.job_id, is_idle=False)
snap_job_idle = JobSnapshot(id=self.job_id, is_idle=True)
self.mock_client.request_deserialize.side_effect = [
{"d1": [snap_other_job_idle, snap_job_running], "d2": [snap_job_running]},
{"d1": [snap_job_running], "d2": [snap_job_idle]},
{"d1": [snap_job_idle], "d2": [snap_job_idle]},
]
timeout = 20
frequency = probing_frequency(timeout)
expected_request_val = JobArgs(id=self.job_id, kind=self.job_kind).as_dict()
expected_request_params = {QPARAM_WHAT: WHAT_QUERY_XACT_STATS}
expected_call = call(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
json=expected_request_val,
params=expected_request_params,
res_model=Dict[str, List[JobSnapshot]],
)
expected_client_requests = [expected_call for _ in range(2)]
expected_sleep_calls = [call(frequency), call(frequency)]
self.job.wait_for_idle(timeout=timeout)
self.mock_client.request_deserialize.assert_has_calls(expected_client_requests)
mock_sleep.assert_has_calls(expected_sleep_calls)
self.assertEqual(3, self.mock_client.request_deserialize.call_count)
self.assertEqual(2, mock_sleep.call_count)
@patch("aistore.sdk.job.time.sleep")
# pylint: disable=unused-argument
def test_wait_for_idle_timeout(self, mock_sleep):
res = {
"d1": [JobSnapshot(id=self.job_id, is_idle=True)],
"d2": [JobSnapshot(id=self.job_id, is_idle=False)],
}
self.mock_client.request_deserialize.return_value = res
self.assertRaises(Timeout, self.job.wait_for_idle)
@patch("aistore.sdk.job.time.sleep")
# pylint: disable=unused-argument
def test_wait_for_idle_no_snapshots(self, mock_sleep):
self.mock_client.request_deserialize.return_value = {}
with self.assertRaises(Timeout) as exc:
self.job.wait_for_idle()
self.assertEqual(
"Timed out while waiting for job '1234' to reach idle state. No job information found.",
str(exc.exception.args[0]),
)
@patch("aistore.sdk.job.time.sleep")
# pylint: disable=unused-argument
def test_wait_for_idle_no_job_in_snapshots(self, mock_sleep):
res = {
"d1": [JobSnapshot(id="1"), JobSnapshot(id="2")],
"d2": [JobSnapshot(id="2")],
}
self.mock_client.request_deserialize.return_value = res
with self.assertRaises(Timeout) as exc:
self.job.wait_for_idle()
self.assertEqual(
"Timed out while waiting for job '1234' to reach idle state. No information found for job 1234.",
str(exc.exception.args[0]),
)
def test_job_start_single_bucket(self):
daemon_id = "daemon id"
bucket = BucketModel(client=Mock(RequestClient), name="single bucket")
expected_json = JobArgs(
kind=self.job_kind, daemon_id=daemon_id, bucket=bucket
).as_dict()
self.job_start_exec_assert(
self.job,
expected_json,
{QPARAM_FORCE: "true"},
daemon_id=daemon_id,
force=True,
buckets=[bucket],
)
def test_job_start_bucket_list(self):
daemon_id = "daemon id"
buckets = [
BucketModel(client=Mock(RequestClient), name="first bucket"),
BucketModel(client=Mock(RequestClient), name="second bucket"),
]
expected_json = JobArgs(
kind=self.job_kind, daemon_id=daemon_id, buckets=buckets
).as_dict()
self.job_start_exec_assert(
self.job,
expected_json,
{QPARAM_FORCE: "true"},
daemon_id=daemon_id,
force=True,
buckets=buckets,
)
def test_job_start_default_params(self):
expected_act_value = JobArgs().as_dict()
self.job_start_exec_assert(self.default_job, expected_act_value, {})
def job_start_exec_assert(self, job, expected_json, expected_params, **kwargs):
expected_action = ActionMsg(action=ACT_START, value=expected_json).dict()
response_txt = "response"
response = Mock()
response.text = response_txt
self.mock_client.request.return_value = response
res = job.start(**kwargs)
self.assertEqual(response_txt, res)
self.mock_client.request.assert_called_with(
HTTP_METHOD_PUT,
path=URL_PATH_CLUSTER,
json=expected_action,
params=expected_params,
)
| aistore-master | python/tests/unit/sdk/test_job.py |
import json
import unittest
from unittest.mock import Mock, patch, mock_open
from msgspec import msgpack
from requests import Response
from aistore.sdk import utils
from aistore.sdk.const import MSGPACK_CONTENT_TYPE, HEADER_CONTENT_TYPE
from aistore.sdk.errors import (
AISError,
ErrRemoteBckNotFound,
ErrBckNotFound,
ErrBckAlreadyExists,
ErrETLAlreadyExists,
)
def test_cases(*args):
def decorator(func):
def wrapper(self, *inner_args, **kwargs):
for arg in args:
with self.subTest(arg=arg):
func(self, arg, *inner_args, **kwargs)
return wrapper
return decorator
# pylint: disable=unused-variable
class TestUtils(unittest.TestCase):
def test_handle_error_no_text(self):
mock_response = Mock(text="")
utils.handle_errors(mock_response)
mock_response.raise_for_status.assert_called()
def test_handle_error_decode_err(self):
err_status = 300
err_msg = "error message iso-8859-1"
expected_text = json.dumps({"status": err_status, "message": err_msg})
# Fail initial decoding, then return the decoded text
decode_err = UnicodeDecodeError("1", b"2", 3, 4, "5")
mock_iso_text = Mock(spec=bytes)
mock_iso_text.decode.side_effect = [decode_err, expected_text]
self.handle_err_exec_assert(AISError, err_status, err_msg, mock_iso_text)
@test_cases(399, 500)
def test_handle_error_ais_err(self, err_status):
err_msg = "error message"
expected_text = json.dumps({"status": err_status, "message": err_msg})
mock_text = Mock(spec=bytes)
mock_text.decode.return_value = expected_text
self.handle_err_exec_assert(AISError, err_status, err_msg, mock_text)
@test_cases(
("cloud bucket does not exist", ErrRemoteBckNotFound),
("remote bucket does not exist", ErrRemoteBckNotFound),
("bucket does not exist", ErrBckNotFound),
("bucket already exists", ErrBckAlreadyExists),
("etl already exists", ErrETLAlreadyExists),
)
def test_handle_error_no_remote_bucket(self, test_case):
err_msg, expected_err = test_case
err_status = 400
expected_text = json.dumps({"status": err_status, "message": err_msg})
mock_text = Mock(spec=bytes)
mock_text.decode.return_value = expected_text
self.handle_err_exec_assert(expected_err, err_status, err_msg, mock_text)
def handle_err_exec_assert(self, err_type, err_status, err_msg, mock_err_text):
mock_response = Mock(text=mock_err_text)
with self.assertRaises(err_type) as context:
utils.handle_errors(mock_response)
self.assertEqual(err_msg, context.exception.message)
self.assertEqual(err_status, context.exception.status_code)
@test_cases((0, 0.1), (-1, 0.1), (64, 1), (128, 2), (100000, 1562.5))
def test_probing_frequency(self, test_case):
self.assertEqual(test_case[1], utils.probing_frequency(test_case[0]))
@patch("pathlib.Path.is_file")
@patch("pathlib.Path.exists")
def test_validate_file(self, mock_exists, mock_is_file):
mock_exists.return_value = False
with self.assertRaises(ValueError):
utils.validate_file("any path")
mock_exists.return_value = True
mock_is_file.return_value = False
with self.assertRaises(ValueError):
utils.validate_file("any path")
mock_is_file.return_value = True
utils.validate_file("any path")
@patch("pathlib.Path.is_dir")
@patch("pathlib.Path.exists")
def test_validate_dir(self, mock_exists, mock_is_dir):
mock_exists.return_value = False
with self.assertRaises(ValueError):
utils.validate_directory("any path")
mock_exists.return_value = True
mock_is_dir.return_value = False
with self.assertRaises(ValueError):
utils.validate_directory("any path")
mock_is_dir.return_value = True
utils.validate_directory("any path")
def test_read_file_bytes(self):
data = b"Test data"
with patch("builtins.open", mock_open(read_data=data)):
res = utils.read_file_bytes("any path")
self.assertEqual(data, res)
@test_cases((123, "123 Bytes"), (None, "unknown"))
def test_get_file_size(self, test_case):
mock_file = Mock()
mock_file.stat.return_value = Mock(st_size=test_case[0])
self.assertEqual(test_case[1], utils.get_file_size(mock_file))
@test_cases(
("prefix-", ["prefix-"], None),
("prefix-{}", ["prefix-{}"], None),
("prefix-{0..1..2..3}", ["prefix-{0..1..2..3}"], None),
("prefix-{0..1..2}}", [], ValueError),
(
"prefix-{1..6..2}-gap-{12..14..1}-suffix",
[
"prefix-1-gap-12-suffix",
"prefix-1-gap-13-suffix",
"prefix-1-gap-14-suffix",
"prefix-3-gap-12-suffix",
"prefix-3-gap-13-suffix",
"prefix-3-gap-14-suffix",
"prefix-5-gap-12-suffix",
"prefix-5-gap-13-suffix",
"prefix-5-gap-14-suffix",
],
None,
),
)
def test_expand_braces(self, test_case):
input_str, output, expected_error = test_case
if not expected_error:
self.assertEqual(output, list(utils.expand_braces(input_str)))
else:
with self.assertRaises(expected_error):
utils.expand_braces(input_str)
@patch("aistore.sdk.utils.parse_raw_as")
def test_decode_response_json(self, mock_parse):
response_content = "text content"
parsed_content = "parsed content"
mock_response = Mock(Response)
mock_response.headers = {}
mock_response.text = response_content
mock_parse.return_value = parsed_content
res = utils.decode_response(str, mock_response)
self.assertEqual(parsed_content, res)
mock_parse.assert_called_with(str, response_content)
def test_decode_response_msgpack(self):
unpacked_content = {"content key": "content value"}
packed_content = msgpack.encode(unpacked_content)
mock_response = Mock(Response)
mock_response.headers = {HEADER_CONTENT_TYPE: MSGPACK_CONTENT_TYPE}
mock_response.content = packed_content
res = utils.decode_response(dict, mock_response)
self.assertEqual(unpacked_content, res)
| aistore-master | python/tests/unit/sdk/test_utils.py |
import unittest
from unittest.mock import Mock
from aistore.sdk.object_iterator import ObjectIterator
from aistore.sdk.types import BucketEntry
class TestObjectIterator(unittest.TestCase): # pylint: disable=unused-variable
def setUp(self) -> None:
self.callable_resp = Mock()
self.callable_resp.continuation_token = ""
self.callable = lambda uuid, token: self.callable_resp
self.obj_iterator = ObjectIterator(self.callable)
def test_iter(self):
self.assertEqual(self.obj_iterator, iter(self.obj_iterator))
def test_next_empty_resp(self):
with self.assertRaises(StopIteration):
self.callable_resp.entries = []
self.callable_resp.uuid = ""
self.callable_resp.continuation_token = ""
next(self.obj_iterator)
def test_next_iterator_exhausted(self):
entry_1 = Mock(BucketEntry)
entry_2 = Mock(BucketEntry)
entry_3 = Mock(BucketEntry)
self.callable_resp.entries = [entry_1, entry_2, entry_3]
self.callable_resp.uuid = "UUID"
self.assertEqual(entry_1, next(self.obj_iterator))
self.assertEqual(entry_2, next(self.obj_iterator))
self.assertEqual(entry_3, next(self.obj_iterator))
with self.assertRaises(StopIteration):
next(self.obj_iterator)
def test_next_multiple_pages(self):
entry_1 = Mock(BucketEntry)
entry_2 = Mock(BucketEntry)
entry_3 = Mock(BucketEntry)
self.callable_resp.entries = [entry_1, entry_2]
self.callable_resp.uuid = ""
self.assertEqual(entry_1, next(self.obj_iterator))
self.assertEqual(entry_2, next(self.obj_iterator))
self.callable_resp.entries = [entry_3]
self.assertEqual(entry_3, next(self.obj_iterator))
with self.assertRaises(StopIteration):
next(self.obj_iterator)
| aistore-master | python/tests/unit/sdk/test_object_iterator.py |
import base64
import unittest
from unittest.mock import Mock
from unittest.mock import patch
import cloudpickle
import aistore
from aistore.sdk.const import (
HTTP_METHOD_PUT,
HTTP_METHOD_GET,
HTTP_METHOD_POST,
HTTP_METHOD_DELETE,
URL_PATH_ETL,
UTF_ENCODING,
)
from aistore.sdk.etl_const import (
CODE_TEMPLATE,
ETL_COMM_HPUSH,
ETL_COMM_HPULL,
ETL_COMM_IO,
)
from aistore.sdk.etl import Etl, _get_default_runtime
from aistore.sdk.types import ETLDetails
class TestEtl(unittest.TestCase): # pylint: disable=unused-variable
def setUp(self) -> None:
self.mock_client = Mock()
self.etl_name = "etl-name"
self.etl = Etl(self.mock_client, self.etl_name)
def test_init_spec_default_params(self):
expected_action = {
"communication": "hpush://",
"timeout": "5m",
"argument": "",
}
self.init_spec_exec_assert(expected_action)
def test_init_spec_invalid_comm(self):
with self.assertRaises(ValueError):
self.etl.init_spec("template", communication_type="invalid")
def test_init_spec(self):
communication_type = ETL_COMM_HPUSH
timeout = "6m"
expected_action = {
"communication": f"{communication_type}://",
"timeout": timeout,
"argument": "",
}
self.init_spec_exec_assert(
expected_action, communication_type=communication_type, timeout=timeout
)
def init_spec_exec_assert(self, expected_action, **kwargs):
template = "pod spec template"
expected_action["spec"] = base64.b64encode(
template.encode(UTF_ENCODING)
).decode(UTF_ENCODING)
expected_action["id"] = self.etl_name
expected_response_text = self.etl_name
mock_response = Mock()
mock_response.text = expected_response_text
self.mock_client.request.return_value = mock_response
response = self.etl.init_spec(template, **kwargs)
self.assertEqual(expected_response_text, response)
self.mock_client.request.assert_called_with(
HTTP_METHOD_PUT, path=URL_PATH_ETL, json=expected_action
)
def test_init_code_default_runtime(self):
version_to_runtime = {
(3, 7): "python3.8v2",
(3, 1234): "python3.8v2",
(3, 8): "python3.8v2",
(3, 10): "python3.10v2",
(3, 11): "python3.11v2",
}
for version, runtime in version_to_runtime.items():
with patch.object(aistore.sdk.etl.sys, "version_info") as version_info:
version_info.major = version[0]
version_info.minor = version[1]
self.assertEqual(runtime, _get_default_runtime())
def test_init_code_default_params(self):
communication_type = ETL_COMM_HPUSH
expected_action = {
"runtime": _get_default_runtime(),
"communication": f"{communication_type}://",
"timeout": "5m",
"funcs": {"transform": "transform"},
"code": self.encode_fn([], self.transform_fn, communication_type),
"dependencies": base64.b64encode(b"cloudpickle==2.2.0").decode(
UTF_ENCODING
),
"argument": "",
}
self.init_code_exec_assert(expected_action)
def test_init_code_invalid_comm(self):
with self.assertRaises(ValueError):
self.etl.init_code(Mock(), communication_type="invalid")
def test_init_code(self):
runtime = "python-non-default"
communication_type = ETL_COMM_HPULL
timeout = "6m"
preimported = ["pytorch"]
user_dependencies = ["pytorch"]
chunk_size = 123
arg_type = "url"
expected_dependencies = user_dependencies.copy()
expected_dependencies.append("cloudpickle==2.2.0")
expected_dep_str = base64.b64encode(
"\n".join(expected_dependencies).encode(UTF_ENCODING)
).decode(UTF_ENCODING)
expected_action = {
"runtime": runtime,
"communication": f"{communication_type}://",
"timeout": timeout,
"funcs": {"transform": "transform"},
"code": self.encode_fn(preimported, self.transform_fn, communication_type),
"dependencies": expected_dep_str,
"chunk_size": chunk_size,
"argument": arg_type,
}
self.init_code_exec_assert(
expected_action,
preimported_modules=preimported,
dependencies=user_dependencies,
runtime=runtime,
communication_type=communication_type,
timeout=timeout,
chunk_size=chunk_size,
arg_type=arg_type,
)
@staticmethod
def transform_fn():
print("example action")
@staticmethod
def encode_fn(preimported_modules, func, comm_type):
transform = base64.b64encode(cloudpickle.dumps(func)).decode(UTF_ENCODING)
io_comm_context = "transform()" if comm_type == ETL_COMM_IO else ""
template = CODE_TEMPLATE.format(
preimported_modules, transform, io_comm_context
).encode(UTF_ENCODING)
return base64.b64encode(template).decode(UTF_ENCODING)
def init_code_exec_assert(self, expected_action, **kwargs):
expected_action["id"] = self.etl_name
expected_response_text = "response text"
mock_response = Mock()
mock_response.text = expected_response_text
self.mock_client.request.return_value = mock_response
response = self.etl.init_code(transform=self.transform_fn, **kwargs)
self.assertEqual(expected_response_text, response)
self.mock_client.request.assert_called_with(
HTTP_METHOD_PUT, path=URL_PATH_ETL, json=expected_action
)
def test_view(self):
mock_response = Mock()
self.mock_client.request_deserialize.return_value = mock_response
response = self.etl.view()
self.assertEqual(mock_response, response)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET, path=f"etl/{ self.etl_name }", res_model=ETLDetails
)
def test_start(self):
self.etl.start()
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST, path=f"etl/{ self.etl_name }/start"
)
def test_stop(self):
self.etl.stop()
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST, path=f"etl/{ self.etl_name }/stop"
)
def test_delete(self):
self.etl.delete()
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE, path=f"etl/{ self.etl_name }"
)
| aistore-master | python/tests/unit/sdk/test_etl.py |
| aistore-master | python/tests/unit/sdk/__init__.py |
import unittest
from aistore.sdk import ListObjectFlag
from tests.unit.sdk.test_utils import test_cases
# pylint: disable=unused-variable
class TestListObjectFlag(unittest.TestCase):
@test_cases(
([], 0),
([ListObjectFlag.ALL], 2),
([ListObjectFlag.ONLY_REMOTE_PROPS], 1024),
([ListObjectFlag.DELETED, ListObjectFlag.NAME_ONLY], 20),
)
def test_join_flags(self, test_case):
self.assertEqual(test_case[1], ListObjectFlag.join_flags(test_case[0]))
| aistore-master | python/tests/unit/sdk/test_list_object_flag.py |
import unittest
from unittest import mock
from unittest.mock import Mock, call, patch
from aistore.sdk.ais_source import AISSource
from aistore.sdk.bucket import Bucket, Header
from aistore.sdk.etl_const import DEFAULT_ETL_TIMEOUT
from aistore.sdk.object_iterator import ObjectIterator
from aistore.sdk import ListObjectFlag
from aistore.sdk.const import (
ACT_COPY_BCK,
ACT_CREATE_BCK,
ACT_DESTROY_BCK,
ACT_ETL_BCK,
ACT_EVICT_REMOTE_BCK,
ACT_LIST,
ACT_MOVE_BCK,
ACT_SUMMARY_BCK,
PROVIDER_AMAZON,
PROVIDER_AIS,
QPARAM_BCK_TO,
QPARAM_NAMESPACE,
QPARAM_PROVIDER,
QPARAM_KEEP_REMOTE,
QPARAM_COUNT_REMOTE_OBJS,
QPARAM_FLT_PRESENCE,
HTTP_METHOD_DELETE,
HTTP_METHOD_GET,
HTTP_METHOD_HEAD,
HTTP_METHOD_PUT,
HTTP_METHOD_POST,
URL_PATH_BUCKETS,
HEADER_ACCEPT,
HEADER_BUCKET_PROPS,
HEADER_BUCKET_SUMM,
MSGPACK_CONTENT_TYPE,
STATUS_ACCEPTED,
STATUS_BAD_REQUEST,
STATUS_OK,
)
from aistore.sdk.errors import (
InvalidBckProvider,
ErrBckAlreadyExists,
ErrBckNotFound,
UnexpectedHTTPStatusCode,
)
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import (
ActionMsg,
BucketList,
BucketEntry,
BsummCtrlMsg,
Namespace,
TCBckMsg,
TransformBckMsg,
CopyBckMsg,
)
BCK_NAME = "bucket_name"
# pylint: disable=too-many-public-methods,unused-variable
class TestBucket(unittest.TestCase):
def setUp(self) -> None:
self.mock_client = Mock(RequestClient)
self.amz_bck = Bucket(
name=BCK_NAME, client=self.mock_client, provider=PROVIDER_AMAZON
)
self.amz_bck_params = self.amz_bck.qparam.copy()
self.ais_bck = Bucket(name=BCK_NAME, client=self.mock_client)
self.ais_bck_params = self.ais_bck.qparam.copy()
def test_default_props(self):
bucket = Bucket(name=BCK_NAME, client=self.mock_client)
self.assertEqual({QPARAM_PROVIDER: PROVIDER_AIS}, bucket.qparam)
self.assertEqual(PROVIDER_AIS, bucket.provider)
self.assertIsNone(bucket.namespace)
def test_properties(self):
self.assertEqual(self.mock_client, self.ais_bck.client)
expected_ns = Namespace(uuid="ns-id", name="ns-name")
client = RequestClient("test client name")
bck = Bucket(
client=client,
name=BCK_NAME,
provider=PROVIDER_AMAZON,
namespace=expected_ns,
)
self.assertEqual(client, bck.client)
self.assertEqual(PROVIDER_AMAZON, bck.provider)
self.assertEqual(
{
QPARAM_PROVIDER: PROVIDER_AMAZON,
QPARAM_NAMESPACE: expected_ns.get_path(),
},
bck.qparam,
)
self.assertEqual(BCK_NAME, bck.name)
self.assertEqual(expected_ns, bck.namespace)
def test_ais_source(self):
self.assertIsInstance(self.ais_bck, AISSource)
def test_create_invalid_provider(self):
self.assertRaises(InvalidBckProvider, self.amz_bck.create)
def _assert_bucket_created(self, bck):
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=ACT_CREATE_BCK).dict(),
params=self.ais_bck.qparam,
)
self.assertIsInstance(bck, Bucket)
def test_create_success(self):
res = self.ais_bck.create()
self._assert_bucket_created(res)
def test_create_already_exists(self):
already_exists_err = ErrBckAlreadyExists(400, "message")
self.mock_client.request.side_effect = already_exists_err
with self.assertRaises(ErrBckAlreadyExists):
self.ais_bck.create()
res = self.ais_bck.create(exist_ok=True)
self._assert_bucket_created(res)
def test_rename_invalid_provider(self):
self.assertRaises(InvalidBckProvider, self.amz_bck.rename, "new_name")
def test_rename_success(self):
new_bck_name = "new_bucket"
expected_response = "rename_op_123"
self.ais_bck_params[QPARAM_BCK_TO] = f"{PROVIDER_AIS}/@#/{new_bck_name}/"
mock_response = Mock()
mock_response.text = expected_response
self.mock_client.request.return_value = mock_response
response = self.ais_bck.rename(new_bck_name)
self.assertEqual(expected_response, response)
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=ACT_MOVE_BCK).dict(),
params=self.ais_bck_params,
)
self.assertEqual(self.ais_bck.name, new_bck_name)
def test_delete_invalid_provider(self):
self.assertRaises(InvalidBckProvider, self.amz_bck.delete)
def test_delete_success(self):
self.ais_bck.delete()
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=ACT_DESTROY_BCK).dict(),
params=self.ais_bck.qparam,
)
def test_delete_missing(self):
self.mock_client.request.side_effect = ErrBckNotFound(400, "not found")
with self.assertRaises(ErrBckNotFound):
Bucket(client=self.mock_client, name="missing-bucket").delete()
self.ais_bck.delete(missing_ok=True)
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=ACT_DESTROY_BCK).dict(),
params=self.ais_bck.qparam,
)
def test_evict_invalid_provider(self):
self.assertRaises(InvalidBckProvider, self.ais_bck.evict)
def test_evict_success(self):
for keep_md in [True, False]:
self.amz_bck_params[QPARAM_KEEP_REMOTE] = str(keep_md)
self.amz_bck.evict(keep_md=keep_md)
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=ACT_EVICT_REMOTE_BCK).dict(),
params=self.amz_bck_params,
)
def test_head(self):
mock_header = Mock()
mock_header.headers = Header("value")
self.mock_client.request.return_value = mock_header
headers = self.ais_bck.head()
self.mock_client.request.assert_called_with(
HTTP_METHOD_HEAD,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
params=self.ais_bck.qparam,
)
self.assertEqual(headers, mock_header.headers)
def test_copy_default_params(self):
dest_bck = Bucket(
client=self.mock_client,
name="test-bck",
namespace=Namespace(uuid="namespace-id", name="ns-name"),
provider="any-provider",
)
action_value = {"prefix": "", "prepend": "", "dry_run": False, "force": False}
self._copy_exec_assert(dest_bck, action_value)
def test_copy(self):
prefix_filter = "existing-"
prepend_val = "prefix-"
dry_run = True
force = True
action_value = {
"prefix": prefix_filter,
"prepend": prepend_val,
"dry_run": dry_run,
"force": force,
}
self._copy_exec_assert(
self.ais_bck,
action_value,
prefix_filter=prefix_filter,
prepend=prepend_val,
dry_run=dry_run,
force=force,
)
def _copy_exec_assert(self, to_bck, expected_act_value, **kwargs):
expected_response = "copy-action-id"
mock_response = Mock()
mock_response.text = expected_response
self.mock_client.request.return_value = mock_response
self.ais_bck_params[QPARAM_BCK_TO] = to_bck.get_path()
expected_action = ActionMsg(
action=ACT_COPY_BCK, value=expected_act_value
).dict()
job_id = self.ais_bck.copy(to_bck=to_bck, **kwargs)
self.assertEqual(expected_response, job_id)
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=expected_action,
params=self.ais_bck_params,
)
def test_list_objects(self):
prefix = "prefix-"
page_size = 0
uuid = "1234"
props = "name"
continuation_token = "token"
flags = [ListObjectFlag.CACHED, ListObjectFlag.DELETED]
flag_value = "5"
target_id = "target-node"
expected_act_value = {
"prefix": prefix,
"pagesize": page_size,
"uuid": uuid,
"props": props,
"continuation_token": continuation_token,
"flags": flag_value,
"target": target_id,
}
self._list_objects_exec_assert(
expected_act_value,
prefix=prefix,
page_size=page_size,
uuid=uuid,
props=props,
continuation_token=continuation_token,
flags=flags,
target=target_id,
)
def test_list_objects_default_params(self):
expected_act_value = {
"prefix": "",
"pagesize": 0,
"uuid": "",
"props": "",
"continuation_token": "",
"flags": "0",
"target": "",
}
self._list_objects_exec_assert(expected_act_value)
def _list_objects_exec_assert(self, expected_act_value, **kwargs):
action = ActionMsg(action=ACT_LIST, value=expected_act_value).dict()
object_names = ["obj_name", "obj_name2"]
bucket_entries = [BucketEntry(n=name) for name in object_names]
mock_list = Mock(BucketList)
mock_list.entries = bucket_entries
self.mock_client.request_deserialize.return_value = mock_list
result = self.ais_bck.list_objects(**kwargs)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
headers={HEADER_ACCEPT: MSGPACK_CONTENT_TYPE},
res_model=BucketList,
json=action,
params=self.ais_bck_params,
)
self.assertEqual(result, mock_list)
self.assertEqual(object_names, [entry.object.name for entry in result.entries])
def test_list_objects_iter(self):
self.assertIsInstance(
self.ais_bck.list_objects_iter("prefix-", "obj props", 123), ObjectIterator
)
def test_list_all_objects(self):
list_1_id = "123"
list_1_cont = "cont"
prefix = "prefix-"
page_size = 5
props = "name"
flags = [ListObjectFlag.CACHED, ListObjectFlag.DELETED]
flag_value = "5"
target_id = "target-node"
expected_act_value_1 = {
"prefix": prefix,
"pagesize": page_size,
"uuid": "",
"props": props,
"continuation_token": "",
"flags": flag_value,
"target": target_id,
}
expected_act_value_2 = {
"prefix": prefix,
"pagesize": page_size,
"uuid": list_1_id,
"props": props,
"continuation_token": list_1_cont,
"flags": flag_value,
"target": target_id,
}
self._list_all_objects_exec_assert(
list_1_id,
list_1_cont,
expected_act_value_1,
expected_act_value_2,
prefix=prefix,
page_size=page_size,
props=props,
flags=flags,
target=target_id,
)
def test_list_all_objects_default_params(self):
list_1_id = "123"
list_1_cont = "cont"
expected_act_value_1 = {
"prefix": "",
"pagesize": 0,
"uuid": "",
"props": "",
"continuation_token": "",
"flags": "0",
"target": "",
}
expected_act_value_2 = {
"prefix": "",
"pagesize": 0,
"uuid": list_1_id,
"props": "",
"continuation_token": list_1_cont,
"flags": "0",
"target": "",
}
self._list_all_objects_exec_assert(
list_1_id, list_1_cont, expected_act_value_1, expected_act_value_2
)
def _list_all_objects_exec_assert(
self,
list_1_id,
list_1_cont,
expected_act_value_1,
expected_act_value_2,
**kwargs,
):
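        """Run list_all_objects() against mocked paged responses and assert the
        expected paging requests were made."""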
entry_1 = BucketEntry(n="entry1")
entry_2 = BucketEntry(n="entry2")
entry_3 = BucketEntry(n="entry3")
list_1 = BucketList(
UUID=list_1_id, ContinuationToken=list_1_cont, Flags=0, Entries=[entry_1]
)
list_2 = BucketList(
UUID="456", ContinuationToken="", Flags=0, Entries=[entry_2, entry_3]
)
# Test with empty list of entries
self.mock_client.request_deserialize.return_value = BucketList(
UUID="empty", ContinuationToken="", Flags=0
)
self.assertEqual([], self.ais_bck.list_all_objects(**kwargs))
# Test with non-empty lists
self.mock_client.request_deserialize.side_effect = [list_1, list_2]
self.assertEqual(
[entry_1, entry_2, entry_3], self.ais_bck.list_all_objects(**kwargs)
)
expected_calls = []
for expected_val in [expected_act_value_1, expected_act_value_2]:
expected_calls.append(
mock.call(
HTTP_METHOD_GET,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
headers={HEADER_ACCEPT: MSGPACK_CONTENT_TYPE},
res_model=BucketList,
json=ActionMsg(action=ACT_LIST, value=expected_val).dict(),
params=self.ais_bck_params,
)
)
for expected in expected_calls:
self.assertIn(expected, self.mock_client.request_deserialize.call_args_list)
def test_transform(self):
etl_name = "etl-name"
prepend_val = "prefix-"
prefix_filter = "required-prefix-"
ext = {"jpg": "txt"}
timeout = "4m"
force = True
dry_run = True
action_value = TCBckMsg(
ext=ext,
transform_msg=TransformBckMsg(etl_name=etl_name, timeout=timeout),
copy_msg=CopyBckMsg(
prefix=prefix_filter, prepend=prepend_val, force=force, dry_run=dry_run
),
).as_dict()
self._transform_exec_assert(
etl_name,
action_value,
prepend=prepend_val,
prefix_filter=prefix_filter,
ext=ext,
force=force,
dry_run=dry_run,
timeout=timeout,
)
def test_transform_default_params(self):
etl_name = "etl-name"
action_value = {
"id": etl_name,
"prefix": "",
"prepend": "",
"force": False,
"dry_run": False,
"request_timeout": DEFAULT_ETL_TIMEOUT,
}
self._transform_exec_assert(etl_name, action_value)
def _transform_exec_assert(self, etl_name, expected_act_value, **kwargs):
to_bck = Bucket(name="new-bucket")
self.ais_bck_params[QPARAM_BCK_TO] = to_bck.get_path()
expected_action = ActionMsg(action=ACT_ETL_BCK, value=expected_act_value).dict()
expected_response = "job-id"
mock_response = Mock()
mock_response.text = expected_response
self.mock_client.request.return_value = mock_response
result_id = self.ais_bck.transform(etl_name, to_bck, **kwargs)
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=expected_action,
params=self.ais_bck_params,
)
self.assertEqual(expected_response, result_id)
def test_object(self):
new_obj = self.ais_bck.object(obj_name="name")
self.assertEqual(self.ais_bck, new_obj.bucket)
@patch("aistore.sdk.object.read_file_bytes")
@patch("aistore.sdk.object.validate_file")
@patch("aistore.sdk.bucket.validate_directory")
@patch("pathlib.Path.glob")
def test_put_files(
self, mock_glob, mock_validate_dir, mock_validate_file, mock_read
):
path = "directory"
file_1_name = "file_1_name"
file_2_name = "file_2_name"
path_1 = Mock()
path_1.is_file.return_value = True
path_1.relative_to.return_value = file_1_name
path_1.stat.return_value = Mock(st_size=123)
path_2 = Mock()
path_2.relative_to.return_value = file_2_name
path_2.is_file.return_value = True
path_2.stat.return_value = Mock(st_size=4567)
file_1_data = b"bytes in the first file"
file_2_data = b"bytes in the second file"
mock_glob.return_value = [path_1, path_2]
expected_obj_names = [file_1_name, file_2_name]
mock_read.side_effect = [file_1_data, file_2_data]
res = self.ais_bck.put_files(path)
mock_validate_dir.assert_called_with(path)
mock_validate_file.assert_has_calls([call(str(path_1)), call(str(path_2))])
self.assertEqual(expected_obj_names, res)
expected_calls = [
call(
HTTP_METHOD_PUT,
path=f"objects/{BCK_NAME}/{file_1_name}",
params=self.ais_bck_params,
data=file_1_data,
),
call(
HTTP_METHOD_PUT,
path=f"objects/{BCK_NAME}/{file_2_name}",
params=self.ais_bck_params,
data=file_2_data,
),
]
self.mock_client.request.assert_has_calls(expected_calls)
def test_get_path(self):
namespace = Namespace(uuid="ns-id", name="ns-name")
bucket = Bucket(name=BCK_NAME, namespace=namespace, provider=PROVIDER_AMAZON)
expected_path = (
f"{PROVIDER_AMAZON}/@{namespace.uuid}#{namespace.name}/{bucket.name}/"
)
self.assertEqual(expected_path, bucket.get_path())
self.assertEqual(f"{PROVIDER_AIS}/@#/{bucket.name}/", self.ais_bck.get_path())
@patch("aistore.sdk.bucket.Bucket.object")
@patch("aistore.sdk.bucket.Bucket.list_objects_iter")
def test_list_urls(self, mock_list_obj, mock_object):
prefix = "my-prefix"
etl_name = "my-etl"
object_names = ["obj_name", "obj_name2"]
expected_obj_calls = []
# Should create an object reference and get url for every object returned by listing
for name in object_names:
expected_obj_calls.append(call(name))
expected_obj_calls.append(call().get_url(etl_name=etl_name))
mock_list_obj.return_value = [BucketEntry(n=name) for name in object_names]
list(self.ais_bck.list_urls(prefix=prefix, etl_name=etl_name))
mock_list_obj.assert_called_with(prefix=prefix, props="name")
mock_object.assert_has_calls(expected_obj_calls)
def test_make_request_no_client(self):
bucket = Bucket(name="name")
with self.assertRaises(ValueError):
bucket.make_request("method", "action")
def test_make_request_default_params(self):
method = "method"
action = "action"
self.ais_bck.make_request(method, action)
self.mock_client.request.assert_called_with(
method,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=action, value=None).dict(),
params=self.ais_bck.qparam,
)
def test_make_request(self):
method = "method"
action = "action"
value = {"request_key": "value"}
params = {"qparamkey": "qparamval"}
self.ais_bck.make_request(method, action, value, params)
self.mock_client.request.assert_called_with(
method,
path=f"{URL_PATH_BUCKETS}/{BCK_NAME}",
json=ActionMsg(action=action, value=value).dict(),
params=params,
)
def test_summary(self):
# Mock responses for request calls
response1 = Mock()
response1.status_code = STATUS_ACCEPTED
response1.text = '"job_id"'
response2 = Mock()
response2.status_code = STATUS_OK
response2.content = (
b'[{"name":"temporary","provider":"ais","namespace":{"uuid":"","name":""},'
b'"ObjCount":{"obj_count_present":"137160","obj_count_remote":"0"},'
b'"ObjSize":{"obj_min_size":1024,"obj_avg_size":1024,"obj_max_size":1024},'
b'"TotalSize":{"size_on_disk":"148832256","size_all_present_objs":"140451840",'
b'"size_all_remote_objs":"0","total_disks_size":"4955520307200"},'
b'"used_pct":0,"is_present":false}]\n'
)
# Set the side_effect of the request method to return the two responses in sequence
self.mock_client.request.side_effect = [response1, response2]
# Call the summary method
result = self.ais_bck.summary()
# Ensure that request was called with the correct sequence of calls
bsumm_ctrl_msg = BsummCtrlMsg(
uuid="", prefix="", fast=True, cached=True, present=True
).dict()
bsumm_ctrl_msg_with_uuid = BsummCtrlMsg(
uuid="job_id", prefix="", fast=True, cached=True, present=True
).dict()
calls = [
call(
HTTP_METHOD_GET,
path="buckets/bucket_name",
json={"action": ACT_SUMMARY_BCK, "name": "", "value": bsumm_ctrl_msg},
params=self.ais_bck.qparam,
),
call(
HTTP_METHOD_GET,
path="buckets/bucket_name",
json={
"action": ACT_SUMMARY_BCK,
"name": "",
"value": bsumm_ctrl_msg_with_uuid,
},
params=self.ais_bck.qparam,
),
]
self.mock_client.request.assert_has_calls(calls)
# Assert that the result has the expected structure
self.assertIsInstance(result, dict)
self.assertIn("name", result)
self.assertIn("provider", result)
self.assertIn("ObjCount", result)
self.assertIn("ObjSize", result)
self.assertIn("TotalSize", result)
def test_summary_error_handling(self):
        # Mock responses for the first and second request calls
first_response = Mock()
first_response.status_code = STATUS_ACCEPTED
second_response = Mock()
second_response.status_code = STATUS_BAD_REQUEST
second_response.text = '"job_id"'
        # Set the side_effect of the request method to return the mock responses in sequence
self.mock_client.request.side_effect = [first_response, second_response]
# Call the summary method and expect an UnexpectedHTTPStatusCode exception
with self.assertRaises(UnexpectedHTTPStatusCode):
self.ais_bck.summary()
# Verify that the request method was called twice
        self.assertEqual(2, self.mock_client.request.call_count)
def test_info(self):
# Mock response for request calls
response = Mock()
response.status_code = 200
response.headers = {
HEADER_BUCKET_PROPS: '{"some": "props"}',
HEADER_BUCKET_SUMM: '{"some": "summary"}',
}
self.mock_client.request.return_value = response
# Call the info method
bucket_props, bucket_summ = self.ais_bck.info()
# Ensure the request was made correctly
self.mock_client.request.assert_called_once_with(
HTTP_METHOD_HEAD,
path=f"{URL_PATH_BUCKETS}/{self.ais_bck.name}",
params={
**self.ais_bck.qparam,
QPARAM_FLT_PRESENCE: 0,
QPARAM_COUNT_REMOTE_OBJS: True,
},
)
# Check the return values
self.assertEqual(bucket_props, {"some": "props"})
self.assertEqual(bucket_summ, {"some": "summary"})
# Test with invalid flt_presence
with self.assertRaises(ValueError):
self.ais_bck.info(flt_presence=6)
if __name__ == "__main__":
unittest.main()
| aistore-master | python/tests/unit/sdk/test_bucket.py |
import unittest
from typing import List, Optional
from unittest.mock import Mock, create_autospec
from aistore.sdk.bucket import Bucket
from aistore.sdk.cluster import Cluster
from aistore.sdk.const import (
HTTP_METHOD_GET,
QPARAM_WHAT,
QPARAM_PROVIDER,
ACT_LIST,
PROVIDER_AIS,
WHAT_SMAP,
URL_PATH_DAEMON,
URL_PATH_BUCKETS,
URL_PATH_HEALTH,
QPARAM_PRIMARY_READY_REB,
URL_PATH_CLUSTER,
WHAT_ALL_XACT_STATUS,
WHAT_ALL_RUNNING_STATUS,
URL_PATH_ETL,
)
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import (
    Smap,
    ActionMsg,
    BucketModel,
    JobStatus,
    JobQuery,
    ETLInfo,
)
class TestCluster(unittest.TestCase): # pylint: disable=unused-variable
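    """Unit tests for the Cluster API, exercised against a mocked RequestClient."""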
def setUp(self) -> None:
self.mock_client = Mock(RequestClient)
self.cluster = Cluster(self.mock_client)
def test_get_info(self):
expected_result = create_autospec(Smap)
self.mock_client.request_deserialize.return_value = expected_result
result = self.cluster.get_info()
self.assertEqual(result, expected_result)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_DAEMON,
res_model=Smap,
params={QPARAM_WHAT: WHAT_SMAP},
)
def test_list_buckets(self):
provider = "any-provider"
expected_params = {QPARAM_PROVIDER: provider}
self.list_buckets_exec_assert(expected_params, provider=provider)
def test_list_buckets_default_param(self):
expected_params = {QPARAM_PROVIDER: PROVIDER_AIS}
self.list_buckets_exec_assert(expected_params)
def list_buckets_exec_assert(self, expected_params, **kwargs):
expected_result = [Mock(Bucket)]
self.mock_client.request_deserialize.return_value = expected_result
res = self.cluster.list_buckets(**kwargs)
self.assertEqual(expected_result, res)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_BUCKETS,
res_model=List[BucketModel],
json=ActionMsg(action=ACT_LIST).dict(),
params=expected_params,
)
def test_is_aistore_running_exception(self):
self.mock_client.request.side_effect = Exception
self.assertFalse(self.cluster.is_aistore_running())
def test_is_aistore_running(self):
expected_params = {QPARAM_PRIMARY_READY_REB: "true"}
response = Mock()
response.ok = True
self.mock_client.request.return_value = response
self.assertTrue(self.cluster.is_aistore_running())
response.ok = False
self.mock_client.request.return_value = response
self.assertFalse(self.cluster.is_aistore_running())
self.mock_client.request.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_HEALTH,
params=expected_params,
)
def test_list_jobs_status_default_params(self):
expected_request_val = JobQuery().as_dict()
self._list_jobs_status_exec_assert(expected_request_val)
def test_list_jobs_status(self):
job_kind = "kind"
target_id = "specific_node"
expected_request_val = JobQuery(kind=job_kind, target=target_id).as_dict()
self._list_jobs_status_exec_assert(
expected_request_val,
job_kind=job_kind,
target_id=target_id,
)
def test_list_jobs_status_no_result(self):
self.mock_client.request_deserialize.return_value = None
self.assertEqual([], self.cluster.list_jobs_status())
def _list_jobs_status_exec_assert(self, expected_request_val, **kwargs):
returned_status = JobStatus()
self.mock_client.request_deserialize.return_value = returned_status
res = self.cluster.list_jobs_status(**kwargs)
self.assertEqual(returned_status, res)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=Optional[List[JobStatus]],
json=expected_request_val,
params={QPARAM_WHAT: WHAT_ALL_XACT_STATUS},
)
def test_list_running_jobs_default_params(self):
expected_request_val = JobQuery(active=True).as_dict()
self._list_running_jobs_exec_assert(expected_request_val)
def test_list_running_jobs(self):
job_kind = "job-kind"
target_id = "my-target"
expected_request_val = JobQuery(
active=True, kind=job_kind, target=target_id
).as_dict()
self._list_running_jobs_exec_assert(
expected_request_val, job_kind=job_kind, target_id=target_id
)
def _list_running_jobs_exec_assert(self, expected_request_val, **kwargs):
mock_response = ["job_1_kind[job_1_id]", "job_2_kind[job_2_id]"]
self.mock_client.request_deserialize.return_value = mock_response
res = self.cluster.list_running_jobs(**kwargs)
self.assertEqual(mock_response, res)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_CLUSTER,
res_model=List[str],
json=expected_request_val,
params={QPARAM_WHAT: WHAT_ALL_RUNNING_STATUS},
)
def test_list_running_etls(self):
mock_response = Mock()
self.mock_client.request_deserialize.return_value = mock_response
response = self.cluster.list_running_etls()
self.assertEqual(mock_response, response)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET, path=URL_PATH_ETL, res_model=List[ETLInfo]
)
| aistore-master | python/tests/unit/sdk/test_cluster.py |
import unittest
from unittest.mock import Mock, patch, mock_open
from requests import Response
from requests.structures import CaseInsensitiveDict
from aistore.sdk.ais_source import AISSource
from aistore.sdk.const import (
HTTP_METHOD_HEAD,
DEFAULT_CHUNK_SIZE,
HTTP_METHOD_GET,
QPARAM_ARCHPATH,
QPARAM_ETL_NAME,
HTTP_METHOD_PUT,
HTTP_METHOD_DELETE,
HEADER_CONTENT_LENGTH,
AIS_CHECKSUM_VALUE,
AIS_CHECKSUM_TYPE,
AIS_ACCESS_TIME,
AIS_VERSION,
AIS_CUSTOM_MD,
HTTP_METHOD_POST,
ACT_PROMOTE,
URL_PATH_OBJECTS,
)
from aistore.sdk.object import Object
from aistore.sdk.object_reader import ObjectReader
from aistore.sdk.types import ActionMsg, PromoteAPIArgs
BCK_NAME = "bucket_name"
OBJ_NAME = "object_name"
REQUEST_PATH = f"{URL_PATH_OBJECTS}/{BCK_NAME}/{OBJ_NAME}"
# pylint: disable=unused-variable
class TestObject(unittest.TestCase):
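    """Unit tests for the Object API, using a mocked bucket and client."""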
def setUp(self) -> None:
self.mock_client = Mock()
self.mock_bucket = Mock()
self.mock_bucket.client = self.mock_client
self.mock_bucket.name = BCK_NAME
self.mock_writer = Mock()
self.mock_bucket.qparam = {}
self.expected_params = {}
self.object = Object(self.mock_bucket, OBJ_NAME)
def test_properties(self):
self.assertEqual(self.mock_bucket, self.object.bucket)
self.assertEqual(OBJ_NAME, self.object.name)
def test_ais_source(self):
self.assertIsInstance(self.object, AISSource)
def test_head(self):
self.object.head()
self.mock_client.request.assert_called_with(
HTTP_METHOD_HEAD,
path=REQUEST_PATH,
params=self.expected_params,
)
def test_get_default_params(self):
self.expected_params[QPARAM_ARCHPATH] = ""
self.get_exec_assert()
def test_get(self):
archpath_param = "archpath"
etl_name = "etl"
self.expected_params[QPARAM_ARCHPATH] = archpath_param
self.expected_params[QPARAM_ETL_NAME] = etl_name
self.get_exec_assert(
archpath=archpath_param,
chunk_size=3,
etl_name=etl_name,
writer=self.mock_writer,
)
def get_exec_assert(self, **kwargs):
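        """Call Object.get() with the given kwargs and verify the returned
        ObjectReader attributes, streaming behavior, and request."""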
content = b"123456789"
content_length = 9
ais_check_val = "xyz"
ais_check_type = "md5"
ais_atime = "time string"
ais_version = "3"
custom_metadata_dict = {"key1": "val1", "key2": "val2"}
custom_metadata = ", ".join(
["=".join(kv) for kv in custom_metadata_dict.items()]
)
resp_headers = CaseInsensitiveDict(
{
HEADER_CONTENT_LENGTH: content_length,
AIS_CHECKSUM_VALUE: ais_check_val,
AIS_CHECKSUM_TYPE: ais_check_type,
AIS_ACCESS_TIME: ais_atime,
AIS_VERSION: ais_version,
AIS_CUSTOM_MD: custom_metadata,
}
)
mock_response = Mock(Response)
mock_response.headers = resp_headers
mock_response.iter_content.return_value = content
mock_response.raw = content
expected_obj = ObjectReader(
response_headers=resp_headers,
stream=mock_response,
)
self.mock_client.request.return_value = mock_response
res = self.object.get(**kwargs)
self.assertEqual(expected_obj.raw(), res.raw())
self.assertEqual(content_length, res.attributes.size)
self.assertEqual(ais_check_type, res.attributes.checksum_type)
self.assertEqual(ais_check_val, res.attributes.checksum_value)
self.assertEqual(ais_atime, res.attributes.access_time)
self.assertEqual(ais_version, res.attributes.obj_version)
self.assertEqual(custom_metadata_dict, res.attributes.custom_metadata)
self.mock_client.request.assert_called_with(
HTTP_METHOD_GET,
path=REQUEST_PATH,
params=self.expected_params,
stream=True,
)
# Use the object reader iterator to call the stream with the chunk size
for _ in res:
continue
mock_response.iter_content.assert_called_with(
chunk_size=kwargs.get("chunk_size", DEFAULT_CHUNK_SIZE)
)
if "writer" in kwargs:
self.mock_writer.writelines.assert_called_with(res)
@patch("aistore.sdk.object.Object.get_url")
def test_list_urls(self, mock_get_url):
object_url = "single-object-url"
mock_get_url.return_value = object_url
etl_name = "test-etl"
res = self.object.list_urls(etl_name=etl_name)
self.assertEqual([object_url], list(res))
mock_get_url.assert_called_with(etl_name=etl_name)
def test_get_url(self):
expected_res = "full url"
archpath = "arch"
etl = "test-etl"
self.mock_client.get_full_url.return_value = expected_res
res = self.object.get_url(archpath=archpath, etl_name=etl)
self.assertEqual(expected_res, res)
self.mock_client.get_full_url.assert_called_with(
REQUEST_PATH, {QPARAM_ARCHPATH: archpath, QPARAM_ETL_NAME: etl}
)
@patch("pathlib.Path.is_file")
@patch("pathlib.Path.exists")
def test_put_file(self, mock_exists, mock_is_file):
mock_exists.return_value = True
mock_is_file.return_value = True
path = "any/filepath"
data = b"bytes in the file"
with patch("builtins.open", mock_open(read_data=data)):
self.object.put_file(path)
self.mock_client.request.assert_called_with(
HTTP_METHOD_PUT,
path=REQUEST_PATH,
params=self.expected_params,
data=data,
)
def test_put_content(self):
content = b"user-supplied-bytes"
self.object.put_content(content)
self.mock_client.request.assert_called_with(
HTTP_METHOD_PUT,
path=REQUEST_PATH,
params=self.expected_params,
data=content,
)
def test_promote_default_args(self):
filename = "promoted file"
expected_value = PromoteAPIArgs(source_path=filename, object_name=OBJ_NAME)
self.promote_exec_assert(filename, expected_value)
def test_promote(self):
filename = "promoted file"
target_id = "target node"
recursive = True
overwrite_dest = True
delete_source = True
src_not_file_share = True
expected_value = PromoteAPIArgs(
source_path=filename,
object_name=OBJ_NAME,
target_id=target_id,
recursive=recursive,
overwrite_dest=overwrite_dest,
delete_source=delete_source,
src_not_file_share=src_not_file_share,
)
self.promote_exec_assert(
filename,
expected_value,
target_id=target_id,
recursive=recursive,
overwrite_dest=overwrite_dest,
delete_source=delete_source,
src_not_file_share=src_not_file_share,
)
def promote_exec_assert(self, filename, expected_value, **kwargs):
request_path = f"{URL_PATH_OBJECTS}/{BCK_NAME}"
expected_json = ActionMsg(
action=ACT_PROMOTE, name=filename, value=expected_value.as_dict()
).dict()
self.object.promote(filename, **kwargs)
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST,
path=request_path,
params=self.expected_params,
json=expected_json,
)
def test_delete(self):
self.object.delete()
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE, path=REQUEST_PATH, params=self.expected_params
)
| aistore-master | python/tests/unit/sdk/test_object.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
import unittest
from aistore.sdk import Client
from aistore.sdk.cluster import Cluster
from aistore.sdk.etl import Etl
from aistore.sdk.request_client import RequestClient
from aistore.sdk.types import Namespace
from aistore.sdk.job import Job
class TestClient(unittest.TestCase): # pylint: disable=unused-variable
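    """Unit tests verifying that Client factory methods construct the expected SDK objects."""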
def setUp(self) -> None:
self.endpoint = "https://aistore-endpoint"
self.client = Client(self.endpoint)
def test_bucket(self):
bck_name = "bucket_123"
provider = "bucketProvider"
namespace = Namespace(uuid="id", name="namespace")
bucket = self.client.bucket(bck_name, provider, namespace)
self.assertEqual(self.endpoint, bucket.client.endpoint)
self.assertIsInstance(bucket.client, RequestClient)
self.assertEqual(bck_name, bucket.name)
self.assertEqual(provider, bucket.provider)
self.assertEqual(namespace, bucket.namespace)
def test_cluster(self):
res = self.client.cluster()
self.assertEqual(self.endpoint, res.client.endpoint)
self.assertIsInstance(res.client, RequestClient)
self.assertIsInstance(res, Cluster)
def test_job(self):
job_id = "1234"
job_kind = "test kind"
res = self.client.job(job_id, job_kind)
self.assertIsInstance(res, Job)
self.assertEqual(job_id, res.job_id)
self.assertEqual(job_kind, res.job_kind)
def test_etl(self):
etl_name = "my-etl"
res = self.client.etl(etl_name)
self.assertIsInstance(res, Etl)
self.assertEqual(etl_name, res.name)
| aistore-master | python/tests/unit/sdk/test_client.py |
import unittest
from unittest.mock import patch, Mock
from requests import Response
from aistore.sdk.const import (
JSON_CONTENT_TYPE,
HEADER_USER_AGENT,
USER_AGENT_BASE,
HEADER_CONTENT_TYPE,
)
from aistore.sdk.request_client import RequestClient
from aistore.version import __version__ as sdk_version
class TestRequestClient(unittest.TestCase): # pylint: disable=unused-variable
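    """Unit tests for RequestClient request dispatch, headers, and URL construction."""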
def setUp(self) -> None:
self.endpoint = "https://aistore-endpoint"
self.mock_session = Mock()
with patch("aistore.sdk.request_client.requests") as mock_requests_lib:
mock_requests_lib.session.return_value = self.mock_session
self.request_client = RequestClient(self.endpoint)
self.request_headers = {
HEADER_CONTENT_TYPE: JSON_CONTENT_TYPE,
HEADER_USER_AGENT: f"{USER_AGENT_BASE}/{sdk_version}",
}
def test_properties(self):
self.assertEqual(self.endpoint + "/v1", self.request_client.base_url)
self.assertEqual(self.endpoint, self.request_client.endpoint)
self.assertEqual(self.mock_session, self.request_client.session)
@patch("aistore.sdk.request_client.RequestClient.request")
@patch("aistore.sdk.request_client.decode_response")
def test_request_deserialize(self, mock_decode, mock_request):
method = "method"
path = "path"
decoded_value = "test value"
custom_kw = "arg"
mock_decode.return_value = decoded_value
mock_response = Mock(Response)
mock_request.return_value = mock_response
res = self.request_client.request_deserialize(
method, path, str, keyword=custom_kw
)
self.assertEqual(decoded_value, res)
mock_request.assert_called_with(method, path, keyword=custom_kw)
mock_decode.assert_called_with(str, mock_response)
def test_request(self):
method = "method"
path = "path"
req_url = f"{self.request_client.base_url}/{path}"
mock_response = Mock()
mock_response.status_code = 200
self.mock_session.request.return_value = mock_response
res = self.request_client.request("method", "path", keyword="arg")
self.mock_session.request.assert_called_with(
method, req_url, headers=self.request_headers, keyword="arg"
)
self.assertEqual(mock_response, res)
for response_code in [199, 300]:
with patch("aistore.sdk.request_client.handle_errors") as mock_handle_err:
mock_response.status_code = response_code
self.mock_session.request.return_value = mock_response
res = self.request_client.request("method", "path", keyword="arg")
self.mock_session.request.assert_called_with(
method, req_url, headers=self.request_headers, keyword="arg"
)
self.assertEqual(mock_response, res)
mock_handle_err.assert_called_once()
def test_get_full_url(self):
path = "/testpath/to_obj"
params = {"p1key": "p1val", "p2key": "p2val"}
res = self.request_client.get_full_url(path, params)
self.assertEqual(
"https://aistore-endpoint/v1/testpath/to_obj?p1key=p1val&p2key=p2val", res
)
| aistore-master | python/tests/unit/sdk/test_request_client.py |
import unittest
from typing import Dict
from unittest.mock import Mock, patch, mock_open, call
from aistore.sdk.const import (
URL_PATH_DSORT,
HTTP_METHOD_POST,
DSORT_ABORT,
HTTP_METHOD_DELETE,
DSORT_UUID,
HTTP_METHOD_GET,
)
from aistore.sdk.dsort import Dsort
from aistore.sdk.dsort_types import DsortMetrics, JobInfo
from aistore.sdk.errors import Timeout
from aistore.sdk.utils import probing_frequency
class TestDsort(unittest.TestCase):
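    """Unit tests for the Dsort job interface, using a mocked request client."""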
def setUp(self) -> None:
self.mock_client = Mock()
self.dsort_id = "123"
self.dsort = Dsort(client=self.mock_client, dsort_id=self.dsort_id)
@staticmethod
def _get_mock_job_info(finished, aborted=False):
mock_metrics = Mock(DsortMetrics)
mock_metrics.aborted = aborted
mock_metrics.shard_creation = Mock(finished=finished)
mock_job_info = Mock(JobInfo)
mock_job_info.metrics = mock_metrics
return mock_job_info
def test_properties(self):
self.assertEqual(self.dsort_id, self.dsort.dsort_id)
@patch("aistore.sdk.dsort.validate_file")
@patch("aistore.sdk.dsort.json")
# pylint: disable=unused-argument
def test_start(self, mock_json, mock_validate_file):
new_id = "456"
spec = {"test_spec_entry": "test_spec_value"}
mock_request_return_val = Mock(text=new_id)
mock_json.load.return_value = spec
self.mock_client.request.return_value = mock_request_return_val
with patch("builtins.open", mock_open()):
res = self.dsort.start("spec_file")
self.assertEqual(new_id, res)
self.assertEqual(new_id, self.dsort.dsort_id)
self.mock_client.request.assert_called_with(
HTTP_METHOD_POST, path=URL_PATH_DSORT, json=spec
)
def test_abort(self):
self.dsort.abort()
self.mock_client.request.assert_called_with(
HTTP_METHOD_DELETE,
path=f"{URL_PATH_DSORT}/{DSORT_ABORT}",
params={DSORT_UUID: [self.dsort_id]},
)
def test_get_job_info(self):
mock_job_info = {"id_1": Mock(JobInfo)}
self.mock_client.request_deserialize.return_value = mock_job_info
res = self.dsort.get_job_info()
self.assertEqual(mock_job_info, res)
self.mock_client.request_deserialize.assert_called_with(
HTTP_METHOD_GET,
path=URL_PATH_DSORT,
res_model=Dict[str, JobInfo],
params={DSORT_UUID: [self.dsort_id]},
)
@patch("aistore.sdk.dsort.time.sleep")
@patch("aistore.sdk.dsort.Dsort.get_job_info")
def test_wait_default_timeout(self, mock_get_job_info, mock_sleep):
timeout = 300
frequency = probing_frequency(timeout)
expected_job_info_calls = [
call(),
call(),
call(),
]
expected_sleep_calls = [call(frequency), call(frequency)]
self._wait_test_helper(
self.dsort,
mock_get_job_info,
mock_sleep,
expected_job_info_calls,
expected_sleep_calls,
)
@patch("aistore.sdk.dsort.time.sleep")
@patch("aistore.sdk.dsort.Dsort.get_job_info")
def test_wait(self, mock_get_job_info, mock_sleep):
timeout = 20
frequency = probing_frequency(timeout)
expected_job_info_calls = [call(), call(), call()]
expected_sleep_calls = [call(frequency), call(frequency)]
self._wait_test_helper(
self.dsort,
mock_get_job_info,
mock_sleep,
expected_job_info_calls,
expected_sleep_calls,
timeout=timeout,
)
@patch("aistore.sdk.dsort.time.sleep")
@patch("aistore.sdk.dsort.Dsort.get_job_info")
# pylint: disable=unused-argument
def test_wait_timeout(self, mock_get_job_info, mock_sleep):
mock_get_job_info.return_value = {
"key": self._get_mock_job_info(finished=False, aborted=False)
}
self.assertRaises(Timeout, self.dsort.wait)
@patch("aistore.sdk.dsort.time.sleep")
@patch("aistore.sdk.dsort.Dsort.get_job_info")
def test_wait_aborted(self, mock_get_job_info, mock_sleep):
timeout = 300
frequency = probing_frequency(timeout)
expected_metrics_calls = [
call(),
call(),
]
expected_sleep_calls = [call(frequency)]
mock_get_job_info.side_effect = [
{"key": self._get_mock_job_info(finished=False)},
{"key": self._get_mock_job_info(finished=False, aborted=True)},
{"key": self._get_mock_job_info(finished=False)},
]
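        # The second poll reports the job as aborted, so wait() should stop after
        # two get_job_info calls and a single sleep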
self._wait_exec_assert(
self.dsort,
mock_get_job_info,
mock_sleep,
expected_metrics_calls,
expected_sleep_calls,
)
# pylint: disable=too-many-arguments
def _wait_test_helper(
self,
dsort,
mock_get_job_info,
mock_sleep,
expected_job_info_calls,
expected_sleep_calls,
**kwargs,
):
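        """Drive Dsort.wait() through two unfinished polls followed by a finished
        one, then assert the expected calls were made."""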
mock_get_job_info.side_effect = [
{"job_id": self._get_mock_job_info(finished=False)},
{"job_id": self._get_mock_job_info(finished=False)},
{"job_id": self._get_mock_job_info(finished=True)},
]
self._wait_exec_assert(
dsort,
mock_get_job_info,
mock_sleep,
expected_job_info_calls,
expected_sleep_calls,
**kwargs,
)
def _wait_exec_assert(
self,
dsort,
mock_get_job_info,
mock_sleep,
expected_job_info_calls,
expected_sleep_calls,
**kwargs,
):
dsort.wait(**kwargs)
mock_get_job_info.assert_has_calls(expected_job_info_calls)
mock_sleep.assert_has_calls(expected_sleep_calls)
self.assertEqual(len(expected_job_info_calls), mock_get_job_info.call_count)
self.assertEqual(len(expected_sleep_calls), mock_sleep.call_count)
| aistore-master | python/tests/unit/sdk/test_dsort.py |
import unittest
from unittest.mock import patch
from aistore.sdk.multiobj import ObjectTemplate
# pylint: disable=unused-variable
class TestObjectTemplate(unittest.TestCase):
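    """Unit tests for ObjectTemplate value generation and brace-expansion iteration."""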
def setUp(self):
self.template_str = "prefix-{1..6..2}-gap-{12..14..1}-suffix"
self.obj_range_template = ObjectTemplate(self.template_str)
def test_get_value(self):
self.assertEqual(
{"template": self.template_str}, self.obj_range_template.get_value()
)
@patch("aistore.sdk.multiobj.object_template.utils.expand_braces")
def test_iter(self, mock_expand):
expansion_result = ["mock expansion result", "result2"]
mock_expand.return_value.__next__.side_effect = expansion_result
self.assertEqual(
expansion_result,
list(self.obj_range_template),
)
| aistore-master | python/tests/unit/sdk/multiobj/test_object_template.py |
aistore-master | python/tests/unit/sdk/multiobj/__init__.py |
|
import unittest
from aistore.sdk.errors import InvalidObjectRangeIndex
from aistore.sdk.multiobj import ObjectRange
from tests.unit.sdk.test_utils import test_cases
# pylint: disable=unused-variable
class TestObjectRange(unittest.TestCase):
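    """Unit tests for ObjectRange formatting, index validation, and iteration."""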
def setUp(self):
self.prefix = "prefix-"
self.suffix = "-suffix"
self.min_index = 4
self.max_index = 9
self.pad_width = 3
self.step = 2
def test_object_range_defaults(self):
object_range = ObjectRange(
prefix=self.prefix, min_index=self.min_index, max_index=self.max_index
)
self.assertEqual("prefix-{4..9..1}", str(object_range))
def test_object_range(self):
object_range = ObjectRange(
prefix=self.prefix,
min_index=self.min_index,
max_index=self.max_index,
pad_width=self.pad_width,
step=self.step,
suffix=self.suffix,
)
self.assertEqual("prefix-{004..009..2}-suffix", str(object_range))
def test_object_range_prefix_only(self):
object_range = ObjectRange(prefix=self.prefix)
self.assertEqual("prefix-", str(object_range))
def test_object_range_invalid_suffix(self):
with self.assertRaises(ValueError):
ObjectRange(prefix=self.prefix, suffix="anything")
@test_cases(
(1, 25, 0, True),
(25, 1, 0, False),
(20, 25, 1, False),
(None, 25, 1, False),
(0, None, 1, False),
(20, 25, 2, True),
(20, 25, 3, True),
)
def test_validate_indices(self, test_case):
min_index, max_index, pad_width, valid = test_case
if valid:
ObjectRange(
prefix=self.prefix,
min_index=min_index,
max_index=max_index,
pad_width=pad_width,
)
return
with self.assertRaises(InvalidObjectRangeIndex):
ObjectRange(
prefix=self.prefix,
min_index=min_index,
max_index=max_index,
pad_width=pad_width,
)
def test_iter(self):
object_range = ObjectRange(
prefix=self.prefix,
min_index=self.min_index,
max_index=self.max_index,
pad_width=self.pad_width,
step=self.step,
suffix=self.suffix,
)
expected_range = ["prefix-004-suffix", "prefix-006-suffix", "prefix-008-suffix"]
self.assertEqual(expected_range, list(object_range))
| aistore-master | python/tests/unit/sdk/multiobj/test_object_range.py |
import unittest
from unittest.mock import Mock, patch, call
from aistore.sdk import Bucket
from aistore.sdk.const import (
HTTP_METHOD_DELETE,
ACT_DELETE_OBJECTS,
ACT_EVICT_OBJECTS,
HTTP_METHOD_POST,
ACT_PREFETCH_OBJECTS,
ACT_COPY_OBJECTS,
ACT_TRANSFORM_OBJECTS,
ACT_ARCHIVE_OBJECTS,
PROVIDER_AMAZON,
HTTP_METHOD_PUT,
)
from aistore.sdk.etl_const import DEFAULT_ETL_TIMEOUT
from aistore.sdk.multiobj import ObjectGroup, ObjectRange
from aistore.sdk.types import Namespace, BucketModel, ArchiveMultiObj
# pylint: disable=unused-variable,too-many-instance-attributes
class TestObjectGroup(unittest.TestCase):
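    """Unit tests for ObjectGroup multi-object operations against a mocked bucket."""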
def setUp(self) -> None:
self.mock_bck = Mock()
self.mock_bck.name = "mock-bucket"
self.mock_bck.provider = "mock-bck-provider"
self.mock_response_text = "Response Text"
mock_response = Mock()
mock_response.text = self.mock_response_text
self.mock_bck.make_request.return_value = mock_response
self.mock_bck_model = BucketModel(
name=self.mock_bck.name, provider=self.mock_bck.provider
)
self.mock_bck.as_model.return_value = self.mock_bck_model
namespace = Namespace(name="ns-name", uuid="ns-id")
provider = "any provider"
self.dest_bucket = Bucket(
name="to-bucket", namespace=namespace, provider=provider
)
self.obj_names = ["obj-1", "obj-2"]
self.object_group = ObjectGroup(self.mock_bck, obj_names=self.obj_names)
self.expected_value = {"objnames": self.obj_names}
def test_object_group_parameters(self):
obj_names = ["list", "of", "names"]
obj_range = ObjectRange(prefix="prefix-")
obj_template = "prefix-{0..3}"
with self.assertRaises(ValueError):
ObjectGroup(
self.mock_bck,
obj_names=obj_names,
obj_range=obj_range,
)
with self.assertRaises(ValueError):
ObjectGroup(
self.mock_bck,
obj_names=obj_names,
obj_template=obj_template,
)
with self.assertRaises(ValueError):
ObjectGroup(
self.mock_bck,
obj_range=obj_range,
obj_template=obj_template,
)
def object_group_test_helper(
self, object_group_function, http_method, action, expected_value, **kwargs
):
resp_text = object_group_function(**kwargs)
self.assertEqual(self.mock_response_text, resp_text)
self.mock_bck.make_request.assert_called_with(
http_method,
action,
value=expected_value,
)
def test_delete(self):
self.object_group_test_helper(
self.object_group.delete,
HTTP_METHOD_DELETE,
ACT_DELETE_OBJECTS,
self.expected_value,
)
def test_evict(self):
self.object_group_test_helper(
self.object_group.evict,
HTTP_METHOD_DELETE,
ACT_EVICT_OBJECTS,
self.expected_value,
)
def test_prefetch(self):
self.object_group_test_helper(
self.object_group.prefetch,
HTTP_METHOD_POST,
ACT_PREFETCH_OBJECTS,
self.expected_value,
)
def test_copy(self):
self.expected_value["prefix"] = ""
self.expected_value["prepend"] = ""
self.expected_value["dry_run"] = False
self.expected_value["force"] = False
self.expected_value["tobck"] = self.dest_bucket.as_model()
self.expected_value["coer"] = False
# Test default args
self.object_group_test_helper(
self.object_group.copy,
HTTP_METHOD_POST,
ACT_COPY_OBJECTS,
self.expected_value,
to_bck=self.dest_bucket,
)
# Test provided optional args
prepend_val = "new_prefix-"
self.expected_value["prepend"] = prepend_val
self.expected_value["force"] = True
self.expected_value["dry_run"] = True
self.expected_value["coer"] = True
self.object_group_test_helper(
self.object_group.copy,
HTTP_METHOD_POST,
ACT_COPY_OBJECTS,
self.expected_value,
to_bck=self.dest_bucket,
prepend=prepend_val,
force=True,
dry_run=True,
continue_on_error=True,
)
@patch("aistore.sdk.multiobj.object_group.logging")
def test_copy_dry_run(self, mock_logging):
mock_logger = Mock()
mock_logging.getLogger.return_value = mock_logger
self.object_group.copy(to_bck=self.dest_bucket, dry_run=True)
mock_logger.info.assert_called()
def test_transform(self):
etl_name = "any active etl"
self.expected_value["prefix"] = ""
self.expected_value["prepend"] = ""
self.expected_value["dry_run"] = False
self.expected_value["force"] = False
self.expected_value["id"] = etl_name
self.expected_value["request_timeout"] = DEFAULT_ETL_TIMEOUT
self.expected_value["tobck"] = self.dest_bucket.as_model()
self.expected_value["coer"] = False
# Test default args
self.object_group_test_helper(
self.object_group.transform,
HTTP_METHOD_POST,
ACT_TRANSFORM_OBJECTS,
self.expected_value,
to_bck=self.dest_bucket,
etl_name=etl_name,
)
# Test provided optional args
timeout = "30s"
prepend_val = "new_prefix-"
self.expected_value["coer"] = True
self.expected_value["prepend"] = prepend_val
self.expected_value["request_timeout"] = timeout
self.expected_value["dry_run"] = True
self.expected_value["force"] = True
self.object_group_test_helper(
self.object_group.transform,
HTTP_METHOD_POST,
ACT_TRANSFORM_OBJECTS,
self.expected_value,
to_bck=self.dest_bucket,
prepend=prepend_val,
etl_name=etl_name,
timeout=timeout,
dry_run=True,
force=True,
continue_on_error=True,
)
@patch("aistore.sdk.multiobj.object_group.logging")
def test_transform_dry_run(self, mock_logging):
mock_logger = Mock()
mock_logging.getLogger.return_value = mock_logger
self.object_group.transform(
to_bck=self.dest_bucket, etl_name="any etl", dry_run=True
)
mock_logger.info.assert_called()
def test_list_names(self):
self.assertEqual(self.obj_names, self.object_group.list_names())
def test_archive_default_params(self):
archive_name = "test-arch"
expected_value = ArchiveMultiObj(
object_selection=self.expected_value,
archive_name=archive_name,
to_bck=self.mock_bck_model,
).as_dict()
self.object_group_test_helper(
self.object_group.archive,
HTTP_METHOD_PUT,
ACT_ARCHIVE_OBJECTS,
expected_value=expected_value,
archive_name=archive_name,
)
def test_archive(self):
archive_name = "test-arch"
namespace = Namespace(name="ns-name", uuid="ns-id")
to_bck = Bucket(
name="dest-bck-name", namespace=namespace, provider=PROVIDER_AMAZON
)
mime = "text"
include_source = True
allow_append = True
continue_on_err = True
expected_value = ArchiveMultiObj(
object_selection=self.expected_value,
archive_name=archive_name,
to_bck=to_bck.as_model(),
mime=mime,
include_source_name=include_source,
allow_append=allow_append,
continue_on_err=continue_on_err,
).as_dict()
self.object_group_test_helper(
self.object_group.archive,
HTTP_METHOD_PUT,
ACT_ARCHIVE_OBJECTS,
expected_value=expected_value,
archive_name=archive_name,
to_bck=to_bck,
mime=mime,
include_source_name=include_source,
allow_append=allow_append,
continue_on_err=continue_on_err,
)
def test_list_urls(self):
etl_name = "myetl"
expected_obj_calls = []
# Should create an object reference and get url for every object returned by listing
for name in self.obj_names:
expected_obj_calls.append(call(name))
expected_obj_calls.append(call().get_url(etl_name=etl_name))
list(self.object_group.list_urls(etl_name=etl_name))
self.mock_bck.object.assert_has_calls(expected_obj_calls)
| aistore-master | python/tests/unit/sdk/multiobj/test_object_group.py |
import unittest
from aistore.sdk.multiobj import ObjectNames
# pylint: disable=unused-variable
class TestObjectNames(unittest.TestCase):
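    """Unit tests for the ObjectNames selection wrapper."""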
def setUp(self):
self.name_list = ["obj-1", "obj-2"]
self.obj_names = ObjectNames(self.name_list)
def test_get_value(self):
self.assertEqual({"objnames": self.name_list}, self.obj_names.get_value())
def test_iter(self):
self.assertEqual(self.name_list, list(self.obj_names))
| aistore-master | python/tests/unit/sdk/multiobj/test_object_names.py |
aistore-master | python/tests/unit/pytorch/__init__.py |
|
import unittest
from unittest.mock import Mock
from torch.utils.data import IterDataPipe
from aistore.pytorch.aisio import AISSourceLister
from aistore.sdk.ais_source import AISSource
class TestDataPipes(unittest.TestCase):
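    """Unit tests for the AISSourceLister DataPipe."""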
def test_source_lister(self):
ais_source_1 = Mock(AISSource)
ais_source_2 = Mock(AISSource)
source_1_urls = ["url1", "url2"]
source_2_urls = ["url3", "url4"]
ais_source_1.list_urls.return_value = source_1_urls
ais_source_2.list_urls.return_value = source_2_urls
expected_res = source_1_urls + source_2_urls
prefix = "obj-prefix-"
etl_name = "test-etl"
source_lister = AISSourceLister(
[ais_source_1, ais_source_2], prefix=prefix, etl_name=etl_name
)
self.assertIsInstance(source_lister, IterDataPipe)
self.assertEqual(expected_res, list(source_lister))
ais_source_1.list_urls.assert_called_with(prefix=prefix, etl_name=etl_name)
ais_source_2.list_urls.assert_called_with(prefix=prefix, etl_name=etl_name)
| aistore-master | python/tests/unit/pytorch/test_datapipes.py |
import os
from aistore.sdk.const import PROVIDER_AIS
CLUSTER_ENDPOINT = os.environ.get("AIS_ENDPOINT", "http://localhost:8080")
REMOTE_BUCKET = os.environ.get("BUCKET", "")
REMOTE_SET = REMOTE_BUCKET != "" and not REMOTE_BUCKET.startswith(PROVIDER_AIS + ":")
TEST_TIMEOUT = 30
TEST_TIMEOUT_LONG = 120
OBJECT_COUNT = 10
| aistore-master | python/tests/integration/__init__.py |
#
# Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
#
# pylint: disable=missing-module-docstring,import-outside-toplevel,unused-import
from tests.botocore_common import BotocoreBaseTest
from tests.integration import CLUSTER_ENDPOINT
# pylint: disable=unused-variable
class IntegrationTestCase(BotocoreBaseTest):
"""
Run botocore against a real aistore, with
our patch in place, expecting it to handle
redirects for us without ClientErrors.
"""
__test__ = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from aistore.botocore_patch import botocore
self.use_moto = False
self.endpoint_url = CLUSTER_ENDPOINT + "/s3"
self.redirect_errors_expected = False
| aistore-master | python/tests/integration/botocore_patch/test_botocore.py |
aistore-master | python/tests/integration/botocore_patch/__init__.py |
|
#
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
import unittest
from pathlib import Path
import requests
from aistore.sdk import ListObjectFlag
from aistore.sdk.const import PROVIDER_AIS, UTF_ENCODING
from aistore.sdk.errors import InvalidBckProvider, AISError, ErrBckNotFound
from tests.integration.sdk.remote_enabled_test import RemoteEnabledTest
from tests.unit.sdk.test_utils import test_cases
from tests.utils import random_string, cleanup_local
from tests.integration import REMOTE_BUCKET, OBJECT_COUNT
# If remote bucket is not set, skip all cloud-related tests
REMOTE_SET = REMOTE_BUCKET != "" and not REMOTE_BUCKET.startswith(PROVIDER_AIS + ":")
INNER_DIR = "directory"
TOP_LEVEL_FILES = {
"top_level_file.txt": b"test data to verify",
"other_top_level_file.txt": b"other file test data to verify",
}
LOWER_LEVEL_FILES = {"lower_level_file.txt": b"data in inner file"}
def _create_files(folder, file_dict):
for filename, data in file_dict.items():
lower_file = folder.joinpath(filename)
with open(lower_file, "wb") as file:
file.write(data)
# pylint: disable=unused-variable, too-many-public-methods
class TestBucketOps(RemoteEnabledTest):
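    """Integration tests for bucket operations against a running AIS cluster."""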
def setUp(self) -> None:
super().setUp()
self.local_test_files = (
Path().absolute().joinpath("bucket-ops-test-" + random_string(8))
)
def tearDown(self) -> None:
super().tearDown()
cleanup_local(str(self.local_test_files))
def _create_put_files_structure(self, top_level_files, lower_level_files):
self.local_test_files.mkdir(exist_ok=True)
_create_files(self.local_test_files, top_level_files)
inner_dir = self.local_test_files.joinpath(INNER_DIR)
inner_dir.mkdir()
_create_files(inner_dir, lower_level_files)
def test_bucket(self):
new_bck_name = random_string(10)
self._create_bucket(new_bck_name)
res = self.client.cluster().list_buckets()
bucket_names = {bck.name for bck in res}
self.assertIn(new_bck_name, bucket_names)
def test_bucket_invalid_name(self):
with self.assertRaises(ErrBckNotFound):
self.client.bucket("INVALID_BCK_NAME").list_objects()
def test_bucket_invalid_aws_name(self):
with self.assertRaises(AISError):
self.client.bucket("INVALID_BCK_NAME", "aws").list_objects()
def test_head(self):
try:
self.bucket.head()
except requests.exceptions.HTTPError as err:
self.assertEqual(err.response.status_code, 404)
def test_rename(self):
from_bck_name = self.bck_name + "from"
to_bck_name = self.bck_name + "to"
from_bck = self._create_bucket(from_bck_name)
self.client.cluster().list_buckets()
self.assertEqual(from_bck_name, from_bck.name)
job_id = from_bck.rename(to_bck_name=to_bck_name)
self.assertNotEqual(job_id, "")
# wait for rename to finish
self.client.job(job_id).wait()
# new bucket should be created and accessible
to_bck = self.client.bucket(to_bck_name)
to_bck.head()
self.assertEqual(to_bck_name, to_bck.name)
# old bucket should be inaccessible
try:
from_bck.head()
except requests.exceptions.HTTPError as err:
self.assertEqual(err.response.status_code, 404)
def test_copy(self):
from_bck_name = self.bck_name + "from"
to_bck_name = self.bck_name + "to"
from_bck = self._create_bucket(from_bck_name)
to_bck = self._create_bucket(to_bck_name)
prefix = "prefix-"
new_prefix = "new-"
content = b"test"
expected_name = prefix + "-obj"
from_bck.object(expected_name).put_content(content)
from_bck.object("notprefix-obj").put_content(content)
job_id = from_bck.copy(to_bck, prefix_filter=prefix, prepend=new_prefix)
self.assertNotEqual(job_id, "")
self.client.job(job_id).wait()
entries = to_bck.list_all_objects()
self.assertEqual(1, len(entries))
self.assertEqual(new_prefix + expected_name, entries[0].name)
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_evict(self):
self._create_objects()
objects = self.bucket.list_objects(
props="name,cached", prefix=self.obj_prefix
).entries
self._verify_objects_cache_status(objects, True)
self.bucket.evict(keep_md=True)
objects = self.bucket.list_objects(
props="name,cached", prefix=self.obj_prefix
).entries
self.assertEqual(OBJECT_COUNT, len(objects))
self._verify_objects_cache_status(objects, False)
def test_evict_local(self):
# If the bucket is local, eviction should fail
if not REMOTE_SET:
with self.assertRaises(InvalidBckProvider):
self.bucket.evict()
return
# Create a local bucket to test with if self.bucket is a cloud bucket
local_bucket = self._create_bucket(self.bck_name + "-local")
with self.assertRaises(InvalidBckProvider):
local_bucket.evict()
def _verify_objects_cache_status(self, objects, expected_status):
self.assertTrue(len(objects) > 0)
for obj in objects:
self.assertTrue(obj.is_ok())
self.assertEqual(expected_status, obj.is_cached())
def test_put_files_invalid(self):
with self.assertRaises(ValueError):
self.bucket.put_files("non-existent-dir")
self.local_test_files.mkdir()
filename = self.local_test_files.joinpath("file_not_dir")
with open(filename, "w", encoding=UTF_ENCODING):
pass
with self.assertRaises(ValueError):
self.bucket.put_files(filename)
def _verify_obj_res(self, expected_res_dict, expect_err=False):
if expect_err:
for obj_name in expected_res_dict:
with self.assertRaises(AISError):
self.bucket.object(self.obj_prefix + obj_name).get()
else:
for obj_name, expected_data in expected_res_dict.items():
res = self.bucket.object(self.obj_prefix + obj_name).get()
self.assertEqual(expected_data, res.read_all())
def test_put_files_default_args(self):
self._create_put_files_structure(TOP_LEVEL_FILES, LOWER_LEVEL_FILES)
self.bucket.put_files(self.local_test_files, prepend=self.obj_prefix)
self._verify_obj_res(TOP_LEVEL_FILES)
self._verify_obj_res(LOWER_LEVEL_FILES, expect_err=True)
def test_put_files_recursive(self):
self._create_put_files_structure(TOP_LEVEL_FILES, LOWER_LEVEL_FILES)
self.bucket.put_files(
self.local_test_files, recursive=True, prepend=self.obj_prefix
)
self._verify_obj_res(TOP_LEVEL_FILES)
# Lower level file object names will include their relative path by default
expected_lower_res = {}
for obj_name, expected_data in LOWER_LEVEL_FILES.items():
obj_name = str(Path(INNER_DIR).joinpath(obj_name))
expected_lower_res[obj_name] = expected_data
self._verify_obj_res(expected_lower_res)
def test_put_files_recursive_basename(self):
self._create_put_files_structure(TOP_LEVEL_FILES, LOWER_LEVEL_FILES)
self.bucket.put_files(
self.local_test_files,
recursive=True,
basename=True,
prepend=self.obj_prefix,
)
        # Expect all objects to be prefixed by obj_prefix and to contain no
        # relative path in the name due to the basename option
joined_file_data = {**TOP_LEVEL_FILES, **LOWER_LEVEL_FILES}
expected_res = {}
for obj_name, expected_data in joined_file_data.items():
expected_res[obj_name] = expected_data
self._verify_obj_res(expected_res)
def test_put_files_filtered(self):
self.local_test_files.mkdir()
included_filename = "prefix-file.txt"
excluded_by_pattern = "extra_top_file.py"
excluded_by_prefix = "non-prefix-file.txt"
for file in [included_filename, excluded_by_pattern, excluded_by_prefix]:
with open(self.local_test_files.joinpath(file), "wb"):
pass
self.bucket.put_files(
self.local_test_files,
prepend=self.obj_prefix,
prefix_filter="prefix-",
pattern="*.txt",
)
self.bucket.object(self.obj_prefix + included_filename).get()
with self.assertRaises(AISError):
self.bucket.object(excluded_by_pattern).get()
with self.assertRaises(AISError):
self.bucket.object(excluded_by_prefix).get()
def test_put_files_dry_run(self):
self._create_put_files_structure(TOP_LEVEL_FILES, LOWER_LEVEL_FILES)
self.bucket.put_files(
self.local_test_files, dry_run=True, prepend=self.obj_prefix
)
# Verify the put files call does not actually create objects
self._verify_obj_res(TOP_LEVEL_FILES, expect_err=True)
@test_cases((None, OBJECT_COUNT), (7, 7), (OBJECT_COUNT * 2, OBJECT_COUNT))
def test_list_objects(self, test_case):
page_size, response_size = test_case
# Only create the bucket entries on the first subtest run
if len(self.bucket.list_all_objects(prefix=self.obj_prefix)) == 0:
self._create_objects()
if page_size:
resp = self.bucket.list_objects(page_size=page_size, prefix=self.obj_prefix)
else:
resp = self.bucket.list_objects(prefix=self.obj_prefix)
self.assertEqual(response_size, len(resp.entries))
def test_list_all_objects(self):
short_page_len = 17
self._create_objects()
objects = self.bucket.list_all_objects(prefix=self.obj_prefix)
self.assertEqual(OBJECT_COUNT, len(objects))
objects = self.bucket.list_all_objects(
page_size=short_page_len, prefix=self.obj_prefix
)
self.assertEqual(OBJECT_COUNT, len(objects))
def test_list_object_iter(self):
obj_names = set(self._create_objects())
# Empty iterator if there are no objects matching the prefix.
obj_iter = self.bucket.list_objects_iter(prefix="invalid-obj-")
self.assertEqual(0, len(list(obj_iter)))
# Read all `bucket_size` objects by prefix.
obj_iter = self.bucket.list_objects_iter(page_size=10, prefix=self.obj_prefix)
for obj in obj_iter:
obj_names.remove(obj.name)
self.assertEqual(0, len(obj_names))
def test_list_object_flags(self):
self._create_objects()
objects = self.bucket.list_all_objects(
flags=[ListObjectFlag.NAME_ONLY, ListObjectFlag.CACHED],
prefix=self.obj_prefix,
)
self.assertEqual(OBJECT_COUNT, len(objects))
for obj in objects:
self.assertEqual(0, obj.size)
objects = self.bucket.list_all_objects(
flags=[ListObjectFlag.NAME_SIZE], prefix=self.obj_prefix
)
self.assertEqual(OBJECT_COUNT, len(objects))
for obj in objects:
self.assertTrue(obj.size > 0)
def test_summary(self):
summ_test_bck = self._create_bucket("summary-test")
# Initially, the bucket should be empty
bucket_summary = summ_test_bck.summary()
self.assertEqual(bucket_summary["ObjCount"]["obj_count_present"], "0")
self.assertEqual(bucket_summary["TotalSize"]["size_all_present_objs"], "0")
self.assertEqual(bucket_summary["TotalSize"]["size_all_remote_objs"], "0")
self.assertEqual(bucket_summary["used_pct"], 0)
summ_test_bck.object("test-object").put_content("test-content")
bucket_summary = summ_test_bck.summary()
# Now, the bucket should have 1 object
self.assertEqual(bucket_summary["ObjCount"]["obj_count_present"], "1")
self.assertNotEqual(bucket_summary["TotalSize"]["size_all_present_objs"], "0")
summ_test_bck.delete()
# Accessing the summary of a deleted bucket should raise an error
with self.assertRaises(ErrBckNotFound):
summ_test_bck.summary()
def test_info(self):
info_test_bck = self._create_bucket("info-test")
# Initially, the bucket should be empty
_, bck_summ = info_test_bck.info(flt_presence=0)
# For an empty bucket, the object count and total size should be zero
self.assertEqual(bck_summ["ObjCount"]["obj_count_present"], "0")
self.assertEqual(bck_summ["TotalSize"]["size_all_present_objs"], "0")
self.assertEqual(bck_summ["TotalSize"]["size_all_remote_objs"], "0")
self.assertEqual(bck_summ["provider"], "ais")
self.assertEqual(bck_summ["name"], "info-test")
# Upload an object to the bucket
info_test_bck.object("test-object").put_content("test-content")
_, bck_summ = info_test_bck.info()
# Now the bucket should have one object and non-zero size
self.assertEqual(bck_summ["ObjCount"]["obj_count_present"], "1")
self.assertNotEqual(bck_summ["TotalSize"]["size_all_present_objs"], "0")
self.assertEqual(bck_summ["TotalSize"]["size_all_remote_objs"], "0")
self.assertEqual(bck_summ["provider"], "ais")
self.assertEqual(bck_summ["name"], "info-test")
info_test_bck.delete()
# Accessing the info of a deleted bucket should raise an error
with self.assertRaises(ErrBckNotFound):
info_test_bck.summary()
if __name__ == "__main__":
unittest.main()
| aistore-master | python/tests/integration/sdk/test_bucket_ops.py |
aistore-master | python/tests/integration/sdk/__init__.py |
|
import io
import json
import shutil
import tarfile
import unittest
from pathlib import Path
from aistore import Client
from tests.integration import CLUSTER_ENDPOINT, TEST_TIMEOUT
from tests.unit.sdk.test_utils import test_cases
from tests.utils import random_string
class TestDsortOps(unittest.TestCase):
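    """Integration tests for dSort: generating tar shards, running sort jobs, and aborting them."""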
def setUp(self) -> None:
self.client = Client(CLUSTER_ENDPOINT)
self.temp_dir = Path("tmp")
try:
self.temp_dir.mkdir()
except FileExistsError:
shutil.rmtree(self.temp_dir)
self.temp_dir.mkdir()
self.buckets = []
def tearDown(self) -> None:
shutil.rmtree(self.temp_dir)
for bucket in self.buckets:
self.client.bucket(bucket).delete(missing_ok=True)
def _upload_dir(self, dir_name, bck_name):
bck = self.client.bucket(bck_name).create(exist_ok=True)
self.buckets.append(bck_name)
bck.put_files(dir_name)
@staticmethod
def _generate_tar(filename, prefix, tar_format, num_files):
with tarfile.open(filename, "w|", format=tar_format) as tar:
for i in range(num_files):
# Create a file name and write random text to it
filename = f"shard-{prefix}-file-{i}.txt"
with open(filename, "w", encoding="utf-8") as text:
text.write(random_string())
# Add the file to the tarfile
tar.add(filename)
# Remove the file after adding it to the tarfile
Path(filename).unlink()
def _generate_shards(self, tar_type, tar_enum, num_shards, num_files):
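        """Create num_shards tar shards of num_files files each, upload them to
        a bucket named tar_type, and return the shard names."""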
shard_names = []
out_dir = Path(self.temp_dir).joinpath(tar_type)
out_dir.mkdir(exist_ok=True)
for shard_index in range(num_shards):
name = f"{tar_type}-{shard_index}.tar"
filename = out_dir.joinpath(name)
self._generate_tar(filename, shard_index, tar_enum, num_files)
shard_names.append(name)
self._upload_dir(out_dir, tar_type)
return shard_names
def _get_object_content_map(self, bucket_name, object_names):
expected_contents = {}
for obj in object_names:
output_bytes = self.client.bucket(bucket_name).object(obj).get().read_all()
output = io.BytesIO(output_bytes)
with tarfile.open(fileobj=output) as result_tar:
for tar in result_tar:
expected_contents[tar.name] = result_tar.extractfile(
tar.name
).read()
return expected_contents
def _start_with_spec(self, input_bck_name, out_bck_name, input_object_prefix):
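        # Build a dSort spec: read the ".tar" shards named "<prefix>-0" and "<prefix>-1" from
        # the input bucket, sort their contents, and write output shards named
        # "out-shard-0".."out-shard-9" (20MB target shard size) to the output bucket.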
spec = {
"input_extension": ".tar",
"input_bck": {"name": input_bck_name},
"output_bck": {"name": out_bck_name},
"input_format": {"template": input_object_prefix + "-{0..1}"},
"output_format": "out-shard-{0..9}",
"output_shard_size": "20MB",
"description": "Dsort Integration Test",
}
spec_file = self.temp_dir.joinpath("spec.json")
with open(spec_file, "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(spec, indent=4))
dsort = self.client.dsort()
dsort.start(spec_file)
return dsort
@test_cases(("gnu", tarfile.GNU_FORMAT, 2, 3), ("pax", tarfile.PAX_FORMAT, 2, 3))
def test_dsort(self, test_case):
tar_type, tar_format, num_shards, num_files = test_case
# create bucket for output
out_bck_name = tar_type + "-out"
self.client.bucket(out_bck_name).create(exist_ok=True)
self.buckets.append(out_bck_name)
# create tars as objects in buckets
shards = self._generate_shards(tar_type, tar_format, num_shards, num_files)
# Read created objects to get expected output after dsort
expected_contents = self._get_object_content_map(
bucket_name=tar_type, object_names=shards
)
dsort = self._start_with_spec(
input_bck_name=tar_type,
out_bck_name=out_bck_name,
input_object_prefix=tar_type,
)
dsort.wait(timeout=TEST_TIMEOUT)
output_bytes = (
self.client.bucket(out_bck_name).object("out-shard-0.tar").get().read_all()
)
output = io.BytesIO(output_bytes)
result_contents = {}
with tarfile.open(fileobj=output) as result_tar:
for tar in result_tar:
result_contents[tar.name] = result_tar.extractfile(tar.name).read()
self.assertEqual(expected_contents, result_contents)
def test_abort(self):
input_bck_name = "abort"
out_bck_name = "out"
self.client.bucket(input_bck_name).create(exist_ok=True)
self.buckets.append(input_bck_name)
self.client.bucket(out_bck_name).create(exist_ok=True)
self.buckets.append(out_bck_name)
# Create enough files to make the dSort job slow enough to abort
self._generate_shards(input_bck_name, tarfile.GNU_FORMAT, 10, 1000)
dsort = self._start_with_spec(
input_bck_name=input_bck_name,
out_bck_name=out_bck_name,
input_object_prefix=input_bck_name,
)
dsort.abort()
dsort.wait(timeout=TEST_TIMEOUT)
for job_info in dsort.get_job_info().values():
self.assertTrue(job_info.metrics.aborted)
self.assertEqual(1, len(job_info.metrics.errors))
| aistore-master | python/tests/integration/sdk/test_dsort_ops.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
from itertools import cycle
import unittest
import hashlib
import sys
import time
import pytest
from aistore.sdk import Client, Bucket
from aistore.sdk.etl_const import ETL_COMM_HPUSH, ETL_COMM_IO
from aistore.sdk.errors import AISError
from aistore.sdk.etl_templates import MD5, ECHO
from tests.integration import CLUSTER_ENDPOINT
from tests.utils import create_and_put_object, random_string
ETL_NAME_CODE = "etl-" + random_string(5)
ETL_NAME_CODE_IO = "etl-" + random_string(5)
ETL_NAME_CODE_STREAM = "etl-" + random_string(5)
ETL_NAME_SPEC = "etl-" + random_string(5)
ETL_NAME_SPEC_COMP = "etl-" + random_string(5)
# pylint: disable=unused-variable
class TestETLOps(unittest.TestCase):
def setUp(self) -> None:
self.bck_name = random_string()
print("URL END PT ", CLUSTER_ENDPOINT)
self.client = Client(CLUSTER_ENDPOINT)
self.bucket = self.client.bucket(bck_name=self.bck_name).create()
self.obj_name = "temp-obj1.jpg"
self.obj_size = 128
self.content = create_and_put_object(
client=self.client,
bck_name=self.bck_name,
obj_name=self.obj_name,
obj_size=self.obj_size,
)
create_and_put_object(
client=self.client, bck_name=self.bck_name, obj_name="obj2.jpg"
)
self.current_etl_count = len(self.client.cluster().list_running_etls())
def tearDown(self) -> None:
# Try to destroy all temporary buckets if there are left.
for bucket in self.client.cluster().list_buckets():
self.client.bucket(bucket.name).delete(missing_ok=True)
# delete all the etls
for etl in self.client.cluster().list_running_etls():
self.client.etl(etl.id).stop()
self.client.etl(etl.id).delete()
# pylint: disable=too-many-statements,too-many-locals
@pytest.mark.etl
def test_etl_apis(self):
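        # An ETL can be initialized either from inline Python code (init_code) or from a pod
        # spec template (init_spec). With the default communication type the transform receives
        # each object's bytes directly, while ETL_COMM_IO streams objects through stdin/stdout.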
# code
def transform(input_bytes):
md5 = hashlib.md5()
md5.update(input_bytes)
return md5.hexdigest().encode()
code_etl = self.client.etl(ETL_NAME_CODE)
code_etl.init_code(transform=transform)
obj = self.bucket.object(self.obj_name).get(etl_name=code_etl.name).read_all()
self.assertEqual(obj, transform(bytes(self.content)))
self.assertEqual(
self.current_etl_count + 1, len(self.client.cluster().list_running_etls())
)
# code (io comm)
def main():
md5 = hashlib.md5()
chunk = sys.stdin.buffer.read()
md5.update(chunk)
sys.stdout.buffer.write(md5.hexdigest().encode())
code_io_etl = self.client.etl(ETL_NAME_CODE_IO)
code_io_etl.init_code(transform=main, communication_type=ETL_COMM_IO)
obj_io = (
self.bucket.object(self.obj_name).get(etl_name=code_io_etl.name).read_all()
)
self.assertEqual(obj_io, transform(bytes(self.content)))
code_io_etl.stop()
code_io_etl.delete()
# spec
template = MD5.format(communication_type=ETL_COMM_HPUSH)
spec_etl = self.client.etl(ETL_NAME_SPEC)
spec_etl.init_spec(template=template)
obj = self.bucket.object(self.obj_name).get(etl_name=spec_etl.name).read_all()
self.assertEqual(obj, transform(bytes(self.content)))
self.assertEqual(
self.current_etl_count + 2, len(self.client.cluster().list_running_etls())
)
self.assertIsNotNone(code_etl.view())
self.assertIsNotNone(spec_etl.view())
temp_bck1 = self.client.bucket(random_string()).create()
# Transform Bucket with MD5 Template
job_id = self.bucket.transform(
etl_name=spec_etl.name, to_bck=temp_bck1, prefix_filter="temp-"
)
self.client.job(job_id).wait()
starting_obj = self.bucket.list_objects().entries
transformed_obj = temp_bck1.list_objects().entries
# Should transform only the object defined by the prefix filter
self.assertEqual(len(starting_obj) - 1, len(transformed_obj))
md5_obj = temp_bck1.object(self.obj_name).get().read_all()
# Verify bucket-level transformation and object-level transformation are the same
self.assertEqual(obj, md5_obj)
# Start ETL with ECHO template
template = ECHO.format(communication_type=ETL_COMM_HPUSH)
echo_spec_etl = self.client.etl(ETL_NAME_SPEC_COMP)
echo_spec_etl.init_spec(template=template)
temp_bck2 = self.client.bucket(random_string()).create()
# Transform bucket with ECHO template
job_id = self.bucket.transform(
etl_name=echo_spec_etl.name,
to_bck=temp_bck2,
ext={"jpg": "txt"},
)
self.client.job(job_id).wait()
# Verify extension rename
for obj_iter in temp_bck2.list_objects().entries:
self.assertEqual(obj_iter.name.split(".")[1], "txt")
echo_obj = temp_bck2.object("temp-obj1.txt").get().read_all()
# Verify different bucket-level transformations are not the same (compare ECHO transformation and MD5
# transformation)
self.assertNotEqual(md5_obj, echo_obj)
echo_spec_etl.stop()
echo_spec_etl.delete()
# Transform w/ non-existent ETL name raises exception
with self.assertRaises(AISError):
self.bucket.transform(
etl_name="faulty-name", to_bck=Bucket(random_string())
)
# Stop ETLs
code_etl.stop()
spec_etl.stop()
self.assertEqual(
len(self.client.cluster().list_running_etls()), self.current_etl_count
)
# Start stopped ETLs
code_etl.start()
spec_etl.start()
self.assertEqual(
len(self.client.cluster().list_running_etls()), self.current_etl_count + 2
)
# Delete stopped ETLs
code_etl.stop()
spec_etl.stop()
code_etl.delete()
spec_etl.delete()
# Starting deleted ETLs raises error
with self.assertRaises(AISError):
code_etl.start()
with self.assertRaises(AISError):
spec_etl.start()
@pytest.mark.etl
def test_etl_apis_stress(self):
num_objs = 200
content = {}
for i in range(num_objs):
obj_name = f"obj{ i }"
content[obj_name] = create_and_put_object(
client=self.client, bck_name=self.bck_name, obj_name=obj_name
)
# code (hpush)
def transform(input_bytes):
md5 = hashlib.md5()
md5.update(input_bytes)
return md5.hexdigest().encode()
md5_hpush_etl = self.client.etl(ETL_NAME_CODE)
md5_hpush_etl.init_code(transform=transform)
# code (io comm)
def main():
md5 = hashlib.md5()
chunk = sys.stdin.buffer.read()
md5.update(chunk)
sys.stdout.buffer.write(md5.hexdigest().encode())
md5_io_etl = self.client.etl(ETL_NAME_CODE_IO)
md5_io_etl.init_code(transform=main, communication_type=ETL_COMM_IO)
start_time = time.time()
job_id = self.bucket.transform(
etl_name=md5_hpush_etl.name, to_bck=Bucket("transformed-etl-hpush")
)
self.client.job(job_id).wait()
print("Transform bucket using HPUSH took ", time.time() - start_time)
start_time = time.time()
job_id = self.bucket.transform(
etl_name=md5_io_etl.name, to_bck=Bucket("transformed-etl-io")
)
self.client.job(job_id).wait()
print("Transform bucket using IO took ", time.time() - start_time)
for key, value in content.items():
transformed_obj_hpush = (
self.bucket.object(key).get(etl_name=md5_hpush_etl.name).read_all()
)
transformed_obj_io = (
self.bucket.object(key).get(etl_name=md5_io_etl.name).read_all()
)
self.assertEqual(transform(bytes(value)), transformed_obj_hpush)
self.assertEqual(transform(bytes(value)), transformed_obj_io)
@pytest.mark.etl
def test_etl_apis_stream(self):
def transform(reader, writer):
checksum = hashlib.md5()
for byte in reader:
checksum.update(byte)
writer.write(checksum.hexdigest().encode())
code_stream_etl = self.client.etl(ETL_NAME_CODE_STREAM)
code_stream_etl.init_code(transform=transform, chunk_size=32768)
obj = (
self.bucket.object(self.obj_name)
.get(etl_name=code_stream_etl.name)
.read_all()
)
md5 = hashlib.md5()
md5.update(self.content)
self.assertEqual(obj, md5.hexdigest().encode())
@pytest.mark.etl
def test_etl_api_xor(self):
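        # The transform XORs each chunk against a repeating key and appends the 32-character
        # hex MD5 digest of the XOR-ed payload, so the result is split below into the data
        # (everything but the last 32 bytes) and its checksum (the last 32 bytes).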
def transform(reader, writer):
checksum = hashlib.md5()
key = b"AISTORE"
for byte in reader:
out = bytes([_a ^ _b for _a, _b in zip(byte, cycle(key))])
writer.write(out)
checksum.update(out)
writer.write(checksum.hexdigest().encode())
xor_etl = self.client.etl("etl-xor1")
xor_etl.init_code(transform=transform, chunk_size=32)
transformed_obj = (
self.bucket.object(self.obj_name).get(etl_name=xor_etl.name).read_all()
)
data, checksum = transformed_obj[:-32], transformed_obj[-32:]
computed_checksum = hashlib.md5(data).hexdigest().encode()
self.assertEqual(checksum, computed_checksum)
@pytest.mark.etl
def test_etl_transform_url(self):
def url_transform(url):
return url.encode("utf-8")
url_etl = self.client.etl("etl-hpull-url")
url_etl.init_code(
transform=url_transform, arg_type="url", communication_type="hpull"
)
res = self.bucket.object(self.obj_name).get(etl_name=url_etl.name).read_all()
result_url = res.decode("utf-8")
self.assertTrue(self.bucket.name in result_url)
self.assertTrue(self.obj_name in result_url)
if __name__ == "__main__":
unittest.main()
| aistore-master | python/tests/integration/sdk/test_etl_ops.py |
import unittest
from aistore.sdk.const import PROVIDER_AIS
from aistore import Client
from tests.integration import (
REMOTE_SET,
REMOTE_BUCKET,
CLUSTER_ENDPOINT,
OBJECT_COUNT,
TEST_TIMEOUT_LONG,
)
from tests.utils import random_string, destroy_bucket, create_and_put_objects
class RemoteEnabledTest(unittest.TestCase):
"""
This class is intended to be used with all tests that work with remote buckets.
It provides helper methods for dealing with remote buckets and objects and tracking them for proper cleanup.
This includes prefixing all objects with a unique value and deleting all objects after tests finish
to avoid collisions with multiple instances using the same bucket.
To use this class with another test class, simply inherit from this rather than TestCase.
To extend setUp behavior in a child class, define them as normal for a TestCase then call
super().setUp() before adding additional setup steps (same process for tearDown)
"""
def setUp(self) -> None:
self.bck_name = random_string()
self.client = Client(CLUSTER_ENDPOINT)
self.buckets = []
self.obj_prefix = f"{self._testMethodName}-{random_string(6)}-"
if REMOTE_SET:
self.cloud_objects = []
provider, bck_name = REMOTE_BUCKET.split("://")
self.bucket = self.client.bucket(bck_name, provider=provider)
self.provider = provider
else:
self.provider = PROVIDER_AIS
self.bucket = self._create_bucket(self.bck_name)
def tearDown(self) -> None:
"""
Cleanup after each test, destroy the bucket if it exists
"""
if REMOTE_SET:
entries = self.bucket.list_all_objects(prefix=self.obj_prefix)
obj_names = [entry.name for entry in entries]
obj_names.extend(self.cloud_objects)
if len(obj_names) > 0:
job_id = self.bucket.objects(obj_names=obj_names).delete()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT_LONG)
for bck in self.buckets:
destroy_bucket(self.client, bck)
def _create_bucket(self, bck_name, provider=PROVIDER_AIS):
"""
Create a bucket and store its name for later cleanup
Args:
bck_name: Name of new bucket
provider: Provider for new bucket
"""
bck = self.client.bucket(bck_name, provider=provider)
self.buckets.append(bck_name)
bck.create()
return bck
def _create_objects(self, num_obj=OBJECT_COUNT, suffix=""):
"""
Create a list of objects using a unique test prefix and track them for later cleanup
Args:
num_obj: Number of objects to create
suffix: Optional suffix for each object name
"""
obj_names = create_and_put_objects(
self.client,
self.bucket,
self.obj_prefix,
suffix,
num_obj,
)
if REMOTE_SET:
self.cloud_objects.extend(obj_names)
return obj_names
def _check_all_objects_cached(self, num_obj, expected_cached):
"""
List all objects with this test prefix and validate the cache status
Args:
num_obj: Number of objects we expect to find
expected_cached: Whether we expect them to be cached
"""
objects = self.bucket.list_objects(
props="name,cached", prefix=self.obj_prefix
).entries
self.assertEqual(num_obj, len(objects))
self._validate_objects_cached(objects, expected_cached)
def _validate_objects_cached(self, objects, expected_cached):
"""
Validate that all objects provided are either cached or not
Args:
objects: List of objects to check
expected_cached: Whether we expect them to be cached
"""
for obj in objects:
self.assertTrue(obj.is_ok())
if expected_cached:
self.assertTrue(obj.is_cached())
else:
self.assertFalse(obj.is_cached())
| aistore-master | python/tests/integration/sdk/remote_enabled_test.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import hashlib
import unittest
import tarfile
import io
import pytest
from aistore.sdk.const import PROVIDER_AIS
from aistore.sdk.errors import InvalidBckProvider
from tests.integration import REMOTE_SET, TEST_TIMEOUT, OBJECT_COUNT
from tests.integration.sdk.remote_enabled_test import RemoteEnabledTest
from tests.utils import random_string
# pylint: disable=unused-variable,too-many-instance-attributes
class TestObjectGroupOps(RemoteEnabledTest):
def setUp(self) -> None:
super().setUp()
self.obj_names = self._create_objects(suffix="-suffix")
def test_delete(self):
object_group = self.bucket.objects(obj_names=self.obj_names[1:])
job_id = object_group.delete()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
existing_objects = self.bucket.list_objects(prefix=self.obj_prefix).entries
self.assertEqual(1, len(existing_objects))
self.assertEqual(self.obj_names[0], existing_objects[0].name)
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_evict(self):
object_group = self.bucket.objects(obj_names=self.obj_names[1:])
job_id = object_group.evict()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
self._verify_cached_objects(OBJECT_COUNT, [0])
def test_evict_objects_local(self):
local_bucket = self.client.bucket(random_string(), provider=PROVIDER_AIS)
with self.assertRaises(InvalidBckProvider):
local_bucket.objects(obj_names=[]).evict()
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_prefetch_list(self):
obj_group = self.bucket.objects(obj_names=self.obj_names[1:])
self._evict_all_objects()
# Fetch back a specific object group and verify cache status
job_id = obj_group.prefetch()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT * 2)
self._verify_cached_objects(OBJECT_COUNT, range(1, OBJECT_COUNT))
def test_prefetch_objects_local(self):
local_bucket = self.client.bucket(random_string(), provider=PROVIDER_AIS)
with self.assertRaises(InvalidBckProvider):
local_bucket.objects(obj_names=[]).prefetch()
def test_copy_objects(self):
to_bck_name = "destination-bucket"
to_bck = self._create_bucket(to_bck_name)
self.assertEqual(0, len(to_bck.list_all_objects(prefix=self.obj_prefix)))
self.assertEqual(
OBJECT_COUNT, len(self.bucket.list_all_objects(prefix=self.obj_prefix))
)
new_prefix = "prefix-"
copy_job = self.bucket.objects(obj_names=self.obj_names[1:5]).copy(
to_bck, prepend=new_prefix
)
self.client.job(job_id=copy_job).wait_for_idle(timeout=TEST_TIMEOUT)
self.assertEqual(
4, len(to_bck.list_all_objects(prefix=new_prefix + self.obj_prefix))
)
def test_archive_objects_without_copy(self):
arch_name = self.obj_prefix + "-archive-without-copy.tar"
self._archive_exec_assert(arch_name, self.bucket, self.bucket)
def test_archive_objects_with_copy(self):
arch_name = self.obj_prefix + "-archive-with-copy.tar"
dest_bck = self._create_bucket(random_string())
self._archive_exec_assert(arch_name, self.bucket, dest_bck, to_bck=dest_bck)
def _archive_exec_assert(self, arch_name, src_bck, res_bck, **kwargs):
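        # Archive the selected objects into a single tar object named arch_name; extra kwargs
        # (e.g. to_bck=) are forwarded to the archive() call, so the resulting archive may be
        # written to a bucket other than the source.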
# Add to object list to clean up on test finish
if res_bck.provider != PROVIDER_AIS:
self.cloud_objects.append(arch_name)
archived_names = self.obj_names[1:5]
expected_contents = {}
for name in archived_names:
expected_contents[name] = src_bck.object(obj_name=name).get().read_all()
arch_job = src_bck.objects(obj_names=archived_names).archive(
archive_name=arch_name, **kwargs
)
self.client.job(job_id=arch_job).wait_for_idle(timeout=TEST_TIMEOUT)
# Read the tar archive and assert the object names and contents match
res_bytes = res_bck.object(arch_name).get().read_all()
with tarfile.open(fileobj=io.BytesIO(res_bytes), mode="r") as tar:
member_names = []
for member in tar.getmembers():
inner_file = tar.extractfile(member)
self.assertEqual(expected_contents[member.name], inner_file.read())
inner_file.close()
member_names.append(member.name)
self.assertEqual(set(archived_names), set(member_names))
@pytest.mark.etl
def test_transform_objects(self):
# Define an etl with code that hashes the contents of each object
etl_name = "etl-" + random_string(5)
def transform(input_bytes):
md5 = hashlib.md5()
md5.update(input_bytes)
return md5.hexdigest().encode()
md5_etl = self.client.etl(etl_name)
md5_etl.init_code(transform=transform)
to_bck_name = "destination-bucket"
to_bck = self._create_bucket(to_bck_name)
new_prefix = "prefix-"
self.assertEqual(0, len(to_bck.list_all_objects(prefix=self.obj_prefix)))
self.assertEqual(
OBJECT_COUNT, len(self.bucket.list_all_objects(prefix=self.obj_prefix))
)
transform_job = self.bucket.objects(obj_names=self.obj_names).transform(
to_bck, etl_name=md5_etl.name, prepend=new_prefix
)
self.client.job(job_id=transform_job).wait_for_idle(timeout=TEST_TIMEOUT)
# Get the md5 transform of each source object and verify the destination bucket contains those results
from_obj_hashes = [
transform(self.bucket.object(name).get().read_all())
for name in self.obj_names
]
to_obj_values = [
to_bck.object(new_prefix + name).get().read_all() for name in self.obj_names
]
self.assertEqual(to_obj_values, from_obj_hashes)
def _evict_all_objects(self):
job_id = self.bucket.objects(obj_names=self.obj_names).evict()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
self._check_all_objects_cached(OBJECT_COUNT, expected_cached=False)
def _verify_cached_objects(self, expected_object_count, cached_range):
"""
List each of the objects and verify the correct count and that all objects matching
the cached range are cached and all others are not
Args:
expected_object_count: expected number of objects to list
cached_range: object indices that should be cached, all others should not
"""
objects = self.bucket.list_objects(
props="name,cached", prefix=self.obj_prefix
).entries
self.assertEqual(expected_object_count, len(objects))
cached_names = {self.obj_prefix + str(x) + "-suffix" for x in cached_range}
cached_objs = []
evicted_objs = []
for obj in objects:
if obj.name in cached_names:
cached_objs.append(obj)
else:
evicted_objs.append(obj)
self._validate_objects_cached(cached_objs, True)
self._validate_objects_cached(evicted_objs, False)
| aistore-master | python/tests/integration/sdk/test_object_group_ops.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Default provider is AIS, so all Cloud-related tests are skipped.
import unittest
from aistore.sdk import Client
from aistore.sdk.const import ACT_COPY_OBJECTS
from tests.integration import CLUSTER_ENDPOINT
from tests.utils import random_string
class TestClusterOps(unittest.TestCase): # pylint: disable=unused-variable
def setUp(self) -> None:
self.client = Client(CLUSTER_ENDPOINT)
def test_health_success(self):
self.assertEqual(Client(CLUSTER_ENDPOINT).cluster().is_aistore_running(), True)
def test_health_failure(self):
        # URL not existing or URL down
self.assertEqual(
Client("http://localhost:1234").cluster().is_aistore_running(), False
)
def test_cluster_map(self):
smap = self.client.cluster().get_info()
self.assertIsNotNone(smap)
self.assertIsNotNone(smap.proxy_si)
self.assertNotEqual(len(smap.pmap), 0)
self.assertNotEqual(len(smap.tmap), 0)
self.assertNotEqual(smap.version, 0)
        self.assertNotEqual(smap.uuid, "")
def _check_jobs_in_result(self, expected_jobs, res, missing_jobs=None):
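        # Assert that every expected job UUID appears in the listed results and, when given,
        # that none of the missing_jobs UUIDs do.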
job_ids = [job.uuid for job in res]
for job in expected_jobs:
self.assertTrue(job in job_ids)
if not missing_jobs:
return
for job in missing_jobs:
self.assertFalse(job in job_ids)
def test_list_jobs_status(self):
job_kind = "lru"
job_1_id = self.client.job(job_kind=job_kind).start()
job_2_id = self.client.job(job_kind=job_kind).start()
job_3_id = self.client.job(job_kind="cleanup").start()
self._check_jobs_in_result(
[job_1_id, job_2_id], self.client.cluster().list_jobs_status()
)
self._check_jobs_in_result(
[job_1_id, job_2_id],
self.client.cluster().list_jobs_status(job_kind=job_kind),
[job_3_id],
)
def test_list_running_jobs(self):
# First generate a multi-obj copy job that will stay "running" (but idle) long enough to query
bck_name = random_string()
new_bck_name = random_string()
obj_name = random_string()
bck = self.client.bucket(bck_name).create()
new_bck = self.client.bucket(new_bck_name).create()
try:
bck.object(obj_name).put_content("any content")
idle_job = bck.objects(obj_names=[obj_name]).copy(to_bck=new_bck)
expected_res = f"{ACT_COPY_OBJECTS}[{idle_job}]"
self.assertIn(expected_res, self.client.cluster().list_running_jobs())
self.assertIn(
expected_res,
self.client.cluster().list_running_jobs(job_kind=ACT_COPY_OBJECTS),
)
self.assertNotIn(
expected_res, self.client.cluster().list_running_jobs(job_kind="lru")
)
finally:
bck.delete()
new_bck.delete()
| aistore-master | python/tests/integration/sdk/test_cluster_ops.py |
#
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
import unittest
from tests.integration.sdk.remote_enabled_test import RemoteEnabledTest
from tests.integration import REMOTE_SET, TEST_TIMEOUT, OBJECT_COUNT
class TestJobOps(RemoteEnabledTest): # pylint: disable=unused-variable
def test_job_start_wait(self):
job_id = self.client.job(job_kind="lru").start()
self.client.job(job_id=job_id).wait()
self.assertNotEqual(0, self.client.job(job_id=job_id).status().end_time)
def test_job_wait_for_idle(self):
obj_names = self._create_objects()
existing_names = {
obj.name for obj in self.bucket.list_objects(prefix=self.obj_prefix).entries
}
self.assertEqual(set(obj_names), existing_names)
# Start a deletion job that will reach an idle state when it finishes
job_id = self.bucket.objects(obj_names=obj_names).delete()
self.client.job(job_id).wait_for_idle(timeout=TEST_TIMEOUT)
self.assertEqual(
0, len(self.bucket.list_objects(prefix=self.obj_prefix).entries)
)
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_async_job_wait_for_idle(self):
obj_names = self._create_objects()
obj_group = self.bucket.objects(obj_names=obj_names)
job_id = obj_group.evict()
self.client.job(job_id).wait_for_idle(timeout=TEST_TIMEOUT)
self._check_all_objects_cached(OBJECT_COUNT, False)
job_id = obj_group.prefetch()
self.client.job(job_id).wait_for_idle(timeout=TEST_TIMEOUT)
self._check_all_objects_cached(OBJECT_COUNT, True)
def test_job_wait(self):
object_names = self._create_objects()
# Delete does not idle when finished
job_id = self.bucket.objects(obj_names=object_names).delete()
self.client.job(job_id=job_id).wait(timeout=TEST_TIMEOUT)
# Check that objects do not exist
existing_obj = [entry.name for entry in self.bucket.list_all_objects()]
for name in object_names:
self.assertNotIn(name, existing_obj)
if __name__ == "__main__":
unittest.main()
| aistore-master | python/tests/integration/sdk/test_job_ops.py |
#
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
import random
import unittest
from pathlib import Path
from aistore.sdk.const import AIS_VERSION, HEADER_CONTENT_LENGTH, UTF_ENCODING
from aistore.sdk import Client
from tests.utils import (
create_and_put_object,
random_string,
destroy_bucket,
cleanup_local,
)
from tests.integration import CLUSTER_ENDPOINT
OBJ_READ_TYPE_ALL = "read_all"
OBJ_READ_TYPE_CHUNK = "chunk"
OBJ_NAME = "test-object"
# pylint: disable=unused-variable
class TestObjectOps(unittest.TestCase):
def setUp(self) -> None:
self.bck_name = random_string()
self.client = Client(CLUSTER_ENDPOINT)
self.bucket = self.client.bucket(self.bck_name)
self.bucket.create()
self.local_test_files = (
Path().absolute().joinpath("object-ops-test-" + random_string(8))
)
def tearDown(self) -> None:
# Try to destroy bucket if there is one left.
destroy_bucket(self.client, self.bck_name)
# Cleanup local files at end
cleanup_local(str(self.local_test_files))
def _test_get_obj(self, read_type, obj_name, exp_content):
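        # get() returns a streaming reader whose attributes expose the object's size and
        # checksum; the payload can be consumed via read_all() or by iterating in chunks of
        # the requested chunk_size.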
chunk_size = random.randrange(1, len(exp_content) + 10)
stream = self.bucket.object(obj_name).get(chunk_size=chunk_size)
self.assertEqual(stream.attributes.size, len(exp_content))
self.assertNotEqual(stream.attributes.checksum_type, "")
self.assertNotEqual(stream.attributes.checksum_value, "")
self.assertNotEqual(stream.attributes.access_time, "")
self.assertNotEqual(stream.attributes.obj_version, "")
self.assertEqual(stream.attributes.custom_metadata, {})
if read_type == OBJ_READ_TYPE_ALL:
obj = stream.read_all()
else:
obj = b""
for chunk in stream:
obj += chunk
self.assertEqual(obj, exp_content)
def _put_objects(self, num_obj):
name_to_content = {}
for i in range(num_obj):
obj_name = f"obj{ i }"
content = create_and_put_object(
client=self.client, bck_name=self.bck_name, obj_name=obj_name
)
name_to_content[obj_name] = content
return name_to_content
def test_put_content(self):
content = b"content for the object"
obj = self.bucket.object(OBJ_NAME)
obj.put_content(content)
res = obj.get()
self.assertEqual(content, res.read_all())
def test_put_file(self):
self.local_test_files.mkdir()
content = b"content for the object"
filename = self.local_test_files.joinpath("test_file")
with open(filename, "wb") as writer:
writer.write(content)
obj = self.bucket.object(OBJ_NAME)
obj.put_file(filename)
res = obj.get()
self.assertEqual(content, res.read_all())
def test_put_file_invalid(self):
with self.assertRaises(ValueError):
self.bucket.object("any").put_file("non-existent-file")
self.local_test_files.mkdir()
inner_dir = self.local_test_files.joinpath("inner_dir_not_file")
inner_dir.mkdir()
with self.assertRaises(ValueError):
self.bucket.object("any").put_file(inner_dir)
def test_put_head_get(self):
objects = self._put_objects(5)
for obj_name, content in objects.items():
properties = self.bucket.object(obj_name).head()
self.assertEqual(properties[AIS_VERSION], "1")
self.assertEqual(properties[HEADER_CONTENT_LENGTH], str(len(content)))
for option in [OBJ_READ_TYPE_ALL, OBJ_READ_TYPE_CHUNK]:
self._test_get_obj(option, obj_name, content)
def test_get_with_writer(self):
self.local_test_files.mkdir()
filename = self.local_test_files.joinpath("test_get_with_writer.txt")
objects = self._put_objects(10)
all_content = b""
for obj_name, content in objects.items():
# Pass a writer that appends to a file
with open(filename, "ab") as writer:
self.bucket.object(obj_name).get(writer=writer)
all_content += content
# Verify file contents are written from each object
with open(filename, "rb") as reader:
output = reader.read()
self.assertEqual(all_content, output)
filename.unlink()
@unittest.skipIf(
"localhost" not in CLUSTER_ENDPOINT and "127.0.0.1" not in CLUSTER_ENDPOINT,
"Cannot test promote without access to AIS cluster file storage",
)
# pylint: disable=too-many-locals
def test_promote(self):
self.local_test_files.mkdir()
top_folder = self.local_test_files.joinpath("promote_folder")
top_item = "test_file_top"
top_item_contents = "contents in the test file"
inner_folder = "inner_folder"
inner_item = "test_file_inner"
inner_item_contents = "contents of the file in the inner folder"
# Create a folder in the current directory
local_files_path = Path().absolute().joinpath(top_folder)
local_files_path.mkdir()
with open(
local_files_path.joinpath(top_item), "w", encoding=UTF_ENCODING
) as file:
file.write(top_item_contents)
inner_folder = local_files_path.joinpath(inner_folder)
inner_folder.mkdir()
with open(
inner_folder.joinpath(inner_item), "w", encoding=UTF_ENCODING
) as file:
file.write(inner_item_contents)
# Promote to AIS bucket
obj_name = "promoted_obj"
self.bucket.object(obj_name).promote(str(local_files_path))
# Check bucket, only top object is promoted
self.assertEqual(1, len(self.bucket.list_all_objects()))
top_object = self.bucket.object(obj_name + "/" + top_item).get()
self.assertEqual(top_item_contents, top_object.read_all().decode(UTF_ENCODING))
# Update local top item contents
top_item_updated_contents = "new content in top file overwritten"
with open(
local_files_path.joinpath(top_item), "w", encoding=UTF_ENCODING
) as file:
file.write(top_item_updated_contents)
# Promote with recursion, delete source, overwrite destination
self.bucket.object(obj_name).promote(
str(local_files_path),
recursive=True,
delete_source=True,
overwrite_dest=True,
)
# Check bucket, both objects promoted, top overwritten
self.assertEqual(2, len(self.bucket.list_all_objects()))
expected_top_obj = obj_name + "/" + top_item
top_obj = self.bucket.object(expected_top_obj).get()
self.assertEqual(
top_item_updated_contents, top_obj.read_all().decode(UTF_ENCODING)
)
inner_obj = self.bucket.object(obj_name + "/inner_folder/" + inner_item).get()
self.assertEqual(inner_item_contents, inner_obj.read_all().decode(UTF_ENCODING))
# Check source deleted
top_level_files = [
f
for f in Path(top_folder).glob("*")
if Path(top_folder).joinpath(f).is_file()
]
self.assertEqual(0, len(top_level_files))
self.assertEqual(0, len(list(inner_folder.glob("*"))))
def test_delete(self):
bucket_size = 10
delete_cnt = 7
for obj_id in range(bucket_size):
create_and_put_object(
self.client, bck_name=self.bck_name, obj_name=f"obj-{ obj_id }"
)
objects = self.bucket.list_objects()
self.assertEqual(len(objects.entries), bucket_size)
for obj_id in range(delete_cnt):
self.bucket.object(f"obj-{ obj_id + 1 }").delete()
objects = self.bucket.list_objects()
self.assertEqual(len(objects.entries), bucket_size - delete_cnt)
| aistore-master | python/tests/integration/sdk/test_object_ops.py |
aistore-master | python/tests/integration/pytorch/__init__.py |
|
"""
Test class for AIStore PyTorch Plugin
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
import unittest
import torchdata.datapipes.iter as torch_pipes
from aistore.sdk import Client
from aistore.sdk.errors import AISError, ErrBckNotFound
from aistore.pytorch import AISFileLister, AISFileLoader
from tests.integration import CLUSTER_ENDPOINT
from tests.utils import create_and_put_object, random_string, destroy_bucket
# pylint: disable=unused-variable
class TestPytorchPlugin(unittest.TestCase):
"""
Integration tests for the Pytorch plugin
"""
def setUp(self) -> None:
self.bck_name = random_string()
self.client = Client(CLUSTER_ENDPOINT)
self.client.bucket(self.bck_name).create()
def tearDown(self) -> None:
"""
Cleanup after each test, destroy the bucket if it exists
"""
destroy_bucket(self.client, self.bck_name)
def test_filelister_with_prefix_variations(self):
num_objs = 10
# create 10 objects in the /temp dir
for i in range(num_objs):
create_and_put_object(
self.client, bck_name=self.bck_name, obj_name=f"temp/obj{ i }"
)
# create 10 objects in the / dir
for i in range(num_objs):
obj_name = f"obj{ i }"
create_and_put_object(
self.client, bck_name=self.bck_name, obj_name=obj_name
)
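        # Each prefix variation below (bucket only, bucket with a trailing slash, and the pair
        # of narrower "temp/" and "obj" prefixes) should enumerate all 20 created objects.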
prefixes = [
["ais://" + self.bck_name],
["ais://" + self.bck_name + "/"],
["ais://" + self.bck_name + "/temp/", "ais://" + self.bck_name + "/obj"],
]
for prefix in prefixes:
urls = AISFileLister(url=CLUSTER_ENDPOINT, source_datapipe=prefix)
ais_loader = AISFileLoader(url=CLUSTER_ENDPOINT, source_datapipe=urls)
with self.assertRaises(TypeError):
len(urls)
self.assertEqual(len(list(urls)), 20)
self.assertEqual(sum(1 for _ in ais_loader), 20)
def test_incorrect_inputs(self):
prefixes = ["ais://asdasd"]
# AISFileLister: Bucket not found
try:
list(AISFileLister(url=CLUSTER_ENDPOINT, source_datapipe=prefixes))
except ErrBckNotFound as err:
self.assertEqual(err.status_code, 404)
# AISFileLoader: incorrect inputs
url_list = [[""], ["ais:"], ["ais://"], ["s3:///unkown-bucket"]]
for url in url_list:
with self.assertRaises(AISError):
s3_loader_dp = AISFileLoader(url=CLUSTER_ENDPOINT, source_datapipe=url)
for _ in s3_loader_dp:
pass
def test_torch_library(self):
# Tests the torch library imports of aistore
torch_pipes.AISFileLister(
url=CLUSTER_ENDPOINT, source_datapipe=["ais://" + self.bck_name]
)
torch_pipes.AISFileLoader(
url=CLUSTER_ENDPOINT, source_datapipe=["ais://" + self.bck_name]
)
if __name__ == "__main__":
unittest.main()
| aistore-master | python/tests/integration/pytorch/test_pytorch_plugin.py |
from unittest import TestCase
import boto3
# pylint: disable=unused-import,unused-variable
from aistore.botocore_patch import botocore
from aistore.sdk.const import UTF_ENCODING
from tests import AWS_ACCESS_KEY_ID, AWS_SESSION_TOKEN, AWS_SECRET_ACCESS_KEY
from tests.integration import CLUSTER_ENDPOINT
from tests.integration.boto3 import NUM_BUCKETS, NUM_OBJECTS, OBJECT_LENGTH, AWS_REGION
from tests.utils import random_string
class BotoTest(TestCase):
def setUp(self) -> None:
"""
Test basic s3 operations on an AIS cluster using the boto3 client
"""
self.client = boto3.client(
"s3",
region_name=AWS_REGION,
endpoint_url=CLUSTER_ENDPOINT + "/s3",
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
aws_session_token=AWS_SESSION_TOKEN,
)
self.clean_up()
def tearDown(self) -> None:
self.clean_up()
def test_create_bucket(self):
bucket_names = {random_string(20) for _ in range(NUM_BUCKETS)}
for name in bucket_names:
self.client.create_bucket(Bucket=name)
existing_buckets = self.client.list_buckets()
existing_bucket_names = {b.get("Name") for b in existing_buckets.get("Buckets")}
self.assertEqual(bucket_names, existing_bucket_names)
def test_update_read_bucket(self):
bucket_name = self.create_bucket()
objects = [(str(i), random_string(OBJECT_LENGTH)) for i in range(NUM_OBJECTS)]
for key, body in objects:
self.client.put_object(Bucket=bucket_name, Key=key, Body=body)
existing_objects = [
self.client.get_object(Bucket=bucket_name, Key=key)
.get("Body")
.read()
.decode(UTF_ENCODING)
for key, body in objects
]
object_bodies = [body for key, body in objects]
self.assertEqual(object_bodies, existing_objects)
def test_delete_bucket(self):
bucket_name = self.create_bucket()
self.client.delete_bucket(Bucket=bucket_name)
self.assertEqual([], self.client.list_buckets().get("Buckets"))
def test_multipart_upload(self):
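        # Multipart upload flow against the AIS S3 endpoint: create_multipart_upload returns an
        # UploadId, each chunk is sent via upload_part with a 1-based PartNumber, and
        # complete_multipart_upload stitches the listed parts back into a single object.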
key = "object-name"
data_len = 100
num_parts = 4
chunk_size = int(data_len / num_parts)
data = random_string(data_len)
parts = [data[i * chunk_size : (i + 1) * chunk_size] for i in range(num_parts)]
bucket_name = self.create_bucket()
response = self.client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response.get("UploadId")
offset = 0
for part_num, part_data in enumerate(parts):
self.client.upload_part(
Body=part_data,
Bucket=bucket_name,
Key=key,
PartNumber=part_num + 1,
UploadId=upload_id,
)
offset += len(part_data)
self.client.complete_multipart_upload(
Bucket=bucket_name,
Key=key,
UploadId=upload_id,
MultipartUpload={
"Parts": [{"PartNumber": part_num + 1} for part_num in range(num_parts)]
},
)
response = self.client.get_object(Bucket=bucket_name, Key=key)
uploaded_data = response["Body"].read().decode(UTF_ENCODING)
self.assertEqual(data, uploaded_data)
def create_bucket(self):
bucket_name = random_string(20)
self.client.create_bucket(Bucket=bucket_name)
return bucket_name
def clean_up(self):
existing_bucket_names = [
b.get("Name") for b in self.client.list_buckets().get("Buckets")
]
for bucket in existing_bucket_names:
self.client.delete_bucket(Bucket=bucket)
| aistore-master | python/tests/integration/boto3/test_bucket_ops.py |
import os
NUM_BUCKETS = int(os.environ.get("BOTOTEST_NUM_BUCKETS", 10))
NUM_OBJECTS = int(os.environ.get("BOTOTEST_NUM_OBJECTS", 20))
OBJECT_LENGTH = int(os.environ.get("BOTOTEST_OBJECT_LENGTH", 1000))
AWS_REGION = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
| aistore-master | python/tests/integration/boto3/__init__.py |
#
# The functions in this file are modified under the Apache 2.0 license as provided by minio
# The original code can be found at https://github.com/minio/minio-py
#
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import time
import traceback
from inspect import getfullargspec
from minio import Minio
from minio.error import S3Error
from tests import LimitedRandomReader, MB
import tests
class TestFailed(Exception):
"""Indicate test failed error."""
# pylint: disable=unused-variable,protected-access
def init_tests(server_endpoint, access_key, secret_key):
tests._CLIENT = Minio(server_endpoint, access_key, secret_key, secure=False)
test_file = "datafile-1-MB"
large_file = "datafile-11-MB"
with open(test_file, "wb") as file_data:
shutil.copyfileobj(LimitedRandomReader(1 * MB), file_data)
with open(large_file, "wb") as file_data:
shutil.copyfileobj(LimitedRandomReader(11 * MB), file_data)
tests._TEST_FILE = test_file
tests._LARGE_FILE = large_file
def call_test(func, strict):
"""Execute given test function."""
log_entry = {
"name": func.__name__,
"status": "PASS",
}
start_time = time.time()
try:
func(log_entry)
except S3Error as exc:
if exc.code == "NotImplemented":
log_entry["alert"] = "Not Implemented"
log_entry["status"] = "NA"
else:
log_entry["message"] = f"{exc}"
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
except Exception as exc: # pylint: disable=broad-except
log_entry["message"] = f"{exc}"
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
if log_entry.get("method"):
# pylint: disable=deprecated-method
args_string = ", ".join(getfullargspec(log_entry["method"]).args[1:])
log_entry["function"] = f"{log_entry['method'].__name__}({args_string})"
log_entry["args"] = {k: v for k, v in log_entry.get("args", {}).items() if v}
log_entry["duration"] = int(round((time.time() - start_time) * 1000))
log_entry["name"] = "minio-py:" + log_entry["name"]
log_entry["method"] = None
print(json.dumps({k: v for k, v in log_entry.items() if v}))
print()
if strict and log_entry["status"] == "FAIL":
raise TestFailed()
| aistore-master | python/tests/s3compat/minio_helpers.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
import os
import minio_helpers
import tests as minio_test
# pylint: disable=unused-variable,undefined-variable
run_all = os.getenv("S3_COMPAT_RUN_ALL", "True").lower() in ("true", "t", "1")
strict = os.getenv("S3_COMPAT_STRICT", "False").lower() in ("true", "t", "1")
access_key = os.getenv("ACCESS_KEY", "testing")
secret_key = os.getenv("SECRET_KEY", "testing")
server_endpoint = os.getenv("AIS_ENDPOINT", "localhost:8080")
test_list = os.getenv("S3_COMPAT_TEST_LIST")
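# S3_COMPAT_TEST_LIST, when set, is a comma-separated list of test function names from the
# minio test module (e.g. "test_get_object,test_remove_bucket") and overrides the default set.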
minio_helpers.init_tests(server_endpoint, access_key, secret_key)
# Always include verified tests
tests = [
minio_test.test_make_bucket_default_region,
minio_test.test_get_object,
minio_test.test_get_object_version,
minio_test.test_presigned_post_policy,
minio_test.test_get_bucket_notification,
minio_test.test_remove_object,
minio_test.test_remove_object_version,
minio_test.test_remove_bucket,
minio_test.test_make_bucket_with_region,
minio_test.test_negative_make_bucket_invalid_name,
minio_test.test_fput_object_small_file,
minio_test.test_fput_object_large_file,
minio_test.test_fput_object_with_content_type,
minio_test.test_copy_object_etag_match,
minio_test.test_copy_object_negative_etag_match,
minio_test.test_copy_object_modified_since,
minio_test.test_copy_object_unmodified_since,
minio_test.test_negative_put_object_with_path_segment,
minio_test.test_fget_object,
minio_test.test_fget_object_version,
minio_test.test_get_object_with_default_length,
minio_test.test_get_partial_object,
minio_test.test_thread_safe,
]
unverified_tests = [
minio_test.test_list_buckets,
minio_test.test_copy_object_no_copy_condition,
minio_test.test_copy_object_with_metadata,
minio_test.test_put_object,
minio_test.test_stat_object,
minio_test.test_stat_object_version,
minio_test.test_list_objects_v1,
minio_test.test_list_object_v1_versions,
minio_test.test_list_objects_with_prefix,
minio_test.test_list_objects_with_1001_files,
minio_test.test_list_objects,
minio_test.test_list_object_versions,
minio_test.test_presigned_get_object_default_expiry,
minio_test.test_presigned_get_object_expiry,
minio_test.test_presigned_get_object_response_headers,
minio_test.test_presigned_get_object_version,
minio_test.test_presigned_put_object_default_expiry,
minio_test.test_presigned_put_object_expiry,
minio_test.test_get_bucket_policy,
minio_test.test_set_bucket_policy_readonly,
minio_test.test_set_bucket_policy_readwrite,
minio_test.test_select_object_content,
minio_test.test_remove_objects,
minio_test.test_remove_object_versions,
]
if run_all:
tests.extend(unverified_tests)
if test_list:
    test_names = test_list.split(",")
tests = [getattr(minio_test, test_name) for test_name in test_names]
for test_name in tests:
minio_helpers.call_test(test_name, strict)
| aistore-master | python/tests/s3compat/run_tests.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines,unused-variable,implicit-str-concat
"""Functional tests of minio-py."""
from __future__ import absolute_import, division
import hashlib
import io
import json
import math
import os
import random
import shutil
import sys
import tempfile
import time
import traceback
from binascii import crc32
from datetime import datetime, timedelta, timezone
from inspect import getfullargspec
from threading import Thread
from uuid import uuid4
import certifi
import urllib3
from minio import Minio
from minio.commonconfig import ENABLED, REPLACE, CopySource
from minio.datatypes import PostPolicy
from minio.deleteobjects import DeleteObject
from minio.error import S3Error
from minio.select import CSVInputSerialization, CSVOutputSerialization, SelectRequest
from minio.sse import SseCustomerKey
from minio.time import to_http_header
from minio.versioningconfig import VersioningConfig
_CLIENT = None # initialized in main().
_TEST_FILE = None # initialized in main().
_LARGE_FILE = None # initialized in main().
_IS_AWS = None # initialized in main().
KB = 1024
MB = 1024 * KB
HTTP = urllib3.PoolManager(
cert_reqs="CERT_REQUIRED",
ca_certs=os.environ.get("SSL_CERT_FILE") or certifi.where(),
)
def _gen_bucket_name():
"""Generate random bucket name."""
return f"minio-py-test-{uuid4()}"
def _get_sha256sum(filename):
"""Get SHA-256 checksum of given file."""
with open(filename, "rb") as file:
contents = file.read()
return hashlib.sha256(contents).hexdigest()
def _get_random_string(size):
"""Get random string of given size."""
if not size:
return ""
chars = "abcdefghijklmnopqrstuvwxyz"
chars *= int(math.ceil(size / len(chars)))
chars = list(chars[:size])
random.shuffle(chars)
return "".join(chars)
class LimitedRandomReader: # pylint: disable=too-few-public-methods
"""Random data reader of specified size."""
def __init__(self, limit):
self._limit = limit
def read(self, size=64 * KB):
"""Read random data of specified size."""
if size < 0 or size > self._limit:
size = self._limit
data = _get_random_string(size)
self._limit -= size
return data.encode()
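# LimitedRandomReader behaves like a file object for uploads: e.g. LimitedRandomReader(1 * MB)
# can be passed as the data argument to put_object() with length=1 * MB, and read() returns
# b"" once the size budget is exhausted.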
def _call(log_entry, func, *args, **kwargs):
"""Execute given function."""
log_entry["method"] = func
return func(*args, **kwargs)
class TestFailed(Exception):
"""Indicate test failed error."""
def _call_test(func, *args, **kwargs):
"""Execute given test function."""
log_entry = {
"name": func.__name__,
"status": "PASS",
}
start_time = time.time()
try:
func(log_entry, *args, **kwargs)
except S3Error as exc:
if exc.code == "NotImplemented":
log_entry["alert"] = "Not Implemented"
log_entry["status"] = "NA"
else:
log_entry["message"] = f"{exc}"
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
except Exception as exc: # pylint: disable=broad-except
log_entry["message"] = f"{exc}"
log_entry["error"] = traceback.format_exc()
log_entry["status"] = "FAIL"
if log_entry.get("method"):
# pylint: disable=deprecated-method
args_string = ", ".join(getfullargspec(log_entry["method"]).args[1:])
log_entry["function"] = f"{log_entry['method'].__name__}({args_string})"
log_entry["args"] = {k: v for k, v in log_entry.get("args", {}).items() if v}
log_entry["duration"] = int(round((time.time() - start_time) * 1000))
log_entry["name"] = "minio-py:" + log_entry["name"]
log_entry["method"] = None
print(json.dumps({k: v for k, v in log_entry.items() if v}))
if log_entry["status"] == "FAIL":
raise TestFailed()
def test_make_bucket_default_region(log_entry):
"""Test make_bucket() with default region."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
"location": "default value ('us-east-1')", # Default location
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_make_bucket_with_region(log_entry):
"""Test make_bucket() with region."""
# Only test make bucket with region against AWS S3
if not _IS_AWS:
return
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# A non-default location
location = "us-west-1"
log_entry["args"] = {
"bucket_name": bucket_name,
"location": location,
}
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name, location)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, bucket_name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
# Test passes
log_entry["method"] = _CLIENT.make_bucket
def test_negative_make_bucket_invalid_name(log_entry): # pylint: disable=invalid-name
"""Test make_bucket() with invalid bucket name."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Default location
log_entry["args"] = {
"location": "default value ('us-east-1')",
}
# Create an array of invalid bucket names to test
invalid_bucket_name_list = [
bucket_name + ".",
"." + bucket_name,
bucket_name + "...abcd",
]
for name in invalid_bucket_name_list:
log_entry["args"]["bucket_name"] = name
try:
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, name)
# Check if bucket was created properly
_call(log_entry, _CLIENT.bucket_exists, name)
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, name)
except ValueError:
pass
# Test passes
log_entry["method"] = _CLIENT.make_bucket
log_entry["args"]["bucket_name"] = invalid_bucket_name_list
def test_list_buckets(log_entry):
"""Test list_buckets()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
# Create a bucket with default bucket location
_call(log_entry, _CLIENT.make_bucket, bucket_name)
try:
buckets = _CLIENT.list_buckets()
for bucket in buckets:
# bucket object should be of a valid value.
if bucket.name and bucket.creation_date:
continue
raise ValueError("list_bucket api failure")
finally:
# Remove bucket
_call(log_entry, _CLIENT.remove_bucket, bucket_name)
def test_select_object_content(log_entry):
"""Test select_object_content()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
csvfile = "test.csv"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": csvfile,
}
try:
_CLIENT.make_bucket(bucket_name)
content = io.BytesIO(b"col1,col2,col3\none,two,three\nX,Y,Z\n")
_CLIENT.put_object(bucket_name, csvfile, content, len(content.getvalue()))
request = SelectRequest(
"select * from s3object",
CSVInputSerialization(),
CSVOutputSerialization(),
request_progress=True,
)
data = _CLIENT.select_object_content(bucket_name, csvfile, request)
# Get the records
records = io.BytesIO()
for data_bytes in data.stream(16):
records.write(data_bytes)
expected_crc = crc32(content.getvalue()) & 0xFFFFFFFF
generated_crc = crc32(records.getvalue()) & 0xFFFFFFFF
if expected_crc != generated_crc:
raise ValueError(
"Data mismatch Expected : " '"col1,col2,col3\none,two,three\nX,Y,Z\n"',
f"Received {records.getvalue().decode()}",
)
finally:
_CLIENT.remove_object(bucket_name, csvfile)
_CLIENT.remove_bucket(bucket_name)
def _test_fput_object(bucket_name, object_name, filename, metadata, sse):
"""Test fput_object()."""
try:
_CLIENT.make_bucket(bucket_name)
if _IS_AWS:
_CLIENT.fput_object(
bucket_name, object_name, filename, metadata=metadata, sse=sse
)
else:
_CLIENT.fput_object(bucket_name, object_name, filename, sse=sse)
_CLIENT.stat_object(bucket_name, object_name, ssec=sse)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_fput_object_small_file(log_entry, sse=None):
"""Test fput_object() with small file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}-f"
metadata = {"x-amz-storage-class": "STANDARD_IA"}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, sse)
def test_fput_object_large_file(log_entry, sse=None):
"""Test fput_object() with large file."""
if sse:
log_entry["name"] += "_with_SSE-C"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}-large"
metadata = {"x-amz-storage-class": "STANDARD_IA"}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _LARGE_FILE,
"metadata": metadata,
}
# upload local large file through multipart.
_test_fput_object(bucket_name, object_name, _LARGE_FILE, metadata, sse)
def test_fput_object_with_content_type(log_entry): # pylint: disable=invalid-name
"""Test fput_object() with content-type."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}-f"
metadata = {"x-amz-storage-class": "STANDARD_IA"}
content_type = "application/octet-stream"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": _TEST_FILE,
"metadata": metadata,
"content_type": content_type,
}
_test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, None)
def _validate_stat(st_obj, expected_size, expected_meta, version_id=None):
"""Validate stat information."""
expected_meta = {key.lower(): value for key, value in (expected_meta or {}).items()}
received_etag = st_obj.etag
received_metadata = {
key.lower(): value for key, value in (st_obj.metadata or {}).items()
}
received_content_type = st_obj.content_type
received_size = st_obj.size
received_is_dir = st_obj.is_dir
if not received_etag:
raise ValueError("No Etag value is returned.")
if st_obj.version_id != version_id:
raise ValueError(
f"version-id mismatch. expected={version_id}, " f"got={st_obj.version_id}"
)
# content_type by default can be either application/octet-stream or
# binary/octet-stream
if received_content_type not in ["application/octet-stream", "binary/octet-stream"]:
raise ValueError(
"Incorrect content type. Expected: ",
"'application/octet-stream' or 'binary/octet-stream'," " received: ",
received_content_type,
)
    if received_size != expected_size:
        raise ValueError(
            f"Incorrect file size. Expected: {expected_size}, received: {received_size}"
        )
if received_is_dir:
raise ValueError(
"Incorrect file type. Expected: is_dir=False",
", received: is_dir=",
received_is_dir,
)
if not all(i in received_metadata.items() for i in expected_meta.items()):
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
def test_copy_object_no_copy_condition( # pylint: disable=invalid-name
log_entry, ssec_copy=None, ssec=None
):
"""Test copy_object() with no conditiions."""
if ssec_copy or ssec:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size, sse=ssec)
_CLIENT.copy_object(
bucket_name,
object_copy,
sse=ssec,
source=CopySource(bucket_name, object_source, ssec=ssec_copy),
)
st_obj = _CLIENT.stat_object(bucket_name, object_copy, ssec=ssec)
_validate_stat(st_obj, size, {})
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_with_metadata(log_entry):
"""Test copy_object() with metadata."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
metadata = {
"testing-string": "string",
"testing-int": 1,
10: "value",
}
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
"metadata": metadata,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Perform a server side copy of an object
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(bucket_name, object_source),
metadata=metadata,
metadata_directive=REPLACE,
)
# Verification
st_obj = _CLIENT.stat_object(bucket_name, object_copy)
expected_metadata = {
"x-amz-meta-testing-int": "1",
"x-amz-meta-testing-string": "string",
"x-amz-meta-10": "value",
}
_validate_stat(st_obj, size, expected_metadata)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_etag_match(log_entry):
"""Test copy_object() with etag match condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Perform a server side copy of an object
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(bucket_name, object_source),
)
# Verification
source_etag = _CLIENT.stat_object(bucket_name, object_source).etag
log_entry["args"]["conditions"] = {"set_match_etag": source_etag}
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(bucket_name, object_source, match_etag=source_etag),
)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_negative_etag_match(log_entry): # pylint: disable=invalid-name
"""Test copy_object() with etag not match condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
try:
# Perform a server side copy of an object
# with incorrect pre-conditions and fail
etag = "test-etag"
log_entry["args"]["conditions"] = {"set_match_etag": etag}
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(bucket_name, object_source, match_etag=etag),
)
except S3Error as exc:
if exc.code != "PreconditionFailed":
raise
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_modified_since(log_entry):
"""Test copy_object() with modified since condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Set up the 'modified_since' copy condition
mod_since = datetime(2014, 4, 1, tzinfo=timezone.utc)
log_entry["args"]["conditions"] = {
"set_modified_since": to_http_header(mod_since)
}
# Perform a server side copy of an object
# and expect the copy to complete successfully
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(bucket_name, object_source, modified_since=mod_since),
)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_copy_object_unmodified_since(log_entry): # pylint: disable=invalid-name
"""Test copy_object() with unmodified since condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Set up the 'unmodified_since' copy condition
unmod_since = datetime(2014, 4, 1, tzinfo=timezone.utc)
log_entry["args"]["conditions"] = {
"set_unmodified_since": to_http_header(unmod_since)
}
try:
# Perform a server side copy of an object and expect
# the copy to fail since the creation/modification
# time is now, way later than unmodification time, April 1st, 2014
_CLIENT.copy_object(
bucket_name,
object_copy,
CopySource(
bucket_name,
object_source,
unmodified_since=unmod_since,
),
)
except S3Error as exc:
if exc.code != "PreconditionFailed":
raise
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
def test_put_object(log_entry, sse=None):
"""Test put_object()."""
if sse:
log_entry["name"] += "_SSE"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "LimitedRandomReader(1 * MB)",
}
try:
_CLIENT.make_bucket(bucket_name)
# Put/Upload a streaming object of 1 MiB
reader = LimitedRandomReader(length)
_CLIENT.put_object(bucket_name, object_name, reader, length, sse=sse)
_CLIENT.stat_object(bucket_name, object_name, ssec=sse)
# Put/Upload a streaming object of 11 MiB
log_entry["args"]["length"] = length = 11 * MB
reader = LimitedRandomReader(length)
log_entry["args"]["data"] = "LimitedRandomReader(11 * MB)"
log_entry["args"]["metadata"] = metadata = {
"x-amz-meta-testing": "value",
"test-key": "value2",
}
log_entry["args"]["content_type"] = content_type = "application/octet-stream"
log_entry["args"]["object_name"] = object_name + "-metadata"
_CLIENT.put_object(
bucket_name,
object_name + "-metadata",
reader,
length,
content_type,
metadata,
sse=sse,
)
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with
# metadata.
st_obj = _CLIENT.stat_object(bucket_name, object_name + "-metadata", ssec=sse)
normalized_meta = {
key.lower(): value for key, value in (st_obj.metadata or {}).items()
}
if "x-amz-meta-testing" not in normalized_meta:
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
value = normalized_meta["x-amz-meta-testing"]
if value != "value":
raise ValueError(f"Metadata key has unexpected value {value}")
if "x-amz-meta-test-key" not in normalized_meta:
raise ValueError("Metadata key 'x-amz-meta-test-key' not found")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_object(bucket_name, object_name + "-metadata")
_CLIENT.remove_bucket(bucket_name)
def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name
log_entry,
):
"""Test put_object() failure with path segment."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"/a/b/c/{uuid4()}"
length = 0
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "",
}
try:
_CLIENT.make_bucket(bucket_name)
_CLIENT.put_object(bucket_name, object_name, io.BytesIO(b""), 0)
_CLIENT.remove_object(bucket_name, object_name)
except S3Error as err:
if err.code != "XMinioInvalidObjectName":
raise
finally:
_CLIENT.remove_bucket(bucket_name)
def _test_stat_object(log_entry, sse=None, version_check=False):
"""Test stat_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"length": length,
"data": "LimitedRandomReader(1 * MB)",
}
version_id1 = None
version_id2 = None
_CLIENT.make_bucket(bucket_name)
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
# Put/Upload a streaming object of 1 MiB
reader = LimitedRandomReader(length)
result = _CLIENT.put_object(
bucket_name,
object_name,
reader,
length,
sse=sse,
)
version_id1 = result.version_id
_CLIENT.stat_object(
bucket_name,
object_name,
ssec=sse,
version_id=version_id1,
)
# Put/Upload a streaming object of 11 MiB
log_entry["args"]["length"] = length = 11 * MB
reader = LimitedRandomReader(length)
log_entry["args"]["data"] = "LimitedRandomReader(11 * MB)"
log_entry["args"]["metadata"] = metadata = {"X-Amz-Meta-Testing": "value"}
log_entry["args"]["content_type"] = content_type = "application/octet-stream"
log_entry["args"]["object_name"] = object_name + "-metadata"
result = _CLIENT.put_object(
bucket_name,
object_name + "-metadata",
reader,
length,
content_type,
metadata,
sse=sse,
)
version_id2 = result.version_id
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with
# metadata.
st_obj = _CLIENT.stat_object(
bucket_name,
object_name + "-metadata",
ssec=sse,
version_id=version_id2,
)
# Verify the collected stat data.
_validate_stat(
st_obj,
length,
metadata,
version_id=version_id2,
)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id1)
_CLIENT.remove_object(
bucket_name,
object_name + "-metadata",
version_id=version_id2,
)
_CLIENT.remove_bucket(bucket_name)
def test_stat_object(log_entry, sse=None):
"""Test stat_object()."""
_test_stat_object(log_entry, sse)
def test_stat_object_version(log_entry, sse=None):
"""Test stat_object() of versioned object."""
_test_stat_object(log_entry, sse, version_check=True)
def _test_remove_object(log_entry, version_check=False):
"""Test remove_object()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
length = 1 * KB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name,
object_name,
LimitedRandomReader(length),
length,
)
_CLIENT.remove_object(
bucket_name,
object_name,
version_id=result.version_id,
)
finally:
_CLIENT.remove_bucket(bucket_name)
def test_remove_object(log_entry):
"""Test remove_object()."""
_test_remove_object(log_entry)
def test_remove_object_version(log_entry):
"""Test remove_object() of versioned object."""
_test_remove_object(log_entry, version_check=True)
def _test_get_object(log_entry, sse=None, version_check=False):
"""Test get_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name,
object_name,
LimitedRandomReader(length),
length,
sse=sse,
)
version_id = result.version_id
# Get/Download a full object, iterate on response to save to disk
object_data = _CLIENT.get_object(
bucket_name,
object_name,
ssec=sse,
version_id=version_id,
)
newfile = "newfile جديد"
with open(newfile, "wb") as file_data:
shutil.copyfileobj(object_data, file_data)
os.remove(newfile)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_get_object(log_entry, sse=None):
"""Test get_object()."""
_test_get_object(log_entry, sse)
def test_get_object_version(log_entry, sse=None):
"""Test get_object() for versioned object."""
_test_get_object(log_entry, sse, version_check=True)
def _test_fget_object(log_entry, sse=None, version_check=False):
"""Test fget_object()."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
tmpfd, tmpfile = tempfile.mkstemp()
os.close(tmpfd)
length = 1 * MB
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"file_path": tmpfile,
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
result = _CLIENT.put_object(
bucket_name,
object_name,
LimitedRandomReader(length),
length,
sse=sse,
)
version_id = result.version_id
# Get/Download a full object and save locally at path
_CLIENT.fget_object(
bucket_name,
object_name,
tmpfile,
ssec=sse,
version_id=version_id,
)
os.remove(tmpfile)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_fget_object(log_entry, sse=None):
"""Test fget_object()."""
_test_fget_object(log_entry, sse)
def test_fget_object_version(log_entry, sse=None):
"""Test fget_object() of versioned object."""
_test_fget_object(log_entry, sse, version_check=True)
def test_get_object_with_default_length( # pylint: disable=invalid-name
log_entry, sse=None
):
"""Test get_object() with default length."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
size = 1 * MB
length = 1000
offset = size - length
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"offset": offset,
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(size), size, sse=sse
)
        # Get the last 'length' bytes of the object (read from offset to the end)
object_data = _CLIENT.get_object(
bucket_name, object_name, offset=offset, ssec=sse
)
newfile = "newfile"
with open(newfile, "wb") as file_data:
for data in object_data:
file_data.write(data)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
os.remove(newfile)
if new_file_size != length:
raise ValueError("Unexpected file size after running ")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_get_partial_object(log_entry, sse=None):
"""Test get_object() by offset/length."""
if sse:
log_entry["name"] += "_SSEC"
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
size = 1 * MB
offset = int(size / 2)
length = offset - 1000
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"offset": offset,
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.put_object(
bucket_name, object_name, LimitedRandomReader(size), size, sse=sse
)
# Get half of the object
object_data = _CLIENT.get_object(
bucket_name, object_name, offset=offset, length=length, ssec=sse
)
newfile = "newfile"
with open(newfile, "wb") as file_data:
for data in object_data:
file_data.write(data)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
os.remove(newfile)
if new_file_size != length:
raise ValueError("Unexpected file size after running ")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def _test_list_objects(log_entry, use_api_v1=False, version_check=False):
"""Test list_objects()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
is_recursive = True
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"recursive": is_recursive,
}
_CLIENT.make_bucket(bucket_name)
version_id1 = None
version_id2 = None
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
size = 1 * KB
result = _CLIENT.put_object(
bucket_name,
object_name + "-1",
LimitedRandomReader(size),
size,
)
version_id1 = result.version_id
result = _CLIENT.put_object(
bucket_name,
object_name + "-2",
LimitedRandomReader(size),
size,
)
version_id2 = result.version_id
# List all object paths in bucket.
objects = _CLIENT.list_objects(
bucket_name,
"",
is_recursive,
include_version=version_check,
use_api_v1=use_api_v1,
)
for obj in objects:
_ = (
obj.bucket_name,
obj.object_name,
obj.last_modified,
obj.etag,
obj.size,
obj.content_type,
)
if obj.version_id not in [version_id1, version_id2]:
raise ValueError(
f"version ID mismatch. "
f"expected=any{[version_id1, version_id2]}, "
f"got:{obj.version_id}"
)
finally:
_CLIENT.remove_object(
bucket_name,
object_name + "-1",
version_id=version_id1,
)
_CLIENT.remove_object(
bucket_name,
object_name + "-2",
version_id=version_id2,
)
_CLIENT.remove_bucket(bucket_name)
def test_list_objects_v1(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry, use_api_v1=True)
def test_list_object_v1_versions(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry, use_api_v1=True, version_check=True)
def _test_list_objects_api(bucket_name, expected_no, *argv):
"""Test list_objects()."""
# argv is composed of prefix and recursive arguments of
# list_objects api. They are both supposed to be passed as strings.
objects = _CLIENT.list_objects(bucket_name, *argv)
# expect all objects to be listed
no_of_files = 0
for obj in objects:
_ = (
obj.bucket_name,
obj.object_name,
obj.last_modified,
obj.etag,
obj.size,
obj.content_type,
)
no_of_files += 1
if expected_no != no_of_files:
raise ValueError(
f"Listed no of objects ({no_of_files}), does not match the "
f"expected no of objects ({expected_no})"
)
def test_list_objects_with_prefix(log_entry):
"""Test list_objects() with prefix."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
no_of_created_files = 4
path_prefix = ""
# Create files and directories
for i in range(no_of_created_files):
_CLIENT.put_object(
bucket_name,
f"{path_prefix}{i}_{object_name}",
LimitedRandomReader(size),
size,
)
path_prefix = f"{path_prefix}{i}/"
# Created files and directory structure
# ._<bucket_name>/
# |___0_<object_name>
# |___0/
# |___1_<object_name>
# |___1/
# |___2_<object_name>
# |___2/
# |___3_<object_name>
#
# Test and verify list_objects api outputs
# List objects recursively with NO prefix
log_entry["args"]["prefix"] = prefix = "" # no prefix
log_entry["args"]["recursive"] = recursive = ""
_test_list_objects_api(bucket_name, no_of_created_files, prefix, True)
# List objects at the top level with no prefix and no recursive option
# Expect only the top 2 objects to be listed
_test_list_objects_api(bucket_name, 2)
# List objects for '0' directory/prefix without recursive option
        # Expect 2 objects (directory '0' and the '0_' object) to be listed
log_entry["args"]["prefix"] = prefix = "0"
_test_list_objects_api(bucket_name, 2, prefix)
# List objects for '0/' directory/prefix without recursive option
# Expect only 2 objects under directory '0/' to be listed,
# non-recursive
log_entry["args"]["prefix"] = prefix = "0/"
_test_list_objects_api(bucket_name, 2, prefix)
# List objects for '0/' directory/prefix, recursively
        # Expect 3 objects to be listed
log_entry["args"]["prefix"] = prefix = "0/"
log_entry["args"]["recursive"] = recursive = "True"
_test_list_objects_api(bucket_name, 3, prefix, recursive)
# List object with '0/1/2/' directory/prefix, non-recursive
# Expect the single object under directory '0/1/2/' to be listed
log_entry["args"]["prefix"] = prefix = "0/1/2/"
_test_list_objects_api(bucket_name, 1, prefix)
finally:
path_prefix = ""
for i in range(no_of_created_files):
_CLIENT.remove_object(
bucket_name,
f"{path_prefix}{i}_{object_name}",
)
path_prefix = f"{path_prefix}{i}/"
_CLIENT.remove_bucket(bucket_name)
# Test passes
log_entry["args"]["prefix"] = "Several prefix/recursive combinations are tested"
log_entry["args"]["recursive"] = "Several prefix/recursive combinations are tested"
def test_list_objects_with_1001_files(log_entry): # pylint: disable=invalid-name
"""Test list_objects() with more 1000 objects."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": f"{object_name}_0 ~ {0}_1000",
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
no_of_created_files = 2000
# Create files and directories
for i in range(no_of_created_files):
_CLIENT.put_object(
bucket_name, f"{object_name}_{i}", LimitedRandomReader(size), size
)
        # List all objects and check that all 2000 are returned
        # (more than one 1000-entry listing page)
_test_list_objects_api(bucket_name, no_of_created_files)
finally:
for i in range(no_of_created_files):
_CLIENT.remove_object(bucket_name, f"{object_name}_{i}")
_CLIENT.remove_bucket(bucket_name)
def test_list_objects(log_entry):
"""Test list_objects()."""
_test_list_objects(log_entry)
def test_list_object_versions(log_entry):
"""Test list_objects() of versioned object."""
_test_list_objects(log_entry, version_check=True)
def test_presigned_get_object_default_expiry(log_entry): # pylint: disable=invalid-name
"""Test presigned_get_object() with default expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name
)
response = HTTP.urlopen("GET", presigned_get_object_url)
if response.status != 200:
raise Exception(
f"Presigned GET object URL {presigned_get_object_url} failed; "
f"code: {response.code}, error: {response.data}"
)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_expiry(log_entry): # pylint: disable=invalid-name
"""Test presigned_get_object() with expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120)
)
response = HTTP.urlopen("GET", presigned_get_object_url)
if response.status != 200:
raise Exception(
f"Presigned GET object URL {presigned_get_object_url} failed; "
f"code: {response.code}, error: {response.data}"
)
log_entry["args"]["presigned_get_object_url"] = presigned_get_object_url
response = HTTP.urlopen("GET", presigned_get_object_url)
log_entry["args"]["response.status"] = response.status
log_entry["args"]["response.reason"] = response.reason
log_entry["args"]["response.headers"] = json.dumps(response.headers.__dict__)
# pylint: disable=protected-access
log_entry["args"]["response._body"] = response._body.decode("utf-8")
if response.status != 200:
raise Exception(
f"Presigned GET object URL {presigned_get_object_url} failed; "
f"code: {response.code}, error: {response.data}"
)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=1)
)
# Wait for 2 seconds for the presigned url to expire
time.sleep(2)
response = HTTP.urlopen("GET", presigned_get_object_url)
log_entry["args"]["response.status-2"] = response.status
log_entry["args"]["response.reason-2"] = response.reason
log_entry["args"]["response.headers-2"] = json.dumps(response.headers.__dict__)
log_entry["args"]["response._body-2"] = response._body.decode("utf-8")
# Success with an expired url is considered to be a failure
if response.status == 200:
raise ValueError("Presigned get url failed to expire!")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_response_headers( # pylint: disable=invalid-name
log_entry,
):
"""Test presigned_get_object() with headers."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
content_type = "text/plain"
content_language = "en_US"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
"content_type": content_type,
"content_language": content_language,
}
_CLIENT.make_bucket(bucket_name)
try:
size = 1 * KB
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), size)
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120)
)
response_headers = {
"response-content-type": content_type,
"response-content-language": content_language,
}
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name, object_name, timedelta(seconds=120), response_headers
)
log_entry["args"]["presigned_get_object_url"] = presigned_get_object_url
response = HTTP.urlopen("GET", presigned_get_object_url)
returned_content_type = response.headers["Content-Type"]
returned_content_language = response.headers["Content-Language"]
log_entry["args"]["response.status"] = response.status
log_entry["args"]["response.reason"] = response.reason
log_entry["args"]["response.headers"] = json.dumps(response.headers.__dict__)
# pylint: disable=protected-access
log_entry["args"]["response._body"] = response._body.decode("utf-8")
log_entry["args"]["returned_content_type"] = returned_content_type
log_entry["args"]["returned_content_language"] = returned_content_language
if (
response.status != 200
or returned_content_type != content_type
or returned_content_language != content_language
):
raise Exception(
"Presigned GET object URL {presigned_get_object_url} failed; "
"code: {response.code}, error: {response.data}"
)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_get_object_version(log_entry): # pylint: disable=invalid-name
"""Test presigned_get_object() of versioned object."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
version_id = None
try:
_CLIENT.set_bucket_versioning(bucket_name, VersioningConfig(ENABLED))
size = 1 * KB
result = _CLIENT.put_object(
bucket_name,
object_name,
LimitedRandomReader(size),
size,
)
version_id = result.version_id
presigned_get_object_url = _CLIENT.presigned_get_object(
bucket_name,
object_name,
version_id=version_id,
)
response = HTTP.urlopen("GET", presigned_get_object_url)
if response.status != 200:
raise Exception(
f"Presigned GET object URL {presigned_get_object_url} failed; "
f"code: {response.code}, error: {response.data}"
)
finally:
_CLIENT.remove_object(bucket_name, object_name, version_id=version_id)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_put_object_default_expiry(log_entry): # pylint: disable=invalid-name
"""Test presigned_put_object() with default expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
presigned_put_object_url = _CLIENT.presigned_put_object(
bucket_name, object_name
)
response = HTTP.urlopen(
"PUT", presigned_put_object_url, LimitedRandomReader(1 * KB)
)
if response.status != 200:
raise Exception(
f"Presigned PUT object URL {presigned_put_object_url} failed; "
f"code: {response.code}, error: {response.data}"
)
_CLIENT.stat_object(bucket_name, object_name)
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_put_object_expiry(log_entry): # pylint: disable=invalid-name
"""Test presigned_put_object() with expiry."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
_CLIENT.make_bucket(bucket_name)
try:
presigned_put_object_url = _CLIENT.presigned_put_object(
bucket_name, object_name, timedelta(seconds=1)
)
# Wait for 2 seconds for the presigned url to expire
time.sleep(2)
response = HTTP.urlopen(
"PUT", presigned_put_object_url, LimitedRandomReader(1 * KB)
)
if response.status == 200:
raise ValueError("Presigned put url failed to expire!")
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_presigned_post_policy(log_entry):
"""Test presigned_post_policy()."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
no_of_days = 10
prefix = "objectPrefix/"
policy = PostPolicy(
bucket_name,
datetime.utcnow() + timedelta(days=no_of_days),
)
policy.add_starts_with_condition("key", prefix)
policy.add_content_length_range_condition(64 * KB, 10 * MB)
policy.add_starts_with_condition("Content-Type", "image/")
log_entry["args"]["post_policy"] = {
"prefix": prefix,
"expires_in_days": no_of_days,
"content_length_range": "64KiB to 10MiB",
"Content-Type": "image/",
}
_CLIENT.presigned_post_policy(policy)
finally:
_CLIENT.remove_bucket(bucket_name)
def test_thread_safe(log_entry):
"""Test thread safety."""
# Create sha-sum value for the user provided
# source file, 'test_file'
test_file_sha_sum = _get_sha256sum(_LARGE_FILE)
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = f"{uuid4()}"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_name": object_name,
}
# A list of exceptions raised by get_object_and_check
# called in multiple threads.
exceptions = []
# get_object_and_check() downloads an object, stores it in a file
# and then calculates its checksum. In case of mismatch, a new
# exception is generated and saved in exceptions.
def get_object_and_check(index):
try:
local_file = f"copied_file_{index}"
_CLIENT.fget_object(bucket_name, object_name, local_file)
copied_file_sha_sum = _get_sha256sum(local_file)
# Compare sha-sum values of the source file and the copied one
if test_file_sha_sum != copied_file_sha_sum:
raise ValueError(
"Sha-sum mismatch on multi-threaded put and " "get objects"
)
except Exception as exc: # pylint: disable=broad-except
exceptions.append(exc)
finally:
# Remove downloaded file
_ = os.path.isfile(local_file) and os.remove(local_file)
_CLIENT.make_bucket(bucket_name)
no_of_threads = 5
try:
        # Put/Upload the object 'no_of_threads' times, each in its own thread
        # (each thread is joined immediately, so uploads run one after another)
for _ in range(no_of_threads):
thread = Thread(
target=_CLIENT.fput_object, args=(bucket_name, object_name, _LARGE_FILE)
)
thread.start()
thread.join()
# Get/Download 'no_of_threads' many objects
# simultaneously using multi-threading
thread_list = []
for i in range(no_of_threads):
            # Create dynamically named threads for the downloads
thread_name = f"thread_{i}"
vars()[thread_name] = Thread(target=get_object_and_check, args=(i,))
vars()[thread_name].start()
thread_list.append(vars()[thread_name])
        # Wait until all threads finish
for thread in thread_list:
thread.join()
if exceptions:
raise exceptions[0]
finally:
_CLIENT.remove_object(bucket_name, object_name)
_CLIENT.remove_bucket(bucket_name)
def test_get_bucket_policy(log_entry):
"""Test get_bucket_policy()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
_CLIENT.get_bucket_policy(bucket_name)
except S3Error as exc:
if exc.code != "NoSuchBucketPolicy":
raise
finally:
_CLIENT.remove_bucket(bucket_name)
def _get_policy_actions(stat):
"""Get policy actions from stat information."""
def listit(value):
return value if isinstance(value, list) else [value]
actions = [listit(s.get("Action")) for s in stat if s.get("Action")]
actions = list(
set(item.replace("s3:", "") for sublist in actions for item in sublist)
)
actions.sort()
return actions
def _validate_policy(bucket_name, policy):
"""Validate policy."""
policy_dict = json.loads(_CLIENT.get_bucket_policy(bucket_name))
actions = _get_policy_actions(policy_dict.get("Statement"))
expected_actions = _get_policy_actions(policy.get("Statement"))
return expected_actions == actions
def test_get_bucket_notification(log_entry):
"""Test get_bucket_notification()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
config = _CLIENT.get_bucket_notification(bucket_name)
if (
config.cloud_func_config_list
or config.queue_config_list
or config.topic_config_list
):
raise ValueError("Failed to receive an empty bucket notification")
finally:
_CLIENT.remove_bucket(bucket_name)
def test_set_bucket_policy_readonly(log_entry):
"""Test set_bucket_policy() with readonly policy."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
# read-only policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::" + bucket_name,
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::" + bucket_name,
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": f"arn:aws:s3:::{bucket_name}/*",
},
],
}
# Set read-only policy
_CLIENT.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not _validate_policy(bucket_name, policy):
raise ValueError("Failed to set ReadOnly bucket policy")
finally:
_CLIENT.remove_bucket(bucket_name)
def test_set_bucket_policy_readwrite(log_entry): # pylint: disable=invalid-name
"""Test set_bucket_policy() with read/write policy."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
try:
# Read-write policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": ["s3:GetBucketLocation"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"},
},
{
"Action": ["s3:ListBucket"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"},
},
{
"Action": ["s3:ListBucketMultipartUploads"],
"Sid": "",
"Resource": ["arn:aws:s3:::" + bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"},
},
{
"Action": [
"s3:ListMultipartUploadParts",
"s3:GetObject",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:PutObject",
],
"Sid": "",
"Resource": [f"arn:aws:s3:::{bucket_name}/*"],
"Effect": "Allow",
"Principal": {"AWS": "*"},
},
],
}
# Set read-write policy
_CLIENT.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not _validate_policy(bucket_name, policy):
raise ValueError("Failed to set ReadOnly bucket policy")
finally:
_CLIENT.remove_bucket(bucket_name)
def _test_remove_objects(log_entry, version_check=False):
"""Test remove_objects()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
log_entry["args"] = {
"bucket_name": bucket_name,
}
_CLIENT.make_bucket(bucket_name)
object_names = []
delete_object_list = []
try:
if version_check:
_CLIENT.set_bucket_versioning(
bucket_name,
VersioningConfig(ENABLED),
)
size = 1 * KB
# Upload some new objects to prepare for multi-object delete test.
for i in range(10):
object_name = f"prefix-{i}"
result = _CLIENT.put_object(
bucket_name,
object_name,
LimitedRandomReader(size),
size,
)
object_names.append(
(object_name, result.version_id) if version_check else object_name,
)
log_entry["args"]["delete_object_list"] = object_names
for args in object_names:
delete_object_list.append(
DeleteObject(args)
if isinstance(args, str)
else DeleteObject(args[0], args[1])
)
# delete the objects in a single library call.
errs = _CLIENT.remove_objects(bucket_name, delete_object_list)
for err in errs:
raise ValueError(f"Remove objects err: {err}")
finally:
# Try to clean everything to keep our server intact
errs = _CLIENT.remove_objects(bucket_name, delete_object_list)
for err in errs:
raise ValueError(f"Remove objects err: {err}")
_CLIENT.remove_bucket(bucket_name)
def test_remove_objects(log_entry):
"""Test remove_objects()."""
_test_remove_objects(log_entry)
def test_remove_object_versions(log_entry):
"""Test remove_objects()."""
_test_remove_objects(log_entry, version_check=True)
def test_remove_bucket(log_entry):
"""Test remove_bucket()."""
# Get a unique bucket_name
bucket_name = _gen_bucket_name()
if _IS_AWS:
bucket_name += ".unique"
log_entry["args"] = {
"bucket_name": bucket_name,
}
if _IS_AWS:
log_entry["args"]["location"] = location = "us-east-1"
_CLIENT.make_bucket(bucket_name, location)
else:
_CLIENT.make_bucket(bucket_name)
# Removing bucket. This operation will only work if your bucket is empty.
_CLIENT.remove_bucket(bucket_name)
def main():
"""
Functional testing of minio python library.
"""
# pylint: disable=global-statement
global _CLIENT, _TEST_FILE, _LARGE_FILE, _IS_AWS
access_key = os.getenv("ACCESS_KEY")
secret_key = os.getenv("SECRET_KEY")
server_endpoint = os.getenv("SERVER_ENDPOINT", "play.min.io")
secure = os.getenv("ENABLE_HTTPS", "1") == "1"
if server_endpoint == "play.min.io":
access_key = "Q3AM3UQ867SPQQA43P2F"
secret_key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
secure = True
_CLIENT = Minio(server_endpoint, access_key, secret_key, secure=secure)
_IS_AWS = ".amazonaws.com" in server_endpoint
# Check if we are running in the mint environment.
data_dir = os.getenv("DATA_DIR", "/mint/data")
is_mint_env = (
os.path.exists(data_dir)
and os.path.exists(os.path.join(data_dir, "datafile-1-MB"))
and os.path.exists(os.path.join(data_dir, "datafile-11-MB"))
)
# Enable trace
# _CLIENT.trace_on(sys.stderr)
_TEST_FILE = "datafile-1-MB"
_LARGE_FILE = "datafile-11-MB"
if is_mint_env:
# Choose data files
_TEST_FILE = os.path.join(data_dir, "datafile-1-MB")
_LARGE_FILE = os.path.join(data_dir, "datafile-11-MB")
else:
with open(_TEST_FILE, "wb") as file_data:
shutil.copyfileobj(LimitedRandomReader(1 * MB), file_data)
with open(_LARGE_FILE, "wb") as file_data:
shutil.copyfileobj(LimitedRandomReader(11 * MB), file_data)
ssec = None
if secure:
# Create a Customer Key of 32 Bytes for Server Side Encryption (SSE-C)
cust_key = b"AABBCCDDAABBCCDDAABBCCDDAABBCCDD"
# Create an SSE-C object with provided customer key
ssec = SseCustomerKey(cust_key)
if os.getenv("MINT_MODE") == "full":
tests = {
test_make_bucket_default_region: None,
test_make_bucket_with_region: None,
test_negative_make_bucket_invalid_name: None,
test_list_buckets: None,
test_fput_object_small_file: {"sse": ssec} if ssec else None,
test_fput_object_large_file: {"sse": ssec} if ssec else None,
test_fput_object_with_content_type: None,
test_copy_object_no_copy_condition: {"ssec_copy": ssec, "ssec": ssec}
if ssec
else None,
test_copy_object_etag_match: None,
test_copy_object_with_metadata: None,
test_copy_object_negative_etag_match: None,
test_copy_object_modified_since: None,
test_copy_object_unmodified_since: None,
test_put_object: {"sse": ssec} if ssec else None,
test_negative_put_object_with_path_segment: None,
test_stat_object: {"sse": ssec} if ssec else None,
test_stat_object_version: {"sse": ssec} if ssec else None,
test_get_object: {"sse": ssec} if ssec else None,
test_get_object_version: {"sse": ssec} if ssec else None,
test_fget_object: {"sse": ssec} if ssec else None,
test_fget_object_version: {"sse": ssec} if ssec else None,
test_get_object_with_default_length: None,
test_get_partial_object: {"sse": ssec} if ssec else None,
test_list_objects_v1: None,
test_list_object_v1_versions: None,
test_list_objects_with_prefix: None,
test_list_objects_with_1001_files: None,
test_list_objects: None,
test_list_object_versions: None,
test_presigned_get_object_default_expiry: None,
test_presigned_get_object_expiry: None,
test_presigned_get_object_response_headers: None,
test_presigned_get_object_version: None,
test_presigned_put_object_default_expiry: None,
test_presigned_put_object_expiry: None,
test_presigned_post_policy: None,
test_thread_safe: None,
test_get_bucket_policy: None,
test_set_bucket_policy_readonly: None,
test_set_bucket_policy_readwrite: None,
test_get_bucket_notification: None,
test_select_object_content: None,
}
else:
tests = {
test_make_bucket_default_region: None,
test_list_buckets: None,
test_put_object: {"sse": ssec} if ssec else None,
test_stat_object: {"sse": ssec} if ssec else None,
test_stat_object_version: {"sse": ssec} if ssec else None,
test_get_object: {"sse": ssec} if ssec else None,
test_get_object_version: {"sse": ssec} if ssec else None,
test_list_objects: None,
test_presigned_get_object_default_expiry: None,
test_presigned_put_object_default_expiry: None,
test_presigned_post_policy: None,
test_copy_object_no_copy_condition: {"ssec_copy": ssec, "ssec": ssec}
if ssec
else None,
test_select_object_content: None,
test_get_bucket_policy: None,
test_set_bucket_policy_readonly: None,
test_get_bucket_notification: None,
}
tests.update(
{
test_remove_object: None,
test_remove_object_version: None,
test_remove_objects: None,
test_remove_object_versions: None,
test_remove_bucket: None,
},
)
for test_name, arg_list in tests.items():
args = ()
kwargs = {}
_call_test(test_name, *args, **kwargs)
if arg_list:
args = ()
kwargs = arg_list
_call_test(test_name, *args, **kwargs) # pylint: disable=not-a-mapping
# Remove temporary files.
if not is_mint_env:
os.remove(_TEST_FILE)
os.remove(_LARGE_FILE)
if __name__ == "__main__":
try:
main()
except TestFailed:
sys.exit(1)
except Exception as excp: # pylint: disable=broad-except
print(excp)
sys.exit(-1)
| aistore-master | python/tests/s3compat/tests.py |
"""
ETL to convert images to numpy arrays.
Communication Type: hpush://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
from aistore import Client
import numpy as np
import cv2
client = Client(
"http://192.168.49.2:8080"
) # ip addr of aistore cluster (in k8s or minikube)
def transform(input_bytes):
    nparr = np.frombuffer(input_bytes, np.uint8)
return cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# other opencv packages don't work in dockerized environments
deps = ["opencv-python-headless==4.5.3.56"]
# initialize ETL
client.etl("etl-img-to-npy").init_code(transform=transform, dependencies=deps)
to_bck = client.bucket("to-bck")
# Transform bucket with given ETL name
job_id = client.bucket("from-bck").transform(
etl_name="etl-img-to-npy", to_bck=to_bck, ext={"jpg": "npy"}
)
client.job(job_id).wait()
# load an object from transformed bucket
print(np.frombuffer(to_bck.object("obj-id.npy").get().read_all(), dtype=np.uint8))
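# The same ETL can also be applied inline on a single GET instead of transforming the
# whole bucket first -- a sketch, not part of the original example; "obj-id.jpg" is a
# hypothetical object assumed to exist in "from-bck".
inline_bytes = (
    client.bucket("from-bck").object("obj-id.jpg").get(etl_name="etl-img-to-npy").read_all()
)
print(np.frombuffer(inline_bytes, dtype=np.uint8))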
| aistore-master | python/examples/ais-etl/etl_convert_img_to_npy.py |
"""
ETL to transform images using torchvision.
Communication Type: hpush://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
import io
from torchvision import transforms
from PIL import Image
import torch
import numpy as np
from aistore import Client
from aistore.sdk import Bucket
client = Client("http://192.168.49.2:8080")
# transforms.PILToTensor() alone cannot be the return value: the ETL transform must
# return bytes, not a tensor, so a tensor has to be serialized into a bytes-like object
def apply_image_transforms(reader, writer):
transform = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.PILToTensor()]
)
for b in reader:
buffer = io.BytesIO()
torch.save(transform(Image.open(io.BytesIO(b))), buffer)
buffer.seek(0)
writer.write(buffer.read())
# initialize ETL
client.etl("etl-torchvision").init_code(
transform=apply_image_transforms,
dependencies=["Pillow", "torchvision"],
timeout="10m",
)
# Transform bucket with given ETL name
job_id = client.bucket("from-bck").transform(
etl_name="etl-torchvision", to_bck=Bucket("to-bck"), ext={"jpg": "npy"}
)
client.job(job_id).wait()
# read the numpy array
np.frombuffer(
client.bucket("to-bck").object("obj-id.npy").get().read_all(), dtype=np.uint8
)
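# Because the transform above serialized each result with torch.save, the tensor itself
# can be recovered with torch.load rather than viewing the raw bytes -- a sketch, not
# part of the original example, using the same hypothetical "obj-id.npy" object.
tensor = torch.load(
    io.BytesIO(client.bucket("to-bck").object("obj-id.npy").get().read_all())
)
print(tensor.shape)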
| aistore-master | python/examples/ais-etl/etl_torchvision_hpush.py |
"""
ETL to calculate md5 of an object.
Communication Type: io://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
from aistore import Client
import hashlib
import sys
from aistore.sdk import Bucket
from aistore.sdk.etl_const import ETL_COMM_IO
client = Client("http://192.168.49.2:8080")
def etl():
md5 = hashlib.md5()
chunk = sys.stdin.buffer.read()
md5.update(chunk)
sys.stdout.buffer.write(md5.hexdigest().encode())
client.etl("etl-md5-io-code").init_code(transform=etl, communication_type=ETL_COMM_IO)
job_id = client.bucket("from-bck").transform(
etl_name="etl-md5-io-code", to_bck=Bucket("to-bck"), ext={"jpg": "txt"}
)
client.job(job_id).wait()
| aistore-master | python/examples/ais-etl/etl_md5_io.py |
"""
ETL to calculate md5 of an object.
Communication Type: hpush://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
import hashlib
from aistore import Client
from aistore.sdk import Bucket
client = Client("http://192.168.49.2:8080")
def transform(input_bytes):
md5 = hashlib.md5()
md5.update(input_bytes)
return md5.hexdigest().encode()
client.etl("etl-md5").init_code(transform=transform)
job_id = client.bucket("from-bck").transform(
etl_name="etl-md5", to_bck=Bucket("to-bck"), ext={"jpg": "txt"}
)
client.job(job_id).wait()
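# A minimal verification sketch, not part of the original example: read one transformed
# object back from the destination bucket. "obj-id.txt" is a hypothetical object name --
# the ETL maps *.jpg names to *.txt via ext={"jpg": "txt"} above, so substitute a name
# that actually exists in "to-bck".
print(client.bucket("to-bck").object("obj-id.txt").get().read_all().decode())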
| aistore-master | python/examples/ais-etl/etl_md5_hpush.py |
"""
ETL to calculate md5 of an object with streaming.
Communication Type: hpush://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
import hashlib
from aistore import Client
from aistore.sdk import Bucket
client = Client("http://192.168.49.2:8080")
def transform(reader, writer):
checksum = hashlib.md5()
for b in reader:
checksum.update(b)
writer.write(checksum.hexdigest().encode())
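# Assumption, not stated in the original example: with the streaming (reader/writer)
# transform signature, chunk_size below controls how many bytes the reader yields per
# iteration, so the checksum is computed without holding the whole object in memory.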
client.etl("etl-stream").init_code(
transform=transform,
chunk_size=32768,
)
job_id = client.bucket("from-bck").transform(
etl_name="etl-stream", to_bck=Bucket("to-bck"), ext={"jpg": "txt"}
)
client.job(job_id).wait()
| aistore-master | python/examples/ais-etl/etl_md5_hpush_streaming.py |
"""
ETL to transform images using torchvision.
Communication Type: io://
Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
"""
import io
import sys
from torchvision import transforms
from PIL import Image
from aistore import Client
from aistore.sdk import Bucket
from aistore.sdk.etl_const import ETL_COMM_IO
client = Client("http://192.168.49.2:8080")
# transforms.PILToTensor() alone cannot be the return value: the IO ETL must write bytes
# to stdout, not a tensor, so the tensor has to be serialized into a bytes-like object
def apply_image_transforms():
transform = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.PILToTensor()]
)
input_bytes = sys.stdin.buffer.read()
    # serialize the transformed tensor to raw bytes; stdout expects a bytes-like object
    sys.stdout.buffer.write(transform(Image.open(io.BytesIO(input_bytes))).numpy().tobytes())
deps = ["Pillow", "torchvision"]
# initialize ETL
client.etl(etl_name="etl_torchvision_io").init_code(
transform=apply_image_transforms,
dependencies=deps,
communication_type=ETL_COMM_IO,
)
# Transform bucket with given ETL name
job_id = client.bucket("from-bck").transform(
etl_name="etl_torchvision_io", to_bck=Bucket("to-bck"), ext={"jpg": "npy"}
)
client.job(job_id).wait()
| aistore-master | python/examples/ais-etl/etl_torchvision_io.py |
import io
import os
import torchvision
import webdataset as wds
from PIL import Image
from aistore.sdk import Client
from torch.utils.data import IterableDataset
from torch.utils.data.dataset import T_co
AIS_ENDPOINT = os.getenv("AIS_ENDPOINT")
bucket_name = "images"
etl_name = "wd-transform"
def show_image(image_data):
with Image.open(io.BytesIO(image_data)) as image:
image.show()
def wd_etl(object_url):
def img_to_bytes(img):
buf = io.BytesIO()
img = img.convert("RGB")
img.save(buf, format="JPEG")
return buf.getvalue()
def process_trimap(trimap_bytes):
image = Image.open(io.BytesIO(trimap_bytes))
preprocessing = torchvision.transforms.Compose(
[
torchvision.transforms.CenterCrop(350),
torchvision.transforms.Lambda(img_to_bytes)
]
)
return preprocessing(image)
def process_image(image_bytes):
image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
preprocessing = torchvision.transforms.Compose(
[
torchvision.transforms.CenterCrop(350),
torchvision.transforms.ToTensor(),
# Means and stds from ImageNet
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
torchvision.transforms.ToPILImage(),
torchvision.transforms.Lambda(img_to_bytes),
]
)
return preprocessing(image)
# Initialize a WD object from the AIS URL
dataset = wds.WebDataset(object_url)
# Map the files for each individual sample to the appropriate processing function
processed_shard = dataset.map_dict(**{"image.jpg": process_image, "trimap.png": process_trimap})
# Write the output to a memory buffer and return the value
buffer = io.BytesIO()
with wds.TarWriter(fileobj=buffer) as dst:
for sample in processed_shard:
dst.write(sample)
return buffer.getvalue()
def create_wd_etl(client):
client.etl(etl_name).init_code(
transform=wd_etl,
preimported_modules=["torch"],
dependencies=["webdataset", "pillow", "torch", "torchvision"],
communication_type="hpull",
transform_url=True
)
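# Note -- an inference from the code above, not stated in the original: with
# communication_type "hpull" and transform_url=True, the ETL passes each object's URL to
# wd_etl rather than its bytes, which is why wd_etl builds wds.WebDataset(object_url)
# directly from the URL.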
class LocalTarDataset(IterableDataset):
"""
    Builds a PyTorch IterableDataset from bytes in memory as if they were read from a URL by
    WebDataset. This lets us initialize a WebDataset pipeline without writing to local disk
    and iterate over each record from a shard.
"""
def __getitem__(self, index) -> T_co:
        raise NotImplementedError
def __init__(self, input_bytes):
self.data = [{"url": "input_data", "stream": io.BytesIO(input_bytes)}]
def __iter__(self):
files = wds.tariterators.tar_file_expander(self.data)
samples = wds.tariterators.group_by_keys(files)
return samples
def read_object_tar(shard_data):
local_dataset = LocalTarDataset(shard_data)
sample = next(iter(local_dataset))
show_image(sample.get('image.jpg'))
def transform_object_inline():
single_object = client.bucket(bucket_name).object("samples-00.tar")
# Get object contents with ETL applied
processed_shard = single_object.get(etl_name=etl_name).read_all()
read_object_tar(processed_shard)
def transform_bucket_offline():
dest_bucket = client.bucket("processed-samples").create(exist_ok=True)
# Transform the entire bucket, placing the output in the destination bucket
transform_job = client.bucket(bucket_name).transform(to_bck=dest_bucket, etl_name=etl_name)
client.job(transform_job).wait(verbose=True)
processed_shard = dest_bucket.object("samples-00.tar").get().read_all()
read_object_tar(processed_shard)
if __name__ == "__main__":
client = Client(AIS_ENDPOINT)
image_bucket = client.bucket(bucket_name)
create_wd_etl(client)
transform_object_inline()
transform_bucket_offline()
| aistore-master | docs/examples/aisio_webdataset/etl_webdataset.py |
import os
import torchvision
from aistore.pytorch import AISSourceLister
from aistore.sdk import Client
import webdataset as wds
AIS_ENDPOINT = os.getenv("AIS_ENDPOINT")
client = Client(AIS_ENDPOINT)
bucket_name = "images"
etl_name = "wd-transform"
def show_image_tensor(image_data):
transform = torchvision.transforms.ToPILImage()
image = transform(image_data)
image.show()
def create_dataset() -> wds.WebDataset:
bucket = client.bucket(bucket_name)
# Get a list of urls for each object in AIS, with ETL applied, converted to the format WebDataset expects
sources = AISSourceLister(ais_sources=[bucket], etl_name=etl_name).map(lambda source_url: {"url": source_url})\
.shuffle()
# Load shuffled list of transformed shards into WebDataset pipeline
dataset = wds.WebDataset(sources)
# Shuffle samples and apply built-in webdataset decoder for image files
dataset = dataset.shuffle(size=1000).decode("torchrgb")
# Return iterator over samples as tuples in batches
return dataset.to_tuple("cls", "image.jpg", "trimap.png").batched(16)
def create_dataloader(dataset) -> wds.WebLoader:
loader = wds.WebLoader(dataset, num_workers=4, batch_size=None)
return loader.unbatched().shuffle(1000).batched(64)
def view_data(dataloader):
# Get the first batch
batch = next(iter(dataloader))
classes, images, trimaps = batch
# Result is a set of tensors with the first dimension being the batch size
print(classes.shape, images.shape, trimaps.shape)
# View the first images in the first batch
show_image_tensor(images[0])
show_image_tensor(trimaps[0])
if __name__ == '__main__':
wd_dataset = create_dataset()
wd_dataloader = create_dataloader(wd_dataset)
view_data(wd_dataloader)
first_batch = next(iter(wd_dataloader))
classes, images, trimaps = first_batch
| aistore-master | docs/examples/aisio_webdataset/pytorch_webdataset.py |
import os
from pathlib import Path
from aistore.sdk import Client
import webdataset as wds
AIS_ENDPOINT = os.getenv("AIS_ENDPOINT")
bucket_name = "images"
def parse_annotations(annotations_file):
classes = {}
# Parse the annotations file into a dictionary from file name -> pet class
with open(annotations_file, "r") as annotations:
for line in annotations.readlines():
if line[0] == "#":
continue
file_name, pet_class = line.split(" ")[:2]
classes[file_name] = pet_class
return classes
def create_sample_generator(image_dir, trimap_dir, annotations_file):
classes = parse_annotations(annotations_file)
# Iterate over all image files
for index, image_file in enumerate(Path(image_dir).glob("*.jpg")):
# Use the image name to look up class and trimap files and create a sample entry
sample = create_sample(classes, trimap_dir, index, image_file)
if sample is None:
continue
        # Yielding keeps memory usage low: the generator produces samples only as requested
yield sample
def create_sample(classes, trimap_dir, index, image_file):
file_name = str(image_file).split("/")[-1].split(".")[0]
try:
with open(image_file, "rb") as f:
image_data = f.read()
pet_class = classes.get(file_name)
with open(trimap_dir.joinpath(file_name + ".png"), "rb") as f:
trimap_data = f.read()
if not image_data or not pet_class or not trimap_data:
# Ignore incomplete records
return None
return {
"__key__": "sample_%04d" % index,
"image.jpg": image_data,
"cls": pet_class,
"trimap.png": trimap_data
}
# Ignoring records with any missing files
except FileNotFoundError as err:
print(err)
return None
def load_data(bucket, sample_generator):
def upload_shard(filename):
bucket.object(filename).put_file(filename)
os.unlink(filename)
# Writes data as tar to disk, uses callback function "post" to upload to AIS and delete
with wds.ShardWriter("samples-%02d.tar", maxcount=400, post=upload_shard) as writer:
for sample in sample_generator:
writer.write(sample)
def view_shuffled_shards():
objects = client.bucket("images").list_all_objects(prefix="shuffled")
print([entry.name for entry in objects])
if __name__ == "__main__":
client = Client(AIS_ENDPOINT)
image_bucket = client.bucket(bucket_name).create(exist_ok=True)
base_dir = Path("/home/aaron/pets")
pet_image_dir = base_dir.joinpath("images")
pet_trimap_dir = base_dir.joinpath("annotations").joinpath("trimaps")
pet_annotations_file = base_dir.joinpath("annotations").joinpath("list.txt")
samples = create_sample_generator(pet_image_dir, pet_trimap_dir, pet_annotations_file)
load_data(image_bucket, samples)
| aistore-master | docs/examples/aisio_webdataset/load_webdataset_example.py |