Dataset columns (string lengths are min–max):

- repository_name: string, 5–67
- func_path_in_repository: string, 4–234
- func_name: string, 0–314
- whole_func_string: string, 52–3.87M
- language: 6 classes
- func_code_string: string, 52–3.87M
- func_documentation_string: string, 1–47.2k
- func_code_url: string, 85–339

repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | DXJob.add_tags | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the job
:type tags: list of strings
Adds each of the specified tags to the job. Takes no
action for tags that are already listed for the job.
"""
dxpy.api.job_add_tags(self._dxid, {"tags": tags}, **kwargs) | python | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the job
:type tags: list of strings
Adds each of the specified tags to the job. Takes no
action for tags that are already listed for the job.
"""
dxpy.api.job_add_tags(self._dxid, {"tags": tags}, **kwargs) | :param tags: Tags to add to the job
:type tags: list of strings
Adds each of the specified tags to the job. Takes no
action for tags that are already listed for the job. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L221-L231 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | DXJob.remove_tags | def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs) | python | def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs) | :param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L233-L243 |
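
Both tagging calls are idempotent, so they are safe to issue without first checking the job's current tags. A minimal usage sketch, assuming an authenticated dxpy session; the job ID is hypothetical:

```python
import dxpy

job = dxpy.DXJob("job-xxxx")  # hypothetical job ID

job.add_tags(["alignment", "rerun"])
job.add_tags(["rerun"])        # "rerun" already present: no action taken

job.remove_tags(["rerun", "obsolete"])  # absent tags are silently ignored
```
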
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | DXJob.set_properties | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the job for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
dxpy.api.job_set_properties(self._dxid, {"properties": properties}, **kwargs) | python | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the job for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
dxpy.api.job_set_properties(self._dxid, {"properties": properties}, **kwargs) | :param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the job for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L245-L260 |
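
Because a value of ``None`` deletes a property and unmentioned properties are left alone, updates can be expressed as sparse dictionaries. A sketch, again with a hypothetical job ID:

```python
import dxpy

job = dxpy.DXJob("job-xxxx")  # hypothetical job ID

# Set two properties; any others already on the job are untouched.
job.set_properties({"sample": "NA12878", "batch": "2024-06"})

# None deletes a property; "sample" survives this call.
job.set_properties({"batch": None})
```
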
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | DXJob.wait_on_done | def wait_on_done(self, interval=2, timeout=3600*24*7, **kwargs):
'''
:param interval: Number of seconds between queries to the job's state
:type interval: integer
:param timeout: Maximum amount of time to wait, in seconds, until the job is done running
:type timeout: integer
:raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the job has finished running, or :exc:`dxpy.exceptions.DXJobFailureError` if the job fails
Waits until the job has finished running.
'''
elapsed = 0
while True:
state = self._get_state(**kwargs)
if state == "done":
break
if state == "failed":
desc = self.describe(**kwargs)
err_msg = "Job has failed because of {failureReason}: {failureMessage}".format(**desc)
if desc.get("failureFrom") != None and desc["failureFrom"]["id"] != desc["id"]:
err_msg += " (failure from {id})".format(id=desc['failureFrom']['id'])
raise DXJobFailureError(err_msg)
if state == "terminated":
raise DXJobFailureError("Job was terminated.")
if elapsed >= timeout or elapsed < 0:
raise DXJobFailureError("Reached timeout while waiting for the job to finish")
time.sleep(interval)
elapsed += interval | python | def wait_on_done(self, interval=2, timeout=3600*24*7, **kwargs):
'''
:param interval: Number of seconds between queries to the job's state
:type interval: integer
:param timeout: Maximum amount of time to wait, in seconds, until the job is done running
:type timeout: integer
:raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the job has finished running, or :exc:`dxpy.exceptions.DXJobFailureError` if the job fails
Waits until the job has finished running.
'''
elapsed = 0
while True:
state = self._get_state(**kwargs)
if state == "done":
break
if state == "failed":
desc = self.describe(**kwargs)
err_msg = "Job has failed because of {failureReason}: {failureMessage}".format(**desc)
if desc.get("failureFrom") != None and desc["failureFrom"]["id"] != desc["id"]:
err_msg += " (failure from {id})".format(id=desc['failureFrom']['id'])
raise DXJobFailureError(err_msg)
if state == "terminated":
raise DXJobFailureError("Job was terminated.")
if elapsed >= timeout or elapsed < 0:
raise DXJobFailureError("Reached timeout while waiting for the job to finish")
time.sleep(interval)
elapsed += interval | :param interval: Number of seconds between queries to the job's state
:type interval: integer
:param timeout: Maximum amount of time to wait, in seconds, until the job is done running
:type timeout: integer
:raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the job has finished running, or :exc:`dxpy.exceptions.DXJobFailureError` if the job fails
Waits until the job has finished running. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L262-L291 |
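
Note that a timeout raises ``DXJobFailureError`` (a subclass of ``DXError``), the same exception type used for failed or terminated jobs, so one handler covers all three outcomes. A sketch with a hypothetical job ID:

```python
import dxpy
from dxpy.exceptions import DXJobFailureError

job = dxpy.DXJob("job-xxxx")  # hypothetical job ID

try:
    job.wait_on_done(interval=10, timeout=3600)  # poll every 10 s, wait up to 1 h
except DXJobFailureError as e:
    # Covers failure, termination, and timeout alike.
    print("Job did not finish cleanly:", e)
```
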
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxjob.py | DXJob._get_state | def _get_state(self, **kwargs):
'''
:returns: State of the remote object
:rtype: string
Queries the API server for the job's state.
Note that this function is shorthand for:
dxjob.describe(fields={"state": True}, **kwargs)["state"]
'''
return self.describe(fields=dict(state=True), **kwargs)["state"] | python | def _get_state(self, **kwargs):
'''
:returns: State of the remote object
:rtype: string
Queries the API server for the job's state.
Note that this function is shorthand for:
dxjob.describe(fields={"state": True}, **kwargs)["state"]
'''
return self.describe(fields=dict(state=True), **kwargs)["state"] | :returns: State of the remote object
:rtype: string
Queries the API server for the job's state.
Note that this function is shorthand for:
dxjob.describe(fields={"state": True}, **kwargs)["state"] | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L330-L343 |
dnanexus/dx-toolkit | src/python/dxpy/cli/org.py | get_user_id | def get_user_id(user_id_or_username):
"""Gets the user ID based on the value `user_id_or_username` specified on
the command-line, being extra lenient and lowercasing the value in all
cases.
"""
user_id_or_username = user_id_or_username.lower()
if not user_id_or_username.startswith("user-"):
user_id = "user-" + user_id_or_username.lower()
else:
user_id = user_id_or_username
return user_id | python | def get_user_id(user_id_or_username):
"""Gets the user ID based on the value `user_id_or_username` specified on
the command-line, being extra lenient and lowercasing the value in all
cases.
"""
user_id_or_username = user_id_or_username.lower()
if not user_id_or_username.startswith("user-"):
user_id = "user-" + user_id_or_username.lower()
else:
user_id = user_id_or_username
return user_id | Gets the user ID based on the value `user_id_or_username` specified on
the command-line, being extra lenient and lowercasing the value in all
cases. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/org.py#L32-L42 |
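
The normalization is easy to pin down with a few examples:

```python
from dxpy.cli.org import get_user_id

assert get_user_id("Alice") == "user-alice"
assert get_user_id("user-alice") == "user-alice"
assert get_user_id("USER-Alice") == "user-alice"  # prefixed IDs are lowercased too
```
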
dnanexus/dx-toolkit | src/python/dxpy/cli/org.py | get_org_invite_args | def get_org_invite_args(user_id, args):
"""
Used by:
- `dx new user`
- `dx add member`
PRECONDITION:
- If /org-x/invite is being called in conjunction with /user/new, then
`_validate_new_user_input()` has been called on `args`; otherwise,
the parser must perform all the basic input validation.
"""
org_invite_args = {"invitee": user_id}
org_invite_args["level"] = args.level
if "set_bill_to" in args and args.set_bill_to is True:
# /org-x/invite is called in conjunction with /user/new.
org_invite_args["allowBillableActivities"] = True
else:
org_invite_args["allowBillableActivities"] = args.allow_billable_activities
org_invite_args["appAccess"] = args.app_access
org_invite_args["projectAccess"] = args.project_access
org_invite_args["suppressEmailNotification"] = args.no_email
return org_invite_args | python | def get_org_invite_args(user_id, args):
"""
Used by:
- `dx new user`
- `dx add member`
PRECONDITION:
- If /org-x/invite is being called in conjunction with /user/new, then
`_validate_new_user_input()` has been called on `args`; otherwise,
the parser must perform all the basic input validation.
"""
org_invite_args = {"invitee": user_id}
org_invite_args["level"] = args.level
if "set_bill_to" in args and args.set_bill_to is True:
# /org-x/invite is called in conjunction with /user/new.
org_invite_args["allowBillableActivities"] = True
else:
org_invite_args["allowBillableActivities"] = args.allow_billable_activities
org_invite_args["appAccess"] = args.app_access
org_invite_args["projectAccess"] = args.project_access
org_invite_args["suppressEmailNotification"] = args.no_email
return org_invite_args | Used by:
- `dx new user`
- `dx add member`
PRECONDITION:
- If /org-x/invite is being called in conjunction with /user/new, then
`_validate_new_user_input()` has been called on `args`; otherwise,
the parser must perform all the basic input validation. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/org.py#L45-L66 |
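
In normal operation *args* is the namespace produced by the ``dx`` argument parser; for illustration it can be faked with ``argparse.Namespace``, whose ``in`` operator is what makes the ``"set_bill_to" in args`` check work. The attribute names below mirror the parser's dest values and are illustrative:

```python
from argparse import Namespace
from dxpy.cli.org import get_org_invite_args

# Illustrative stand-in for the parsed `dx add member` arguments.
args = Namespace(level="MEMBER",
                 allow_billable_activities=False,
                 app_access=True,
                 project_access="CONTRIBUTE",
                 no_email=False)

print(get_org_invite_args("user-alice", args))
# {'invitee': 'user-alice', 'level': 'MEMBER',
#  'allowBillableActivities': False, 'appAccess': True,
#  'projectAccess': 'CONTRIBUTE', 'suppressEmailNotification': False}
```
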
dnanexus/dx-toolkit | src/python/dxpy/scripts/dx.py | _get_user_new_args | def _get_user_new_args(args):
"""
PRECONDITION: `_validate_new_user_input()` has been called on `args`.
"""
user_new_args = {"username": args.username,
"email": args.email}
if args.first is not None:
user_new_args["first"] = args.first
if args.last is not None:
user_new_args["last"] = args.last
if args.middle is not None:
user_new_args["middle"] = args.middle
if args.token_duration is not None:
token_duration_ms = normalize_timedelta(args.token_duration)
if token_duration_ms > 30 * 24 * 60 * 60 * 1000:
raise ValueError("--token-duration must be 30 days or less")
else:
user_new_args["tokenDuration"] = token_duration_ms
if args.occupation is not None:
user_new_args["occupation"] = args.occupation
if args.set_bill_to is True:
user_new_args["billTo"] = args.org
return user_new_args | python | def _get_user_new_args(args):
"""
PRECONDITION: `_validate_new_user_input()` has been called on `args`.
"""
user_new_args = {"username": args.username,
"email": args.email}
if args.first is not None:
user_new_args["first"] = args.first
if args.last is not None:
user_new_args["last"] = args.last
if args.middle is not None:
user_new_args["middle"] = args.middle
if args.token_duration is not None:
token_duration_ms = normalize_timedelta(args.token_duration)
if token_duration_ms > 30 * 24 * 60 * 60 * 1000:
raise ValueError("--token-duration must be 30 days or less")
else:
user_new_args["tokenDuration"] = token_duration_ms
if args.occupation is not None:
user_new_args["occupation"] = args.occupation
if args.set_bill_to is True:
user_new_args["billTo"] = args.org
return user_new_args | PRECONDITION: `_validate_new_user_input()` has been called on `args`. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/scripts/dx.py#L1332-L1354 |
dnanexus/dx-toolkit | src/python/dxpy/scripts/dx.py | _get_input_for_run | def _get_input_for_run(args, executable, preset_inputs=None, input_name_prefix=None):
"""
Returns an input dictionary that can be passed to executable.run()
"""
# The following may throw if the executable is a workflow with no
# input spec available (because a stage is inaccessible)
exec_inputs = try_call(ExecutableInputs,
executable,
input_name_prefix=input_name_prefix,
active_region=args.region)
# Use input and system requirements from a cloned execution
if args.input_json is None and args.filename is None:
# --input-json and --input-json-file completely override input
# from the cloned job
exec_inputs.update(args.input_from_clone, strip_prefix=False)
# Update with inputs passed to this function
if preset_inputs is not None:
exec_inputs.update(preset_inputs, strip_prefix=False)
# Update with inputs passed with -i, --input-json, --input-json-file, etc.
# If batch_tsv is set, do not prompt for missing arguments
require_all_inputs = (args.batch_tsv is None)
try_call(exec_inputs.update_from_args, args, require_all_inputs)
return exec_inputs.inputs | python | def _get_input_for_run(args, executable, preset_inputs=None, input_name_prefix=None):
"""
Returns an input dictionary that can be passed to executable.run()
"""
# The following may throw if the executable is a workflow with no
# input spec available (because a stage is inaccessible)
exec_inputs = try_call(ExecutableInputs,
executable,
input_name_prefix=input_name_prefix,
active_region=args.region)
# Use input and system requirements from a cloned execution
if args.input_json is None and args.filename is None:
# --input-json and --input-json-file completely override input
# from the cloned job
exec_inputs.update(args.input_from_clone, strip_prefix=False)
# Update with inputs passed to this function
if preset_inputs is not None:
exec_inputs.update(preset_inputs, strip_prefix=False)
# Update with inputs passed with -i, --input-json, --input-json-file, etc.
# If batch_tsv is set, do not prompt for missing arguments
require_all_inputs = (args.batch_tsv is None)
try_call(exec_inputs.update_from_args, args, require_all_inputs)
return exec_inputs.inputs | Returns an input dictionary that can be passed to executable.run() | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/scripts/dx.py#L2683-L2709 |
dnanexus/dx-toolkit | src/python/dxpy/scripts/dx.py | register_parser | def register_parser(parser, subparsers_action=None, categories=('other', ), add_help=True):
"""Attaches `parser` to the global ``parser_map``. If `add_help` is truthy,
then adds the helpstring of `parser` into the output of ``dx help...``, for
each category in `categories`.
:param subparsers_action: A special action object that is returned by
``ArgumentParser.add_subparsers(...)``, or None.
:type subparsers_action: argparse._SubParsersAction, or None.
"""
name = re.sub('^dx ', '', parser.prog)
if subparsers_action is None:
subparsers_action = subparsers
if isinstance(categories, basestring):
categories = (categories, )
parser_map[name] = parser
if add_help:
_help = subparsers_action._choices_actions[-1].help
parser_categories['all']['cmds'].append((name, _help))
for category in categories:
parser_categories[category]['cmds'].append((name, _help)) | python | def register_parser(parser, subparsers_action=None, categories=('other', ), add_help=True):
"""Attaches `parser` to the global ``parser_map``. If `add_help` is truthy,
then adds the helpstring of `parser` into the output of ``dx help...``, for
each category in `categories`.
:param subparsers_action: A special action object that is returned by
``ArgumentParser.add_subparsers(...)``, or None.
:type subparsers_action: argparse._SubParsersAction, or None.
"""
name = re.sub('^dx ', '', parser.prog)
if subparsers_action is None:
subparsers_action = subparsers
if isinstance(categories, basestring):
categories = (categories, )
parser_map[name] = parser
if add_help:
_help = subparsers_action._choices_actions[-1].help
parser_categories['all']['cmds'].append((name, _help))
for category in categories:
parser_categories[category]['cmds'].append((name, _help)) | Attaches `parser` to the global ``parser_map``. If `add_help` is truthy,
then adds the helpstring of `parser` into the output of ``dx help...``, for
each category in `categories`.
:param subparsers_action: A special action object that is returned by
``ArgumentParser.add_subparsers(...)``, or None.
:type subparsers_action: argparse._SubParsersAction, or None. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/scripts/dx.py#L3805-L3825 |
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | _is_retryable_exception | def _is_retryable_exception(e):
"""Returns True if the exception is always safe to retry.
This is True if the client was never able to establish a connection
to the server (for example, name resolution failed or the connection
could otherwise not be initialized).
Conservatively, if we can't tell whether a network connection could
have been established, we return False.
"""
if isinstance(e, urllib3.exceptions.ProtocolError):
e = e.args[1]
if isinstance(e, (socket.gaierror, socket.herror)):
return True
if isinstance(e, socket.error) and e.errno in _RETRYABLE_SOCKET_ERRORS:
return True
if isinstance(e, urllib3.exceptions.NewConnectionError):
return True
return False | python | def _is_retryable_exception(e):
"""Returns True if the exception is always safe to retry.
This is True if the client was never able to establish a connection
to the server (for example, name resolution failed or the connection
could otherwise not be initialized).
Conservatively, if we can't tell whether a network connection could
have been established, we return False.
"""
if isinstance(e, urllib3.exceptions.ProtocolError):
e = e.args[1]
if isinstance(e, (socket.gaierror, socket.herror)):
return True
if isinstance(e, socket.error) and e.errno in _RETRYABLE_SOCKET_ERRORS:
return True
if isinstance(e, urllib3.exceptions.NewConnectionError):
return True
return False | Returns True if the exception is always safe to retry.
This is True if the client was never able to establish a connection
to the server (for example, name resolution failed or the connection
could otherwise not be initialized).
Conservatively, if we can't tell whether a network connection could
have been established, we return False. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L326-L345 |
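
A predicate like this is typically consumed by a retry loop. The helper itself is private, so the sketch below wires up an equivalent loop by hand rather than reproducing dxpy's internal one:

```python
import time

def call_with_retries(fn, is_retryable, max_retries=5):
    """Retry fn() while is_retryable(exception) says it is safe to do so."""
    for attempt in range(max_retries + 1):
        try:
            return fn()
        except Exception as e:
            if attempt == max_retries or not is_retryable(e):
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff
```
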
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | _extract_msg_from_last_exception | def _extract_msg_from_last_exception():
''' Extract a useful error message from the last thrown exception '''
last_exc_type, last_error, last_traceback = sys.exc_info()
if isinstance(last_error, exceptions.DXAPIError):
# Using the same code path as below would not
# produce a useful message when the error contains a
# 'details' hash (which would have a last line of
# '}')
return last_error.error_message()
else:
return traceback.format_exc().splitlines()[-1].strip() | python | def _extract_msg_from_last_exception():
''' Extract a useful error message from the last thrown exception '''
last_exc_type, last_error, last_traceback = sys.exc_info()
if isinstance(last_error, exceptions.DXAPIError):
# Using the same code path as below would not
# produce a useful message when the error contains a
# 'details' hash (which would have a last line of
# '}')
return last_error.error_message()
else:
return traceback.format_exc().splitlines()[-1].strip() | Extract a useful error message from the last thrown exception | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L347-L357 |
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | _calculate_retry_delay | def _calculate_retry_delay(response, num_attempts):
'''
Returns the time in seconds that we should wait.
:param num_attempts: number of attempts that have been made to the
resource, including the most recent failed one
:type num_attempts: int
'''
if response is not None and response.status == 503 and 'retry-after' in response.headers:
try:
return int(response.headers['retry-after'])
except ValueError:
# In RFC 2616, retry-after can be formatted as absolute time
# instead of seconds to wait. We don't bother to parse that,
# but the apiserver doesn't generate such responses anyway.
pass
if num_attempts <= 1:
return 1
num_attempts = min(num_attempts, 7)
return randint(2 ** (num_attempts - 2), 2 ** (num_attempts - 1)) | python | def _calculate_retry_delay(response, num_attempts):
'''
Returns the time in seconds that we should wait.
:param num_attempts: number of attempts that have been made to the
resource, including the most recent failed one
:type num_attempts: int
'''
if response is not None and response.status == 503 and 'retry-after' in response.headers:
try:
return int(response.headers['retry-after'])
except ValueError:
# In RFC 2616, retry-after can be formatted as absolute time
# instead of seconds to wait. We don't bother to parse that,
# but the apiserver doesn't generate such responses anyway.
pass
if num_attempts <= 1:
return 1
num_attempts = min(num_attempts, 7)
return randint(2 ** (num_attempts - 2), 2 ** (num_attempts - 1)) | Returns the time in seconds that we should wait.
:param num_attempts: number of attempts that have been made to the
resource, including the most recent failed one
:type num_attempts: int | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L360-L379 |
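
With no ``Retry-After`` header the schedule is capped exponential backoff with jitter: attempt *n* waits a random number of seconds in [2^(n-2), 2^(n-1)], with *n* capped at 7, so the worst-case wait is 64 seconds. The helper is private, but the schedule can be sampled directly for illustration:

```python
from dxpy import _calculate_retry_delay  # private helper, shown for illustration

for attempt in range(1, 9):
    print(attempt, _calculate_retry_delay(None, attempt))
# 1 -> 1, 2 -> 1..2, 3 -> 2..4, ..., 7 and beyond -> 32..64
```
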
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | DXHTTPRequest | def DXHTTPRequest(resource, data, method='POST', headers=None, auth=True,
timeout=DEFAULT_TIMEOUT,
use_compression=None, jsonify_data=True, want_full_response=False,
decode_response_body=True, prepend_srv=True, session_handler=None,
max_retries=DEFAULT_RETRIES, always_retry=False,
**kwargs):
'''
:param resource: API server route, e.g. "/record/new". If *prepend_srv* is False, a fully qualified URL is expected. If this argument is a callable, it will be called just before each request attempt, and expected to return a tuple (URL, headers). Headers returned by the callback are updated with *headers* (including headers set by this method).
:type resource: string
:param data: Content of the request body
:type data: list or dict, if *jsonify_data* is True; or string or file-like object, otherwise
:param headers: Names and values of HTTP headers to submit with the request (in addition to those needed for authentication, compression, or other options specified with the call).
:type headers: dict
:param auth:
Controls the ``Authentication`` header or other means of authentication supplied with the request. If ``True``
(default), a token is obtained from the ``DX_SECURITY_CONTEXT``. If the value evaluates to false, no action is
taken to prepare authentication for the request. Otherwise, the value is assumed to be callable, and called with
three arguments (method, url, headers) and expected to prepare the authentication headers by reference.
:type auth: tuple, object, True (default), or None
:param timeout: HTTP request timeout, in seconds
:type timeout: float
:param config: *config* value to pass through to :meth:`requests.request`
:type config: dict
:param use_compression: Deprecated
:type use_compression: string or None
:param jsonify_data: If True, *data* is converted from a Python list or dict to a JSON string
:type jsonify_data: boolean
:param want_full_response: If True, the full :class:`requests.Response` object is returned (otherwise, only the content of the response body is returned)
:type want_full_response: boolean
:param decode_response_body: If True (and *want_full_response* is False), the response body is decoded and, if it is a JSON string, deserialized. Otherwise, the response body is uncompressed if transport compression is on, and returned raw.
:type decode_response_body: boolean
:param prepend_srv: If True, prepends the API server location to the URL
:type prepend_srv: boolean
:param session_handler: Deprecated.
:param max_retries: Maximum number of retries to perform for a request. A "failed" request is retried if any of the following is true:
- A response is received from the server, and the content length received does not match the "Content-Length" header.
- A response is received from the server, and the response has an HTTP status code in 5xx range.
- A response is received from the server, the "Content-Length" header is not set, and the response JSON cannot be parsed.
- No response is received from the server, and either *always_retry* is True or the request *method* is "GET".
:type max_retries: int
:param always_retry: If True, indicates that it is safe to retry a request on failure
- Note: It is not guaranteed that the request will *always* be retried on failure; rather, this is an indication to the function that it would be safe to do so.
:type always_retry: boolean
:returns: Response from API server in the format indicated by *want_full_response* and *decode_response_body*.
:raises: :exc:`exceptions.DXAPIError` or a subclass if the server returned a non-200 status code; :exc:`requests.exceptions.HTTPError` if an invalid response was received from the server; or :exc:`requests.exceptions.ConnectionError` if a connection cannot be established.
Wrapper around :meth:`requests.request()` that makes an HTTP
request, inserting authentication headers and (by default)
converting *data* to JSON.
.. note:: Bindings methods that make API calls make the underlying
HTTP request(s) using :func:`DXHTTPRequest`, and most of them
will pass any unrecognized keyword arguments you have supplied
through to :func:`DXHTTPRequest`.
'''
if headers is None:
headers = {}
global _UPGRADE_NOTIFY
seq_num = _get_sequence_number()
url = APISERVER + resource if prepend_srv else resource
method = method.upper() # Convert method name to uppercase, to ease string comparisons later
if auth is True:
auth = AUTH_HELPER
if auth:
auth(_RequestForAuth(method, url, headers))
pool_args = {arg: kwargs.pop(arg, None) for arg in ("verify", "cert_file", "key_file")}
test_retry = kwargs.pop("_test_retry_http_request", False)
# data is a sequence/buffer or a dict
# serialized_data is a sequence/buffer
if jsonify_data:
serialized_data = json.dumps(data)
if 'Content-Type' not in headers and method == 'POST':
headers['Content-Type'] = 'application/json'
else:
serialized_data = data
# If the input is a buffer, its data gets consumed by
# requests.request (moving the read position). Record the initial
# buffer position so that we can return to it if the request fails
# and needs to be retried.
rewind_input_buffer_offset = None
if hasattr(data, 'seek') and hasattr(data, 'tell'):
rewind_input_buffer_offset = data.tell()
# Maintain two separate counters for the number of tries...
try_index = 0 # excluding 503 errors. The number of tries as given here
# cannot exceed (max_retries + 1).
try_index_including_503 = 0 # including 503 errors. This number is used to
# do exponential backoff.
retried_responses = []
_url = None
while True:
success, time_started = True, None
response = None
req_id = None
try:
time_started = time.time()
_method, _url, _headers = _process_method_url_headers(method, url, headers)
_debug_print_request(_DEBUG, seq_num, time_started, _method, _url, _headers, jsonify_data, data)
body = _maybe_truncate_request(_url, serialized_data)
# throws BadStatusLine if the server returns nothing
try:
pool_manager = _get_pool_manager(**pool_args)
_headers['User-Agent'] = USER_AGENT
_headers['DNAnexus-API'] = API_VERSION
# Convert Unicode headers to ASCII and throw an error if not possible
def ensure_ascii(i):
if not isinstance(i, bytes):
i = i.encode('ascii')
return i
_headers = {ensure_ascii(k): ensure_ascii(v) for k, v in _headers.items()}
if USING_PYTHON2:
encoded_url = _url
else:
# This is needed for python 3 urllib
_headers.pop(b'host', None)
_headers.pop(b'content-length', None)
# The libraries downstream (http client) require elimination of non-ascii
# chars from URL.
# We check if the URL contains non-ascii characters to see if we need to
# quote it. It is important not to always quote the path (here: parts[2])
# since it might contain elements (e.g. HMAC for api proxy) containing
# special characters that should not be quoted.
try:
ensure_ascii(_url)
encoded_url = _url
except UnicodeEncodeError:
import urllib.parse
parts = list(urllib.parse.urlparse(_url))
parts[2] = urllib.parse.quote(parts[2])
encoded_url = urllib.parse.urlunparse(parts)
response = pool_manager.request(_method, encoded_url, headers=_headers, body=body,
timeout=timeout, retries=False, **kwargs)
except urllib3.exceptions.ClosedPoolError:
# If another thread closed the pool before the request was
# started, will throw ClosedPoolError
raise exceptions.UrllibInternalError("ClosedPoolError")
_raise_error_for_testing(try_index, method)
req_id = response.headers.get("x-request-id", "unavailable")
if (_UPGRADE_NOTIFY
and response.headers.get('x-upgrade-info', '').startswith('A recommended update is available')
and '_ARGCOMPLETE' not in os.environ):
logger.info(response.headers['x-upgrade-info'])
try:
with open(_UPGRADE_NOTIFY, 'a'):
os.utime(_UPGRADE_NOTIFY, None)
except:
pass
_UPGRADE_NOTIFY = False
# If an HTTP code that is not in the 200 series is received and the content is JSON, parse it and throw the
# appropriate error. Otherwise, raise the usual exception.
if response.status // 100 != 2:
# response.headers key lookup is case-insensitive
if response.headers.get('content-type', '').startswith('application/json'):
try:
content = response.data.decode('utf-8')
except AttributeError:
raise exceptions.UrllibInternalError("Content is none", response.status)
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
try:
error_class = getattr(exceptions, content["error"]["type"], exceptions.DXAPIError)
except (KeyError, AttributeError, TypeError):
raise exceptions.HTTPError(response.status, content)
raise error_class(content, response.status, time_started, req_id)
else:
try:
content = response.data.decode('utf-8')
except AttributeError:
raise exceptions.UrllibInternalError("Content is none", response.status)
raise exceptions.HTTPError("{} {} [Time={} RequestID={}]\n{}".format(response.status,
response.reason,
time_started,
req_id,
content))
if want_full_response:
return response
else:
if 'content-length' in response.headers:
if int(response.headers['content-length']) != len(response.data):
range_str = (' (%s)' % (headers['Range'],)) if 'Range' in headers else ''
raise exceptions.ContentLengthError(
"Received response with content-length header set to %s but content length is %d%s. " +
"[Time=%f RequestID=%s]" %
(response.headers['content-length'], len(response.data), range_str, time_started, req_id)
)
content = response.data
response_was_json = False
if decode_response_body:
content = content.decode('utf-8')
if response.headers.get('content-type', '').startswith('application/json'):
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
else:
response_was_json = True
req_id = response.headers.get('x-request-id') or "--"
_debug_print_response(_DEBUG, seq_num, time_started, req_id, response.status, response_was_json,
_method, _url, content)
if test_retry:
retried_responses.append(content)
if len(retried_responses) == 1:
continue
else:
_set_retry_response(retried_responses[0])
return retried_responses[1]
return content
raise AssertionError('Should never reach this line: expected a result to have been returned by now')
except Exception as e:
# Avoid reusing connections in the pool, since they may be
# in an inconsistent state (observed as "ResponseNotReady"
# errors).
_get_pool_manager(**pool_args).clear()
success = False
exception_msg = _extract_msg_from_last_exception()
if isinstance(e, _expected_exceptions):
# Total number of allowed tries is the initial try PLUS
# up to (max_retries) subsequent retries.
total_allowed_tries = max_retries + 1
ok_to_retry = False
is_retryable = always_retry or (method == 'GET') or _is_retryable_exception(e)
# Because try_index is not incremented until we escape
# this iteration of the loop, try_index is equal to the
# number of tries that have failed so far, minus one.
if try_index + 1 < total_allowed_tries:
# BadStatusLine --- server did not return anything
# BadJSONInReply --- server returned JSON that didn't parse properly
if (response is None
or isinstance(e, (exceptions.ContentLengthError, BadStatusLine, exceptions.BadJSONInReply,
urllib3.exceptions.ProtocolError, exceptions.UrllibInternalError))):
ok_to_retry = is_retryable
else:
ok_to_retry = 500 <= response.status < 600
# The server has closed the connection prematurely
if (response is not None
and response.status == 400 and is_retryable and method == 'PUT'
and isinstance(e, requests.exceptions.HTTPError)):
if '<Code>RequestTimeout</Code>' in exception_msg:
logger.info("Retrying 400 HTTP error, due to slow data transfer. " +
"Request Time=%f Request ID=%s", time_started, req_id)
else:
logger.info("400 HTTP error, of unknown origin, exception_msg=[%s]. " +
"Request Time=%f Request ID=%s", exception_msg, time_started, req_id)
ok_to_retry = True
# Unprocessable entity: the request has semantic errors
if response is not None and response.status == 422:
ok_to_retry = False
if ok_to_retry:
if rewind_input_buffer_offset is not None:
data.seek(rewind_input_buffer_offset)
delay = _calculate_retry_delay(response, try_index_including_503 + 1)
range_str = (' (range=%s)' % (headers['Range'],)) if 'Range' in headers else ''
if response is not None and response.status == 503:
waiting_msg = 'Waiting %d seconds before retry...' % (delay,)
else:
waiting_msg = 'Waiting %d seconds before retry %d of %d...' % (
delay, try_index + 1, max_retries)
logger.warning("[%s] %s %s: %s. %s %s",
time.ctime(), method, _url, exception_msg, waiting_msg, range_str)
time.sleep(delay)
try_index_including_503 += 1
if response is None or response.status != 503:
try_index += 1
continue
# All retries have been exhausted OR the error is deemed not
# retryable. Print the latest error and propagate it back to the caller.
if not isinstance(e, exceptions.DXAPIError):
logger.error("[%s] %s %s: %s.", time.ctime(), method, _url, exception_msg)
if isinstance(e, urllib3.exceptions.ProtocolError) and \
'Connection reset by peer' in exception_msg:
# If the protocol error is 'connection reset by peer', most likely it is an
# error in the ssl handshake due to unsupported TLS protocol.
_test_tls_version()
# Retries have been exhausted, and we are unable to get a full
# buffer from the data source. Raise a special exception.
if isinstance(e, urllib3.exceptions.ProtocolError) and \
'Connection broken: IncompleteRead' in exception_msg:
raise exceptions.DXIncompleteReadsError(exception_msg)
raise
finally:
if success and try_index > 0:
logger.info("[%s] %s %s: Recovered after %d retries", time.ctime(), method, _url, try_index)
raise AssertionError('Should never reach this line: should have attempted a retry or reraised by now')
raise AssertionError('Should never reach this line: should never break out of loop') | python | def DXHTTPRequest(resource, data, method='POST', headers=None, auth=True,
timeout=DEFAULT_TIMEOUT,
use_compression=None, jsonify_data=True, want_full_response=False,
decode_response_body=True, prepend_srv=True, session_handler=None,
max_retries=DEFAULT_RETRIES, always_retry=False,
**kwargs):
'''
:param resource: API server route, e.g. "/record/new". If *prepend_srv* is False, a fully qualified URL is expected. If this argument is a callable, it will be called just before each request attempt, and expected to return a tuple (URL, headers). Headers returned by the callback are updated with *headers* (including headers set by this method).
:type resource: string
:param data: Content of the request body
:type data: list or dict, if *jsonify_data* is True; or string or file-like object, otherwise
:param headers: Names and values of HTTP headers to submit with the request (in addition to those needed for authentication, compression, or other options specified with the call).
:type headers: dict
:param auth:
Controls the ``Authentication`` header or other means of authentication supplied with the request. If ``True``
(default), a token is obtained from the ``DX_SECURITY_CONTEXT``. If the value evaluates to false, no action is
taken to prepare authentication for the request. Otherwise, the value is assumed to be callable, and called with
three arguments (method, url, headers) and expected to prepare the authentication headers by reference.
:type auth: tuple, object, True (default), or None
:param timeout: HTTP request timeout, in seconds
:type timeout: float
:param config: *config* value to pass through to :meth:`requests.request`
:type config: dict
:param use_compression: Deprecated
:type use_compression: string or None
:param jsonify_data: If True, *data* is converted from a Python list or dict to a JSON string
:type jsonify_data: boolean
:param want_full_response: If True, the full :class:`requests.Response` object is returned (otherwise, only the content of the response body is returned)
:type want_full_response: boolean
:param decode_response_body: If True (and *want_full_response* is False), the response body is decoded and, if it is a JSON string, deserialized. Otherwise, the response body is uncompressed if transport compression is on, and returned raw.
:type decode_response_body: boolean
:param prepend_srv: If True, prepends the API server location to the URL
:type prepend_srv: boolean
:param session_handler: Deprecated.
:param max_retries: Maximum number of retries to perform for a request. A "failed" request is retried if any of the following is true:
- A response is received from the server, and the content length received does not match the "Content-Length" header.
- A response is received from the server, and the response has an HTTP status code in 5xx range.
- A response is received from the server, the "Content-Length" header is not set, and the response JSON cannot be parsed.
- No response is received from the server, and either *always_retry* is True or the request *method* is "GET".
:type max_retries: int
:param always_retry: If True, indicates that it is safe to retry a request on failure
- Note: It is not guaranteed that the request will *always* be retried on failure; rather, this is an indication to the function that it would be safe to do so.
:type always_retry: boolean
:returns: Response from API server in the format indicated by *want_full_response* and *decode_response_body*.
:raises: :exc:`exceptions.DXAPIError` or a subclass if the server returned a non-200 status code; :exc:`requests.exceptions.HTTPError` if an invalid response was received from the server; or :exc:`requests.exceptions.ConnectionError` if a connection cannot be established.
Wrapper around :meth:`requests.request()` that makes an HTTP
request, inserting authentication headers and (by default)
converting *data* to JSON.
.. note:: Bindings methods that make API calls make the underlying
HTTP request(s) using :func:`DXHTTPRequest`, and most of them
will pass any unrecognized keyword arguments you have supplied
through to :func:`DXHTTPRequest`.
'''
if headers is None:
headers = {}
global _UPGRADE_NOTIFY
seq_num = _get_sequence_number()
url = APISERVER + resource if prepend_srv else resource
method = method.upper() # Convert method name to uppercase, to ease string comparisons later
if auth is True:
auth = AUTH_HELPER
if auth:
auth(_RequestForAuth(method, url, headers))
pool_args = {arg: kwargs.pop(arg, None) for arg in ("verify", "cert_file", "key_file")}
test_retry = kwargs.pop("_test_retry_http_request", False)
# data is a sequence/buffer or a dict
# serialized_data is a sequence/buffer
if jsonify_data:
serialized_data = json.dumps(data)
if 'Content-Type' not in headers and method == 'POST':
headers['Content-Type'] = 'application/json'
else:
serialized_data = data
# If the input is a buffer, its data gets consumed by
# requests.request (moving the read position). Record the initial
# buffer position so that we can return to it if the request fails
# and needs to be retried.
rewind_input_buffer_offset = None
if hasattr(data, 'seek') and hasattr(data, 'tell'):
rewind_input_buffer_offset = data.tell()
# Maintain two separate counters for the number of tries...
try_index = 0 # excluding 503 errors. The number of tries as given here
# cannot exceed (max_retries + 1).
try_index_including_503 = 0 # including 503 errors. This number is used to
# do exponential backoff.
retried_responses = []
_url = None
while True:
success, time_started = True, None
response = None
req_id = None
try:
time_started = time.time()
_method, _url, _headers = _process_method_url_headers(method, url, headers)
_debug_print_request(_DEBUG, seq_num, time_started, _method, _url, _headers, jsonify_data, data)
body = _maybe_truncate_request(_url, serialized_data)
# throws BadStatusLine if the server returns nothing
try:
pool_manager = _get_pool_manager(**pool_args)
_headers['User-Agent'] = USER_AGENT
_headers['DNAnexus-API'] = API_VERSION
# Convert Unicode headers to ASCII and throw an error if not possible
def ensure_ascii(i):
if not isinstance(i, bytes):
i = i.encode('ascii')
return i
_headers = {ensure_ascii(k): ensure_ascii(v) for k, v in _headers.items()}
if USING_PYTHON2:
encoded_url = _url
else:
# This is needed for python 3 urllib
_headers.pop(b'host', None)
_headers.pop(b'content-length', None)
# The libraries downstream (http client) require elimination of non-ascii
# chars from URL.
# We check if the URL contains non-ascii characters to see if we need to
# quote it. It is important not to always quote the path (here: parts[2])
# since it might contain elements (e.g. HMAC for api proxy) containing
# special characters that should not be quoted.
try:
ensure_ascii(_url)
encoded_url = _url
except UnicodeEncodeError:
import urllib.parse
parts = list(urllib.parse.urlparse(_url))
parts[2] = urllib.parse.quote(parts[2])
encoded_url = urllib.parse.urlunparse(parts)
response = pool_manager.request(_method, encoded_url, headers=_headers, body=body,
timeout=timeout, retries=False, **kwargs)
except urllib3.exceptions.ClosedPoolError:
# If another thread closed the pool before the request was
# started, will throw ClosedPoolError
raise exceptions.UrllibInternalError("ClosedPoolError")
_raise_error_for_testing(try_index, method)
req_id = response.headers.get("x-request-id", "unavailable")
if (_UPGRADE_NOTIFY
and response.headers.get('x-upgrade-info', '').startswith('A recommended update is available')
and '_ARGCOMPLETE' not in os.environ):
logger.info(response.headers['x-upgrade-info'])
try:
with open(_UPGRADE_NOTIFY, 'a'):
os.utime(_UPGRADE_NOTIFY, None)
except:
pass
_UPGRADE_NOTIFY = False
# If an HTTP code that is not in the 200 series is received and the content is JSON, parse it and throw the
# appropriate error. Otherwise, raise the usual exception.
if response.status // 100 != 2:
# response.headers key lookup is case-insensitive
if response.headers.get('content-type', '').startswith('application/json'):
try:
content = response.data.decode('utf-8')
except AttributeError:
raise exceptions.UrllibInternalError("Content is none", response.status)
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
try:
error_class = getattr(exceptions, content["error"]["type"], exceptions.DXAPIError)
except (KeyError, AttributeError, TypeError):
raise exceptions.HTTPError(response.status, content)
raise error_class(content, response.status, time_started, req_id)
else:
try:
content = response.data.decode('utf-8')
except AttributeError:
raise exceptions.UrllibInternalError("Content is none", response.status)
raise exceptions.HTTPError("{} {} [Time={} RequestID={}]\n{}".format(response.status,
response.reason,
time_started,
req_id,
content))
if want_full_response:
return response
else:
if 'content-length' in response.headers:
if int(response.headers['content-length']) != len(response.data):
range_str = (' (%s)' % (headers['Range'],)) if 'Range' in headers else ''
raise exceptions.ContentLengthError(
"Received response with content-length header set to %s but content length is %d%s. " +
"[Time=%f RequestID=%s]" %
(response.headers['content-length'], len(response.data), range_str, time_started, req_id)
)
content = response.data
response_was_json = False
if decode_response_body:
content = content.decode('utf-8')
if response.headers.get('content-type', '').startswith('application/json'):
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
else:
response_was_json = True
req_id = response.headers.get('x-request-id') or "--"
_debug_print_response(_DEBUG, seq_num, time_started, req_id, response.status, response_was_json,
_method, _url, content)
if test_retry:
retried_responses.append(content)
if len(retried_responses) == 1:
continue
else:
_set_retry_response(retried_responses[0])
return retried_responses[1]
return content
raise AssertionError('Should never reach this line: expected a result to have been returned by now')
except Exception as e:
# Avoid reusing connections in the pool, since they may be
# in an inconsistent state (observed as "ResponseNotReady"
# errors).
_get_pool_manager(**pool_args).clear()
success = False
exception_msg = _extract_msg_from_last_exception()
if isinstance(e, _expected_exceptions):
# Total number of allowed tries is the initial try PLUS
# up to (max_retries) subsequent retries.
total_allowed_tries = max_retries + 1
ok_to_retry = False
is_retryable = always_retry or (method == 'GET') or _is_retryable_exception(e)
# Because try_index is not incremented until we escape
# this iteration of the loop, try_index is equal to the
# number of tries that have failed so far, minus one.
if try_index + 1 < total_allowed_tries:
# BadStatusLine --- server did not return anything
# BadJSONInReply --- server returned JSON that didn't parse properly
if (response is None
or isinstance(e, (exceptions.ContentLengthError, BadStatusLine, exceptions.BadJSONInReply,
urllib3.exceptions.ProtocolError, exceptions.UrllibInternalError))):
ok_to_retry = is_retryable
else:
ok_to_retry = 500 <= response.status < 600
# The server has closed the connection prematurely
if (response is not None
and response.status == 400 and is_retryable and method == 'PUT'
and isinstance(e, requests.exceptions.HTTPError)):
if '<Code>RequestTimeout</Code>' in exception_msg:
logger.info("Retrying 400 HTTP error, due to slow data transfer. " +
"Request Time=%f Request ID=%s", time_started, req_id)
else:
logger.info("400 HTTP error, of unknown origin, exception_msg=[%s]. " +
"Request Time=%f Request ID=%s", exception_msg, time_started, req_id)
ok_to_retry = True
# Unprocessable entity: the request has semantic errors
if response is not None and response.status == 422:
ok_to_retry = False
if ok_to_retry:
if rewind_input_buffer_offset is not None:
data.seek(rewind_input_buffer_offset)
delay = _calculate_retry_delay(response, try_index_including_503 + 1)
range_str = (' (range=%s)' % (headers['Range'],)) if 'Range' in headers else ''
if response is not None and response.status == 503:
waiting_msg = 'Waiting %d seconds before retry...' % (delay,)
else:
waiting_msg = 'Waiting %d seconds before retry %d of %d...' % (
delay, try_index + 1, max_retries)
logger.warning("[%s] %s %s: %s. %s %s",
time.ctime(), method, _url, exception_msg, waiting_msg, range_str)
time.sleep(delay)
try_index_including_503 += 1
if response is None or response.status != 503:
try_index += 1
continue
# All retries have been exhausted OR the error is deemed not
# retryable. Print the latest error and propagate it back to the caller.
if not isinstance(e, exceptions.DXAPIError):
logger.error("[%s] %s %s: %s.", time.ctime(), method, _url, exception_msg)
if isinstance(e, urllib3.exceptions.ProtocolError) and \
'Connection reset by peer' in exception_msg:
# If the protocol error is 'connection reset by peer', most likely it is an
# error in the ssl handshake due to unsupported TLS protocol.
_test_tls_version()
# Retries have been exhausted, and we are unable to get a full
# buffer from the data source. Raise a special exception.
if isinstance(e, urllib3.exceptions.ProtocolError) and \
'Connection broken: IncompleteRead' in exception_msg:
raise exceptions.DXIncompleteReadsError(exception_msg)
raise
finally:
if success and try_index > 0:
logger.info("[%s] %s %s: Recovered after %d retries", time.ctime(), method, _url, try_index)
raise AssertionError('Should never reach this line: should have attempted a retry or reraised by now')
raise AssertionError('Should never reach this line: should never break out of loop') | :param resource: API server route, e.g. "/record/new". If *prepend_srv* is False, a fully qualified URL is expected. If this argument is a callable, it will be called just before each request attempt, and expected to return a tuple (URL, headers). Headers returned by the callback are updated with *headers* (including headers set by this method).
:type resource: string
:param data: Content of the request body
:type data: list or dict, if *jsonify_data* is True; or string or file-like object, otherwise
:param headers: Names and values of HTTP headers to submit with the request (in addition to those needed for authentication, compression, or other options specified with the call).
:type headers: dict
:param auth:
Controls the ``Authentication`` header or other means of authentication supplied with the request. If ``True``
(default), a token is obtained from the ``DX_SECURITY_CONTEXT``. If the value evaluates to false, no action is
taken to prepare authentication for the request. Otherwise, the value is assumed to be callable, and called with
three arguments (method, url, headers) and expected to prepare the authentication headers by reference.
:type auth: tuple, object, True (default), or None
:param timeout: HTTP request timeout, in seconds
:type timeout: float
:param config: *config* value to pass through to :meth:`requests.request`
:type config: dict
:param use_compression: Deprecated
:type use_compression: string or None
:param jsonify_data: If True, *data* is converted from a Python list or dict to a JSON string
:type jsonify_data: boolean
:param want_full_response: If True, the full :class:`requests.Response` object is returned (otherwise, only the content of the response body is returned)
:type want_full_response: boolean
:param decode_response_body: If True (and *want_full_response* is False), the response body is decoded and, if it is a JSON string, deserialized. Otherwise, the response body is uncompressed if transport compression is on, and returned raw.
:type decode_response_body: boolean
:param prepend_srv: If True, prepends the API server location to the URL
:type prepend_srv: boolean
:param session_handler: Deprecated.
:param max_retries: Maximum number of retries to perform for a request. A "failed" request is retried if any of the following is true:
- A response is received from the server, and the content length received does not match the "Content-Length" header.
- A response is received from the server, and the response has an HTTP status code in 5xx range.
- A response is received from the server, the "Content-Length" header is not set, and the response JSON cannot be parsed.
- No response is received from the server, and either *always_retry* is True or the request *method* is "GET".
:type max_retries: int
:param always_retry: If True, indicates that it is safe to retry a request on failure
- Note: It is not guaranteed that the request will *always* be retried on failure; rather, this is an indication to the function that it would be safe to do so.
:type always_retry: boolean
:returns: Response from API server in the format indicated by *want_full_response* and *decode_response_body*.
:raises: :exc:`exceptions.DXAPIError` or a subclass if the server returned a non-200 status code; :exc:`requests.exceptions.HTTPError` if an invalid response was received from the server; or :exc:`requests.exceptions.ConnectionError` if a connection cannot be established.
Wrapper around :meth:`requests.request()` that makes an HTTP
request, inserting authentication headers and (by default)
converting *data* to JSON.
.. note:: Bindings methods that make API calls make the underlying
HTTP request(s) using :func:`DXHTTPRequest`, and most of them
will pass any unrecognized keyword arguments you have supplied
through to :func:`DXHTTPRequest`. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L487-L820 |
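
Most code should go through the generated ``dxpy.api`` wrappers, which call this function internally. Calling it directly looks like the sketch below; ``/system/whoami`` is a standard API route, and the keyword arguments shown are those documented above:

```python
import dxpy

# POST /system/whoami with an empty JSON body; the decoded JSON is returned.
whoami = dxpy.DXHTTPRequest("/system/whoami", {})
print(whoami["id"])  # e.g. "user-xxxx"

# Opt in to retries for a request known to be safe to repeat.
result = dxpy.DXHTTPRequest("/system/whoami", {},
                            always_retry=True, max_retries=3)
```
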
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | set_api_server_info | def set_api_server_info(host=None, port=None, protocol=None):
'''
:param host: API server hostname
:type host: string
:param port: API server port. If not specified, *port* is guessed based on *protocol*.
:type port: string
:param protocol: Either "http" or "https"
:type protocol: string
Overrides the current settings for which API server to communicate
with. Any parameters that are not explicitly specified are not
overridden.
'''
global APISERVER_PROTOCOL, APISERVER_HOST, APISERVER_PORT, APISERVER
if host is not None:
APISERVER_HOST = host
if port is not None:
APISERVER_PORT = port
if protocol is not None:
APISERVER_PROTOCOL = protocol
if port is None or port == '':
APISERVER = APISERVER_PROTOCOL + "://" + APISERVER_HOST
else:
APISERVER = APISERVER_PROTOCOL + "://" + APISERVER_HOST + ":" + str(APISERVER_PORT) | python | def set_api_server_info(host=None, port=None, protocol=None):
'''
:param host: API server hostname
:type host: string
:param port: API server port. If not specified, *port* is guessed based on *protocol*.
:type port: string
:param protocol: Either "http" or "https"
:type protocol: string
Overrides the current settings for which API server to communicate
with. Any parameters that are not explicitly specified are not
overridden.
'''
global APISERVER_PROTOCOL, APISERVER_HOST, APISERVER_PORT, APISERVER
if host is not None:
APISERVER_HOST = host
if port is not None:
APISERVER_PORT = port
if protocol is not None:
APISERVER_PROTOCOL = protocol
if port is None or port == '':
APISERVER = APISERVER_PROTOCOL + "://" + APISERVER_HOST
else:
APISERVER = APISERVER_PROTOCOL + "://" + APISERVER_HOST + ":" + str(APISERVER_PORT) | :param host: API server hostname
:type host: string
:param port: API server port. If not specified, *port* is guessed based on *protocol*.
:type port: string
:param protocol: Either "http" or "https"
:type protocol: string
Overrides the current settings for which API server to communicate
with. Any parameters that are not explicitly specified are not
overridden. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L878-L901 |
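
A sketch pointing the client at a non-default server; the hosts and port are hypothetical:

```python
import dxpy

dxpy.set_api_server_info(host="localhost", port="8124", protocol="http")
print(dxpy.APISERVER)   # http://localhost:8124

# Omitting the port (or passing "") drops it from the URL entirely.
dxpy.set_api_server_info(host="api.example.com", port="", protocol="https")
print(dxpy.APISERVER)   # https://api.example.com
```
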
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | get_auth_server_name | def get_auth_server_name(host_override=None, port_override=None, protocol='https'):
"""
Chooses the auth server name from the currently configured API server name.
Raises DXError if the auth server name cannot be guessed and the overrides
are not provided (or improperly provided).
"""
if host_override is not None or port_override is not None:
if host_override is None or port_override is None:
raise exceptions.DXError("Both host and port must be specified if either is specified")
return protocol + '://' + host_override + ':' + str(port_override)
elif APISERVER_HOST == 'stagingapi.dnanexus.com':
return 'https://stagingauth.dnanexus.com'
elif APISERVER_HOST == 'api.dnanexus.com':
return 'https://auth.dnanexus.com'
elif APISERVER_HOST == 'stagingapi.cn.dnanexus.com':
return 'https://stagingauth.cn.dnanexus.com:7001'
elif APISERVER_HOST == 'api.cn.dnanexus.com':
return 'https://auth.cn.dnanexus.com:8001'
elif APISERVER_HOST == "localhost" or APISERVER_HOST == "127.0.0.1":
if "DX_AUTHSERVER_HOST" not in os.environ or "DX_AUTHSERVER_PORT" not in os.environ:
err_msg = "Must set authserver env vars (DX_AUTHSERVER_HOST, DX_AUTHSERVER_PORT) if apiserver is {apiserver}."
raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST))
else:
return os.environ["DX_AUTHSERVER_HOST"] + ":" + os.environ["DX_AUTHSERVER_PORT"]
else:
err_msg = "Could not determine which auth server is associated with {apiserver}."
raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST)) | python | def get_auth_server_name(host_override=None, port_override=None, protocol='https'):
"""
Chooses the auth server name from the currently configured API server name.
Raises DXError if the auth server name cannot be guessed and the overrides
are not provided (or improperly provided).
"""
if host_override is not None or port_override is not None:
if host_override is None or port_override is None:
raise exceptions.DXError("Both host and port must be specified if either is specified")
return protocol + '://' + host_override + ':' + str(port_override)
elif APISERVER_HOST == 'stagingapi.dnanexus.com':
return 'https://stagingauth.dnanexus.com'
elif APISERVER_HOST == 'api.dnanexus.com':
return 'https://auth.dnanexus.com'
elif APISERVER_HOST == 'stagingapi.cn.dnanexus.com':
return 'https://stagingauth.cn.dnanexus.com:7001'
elif APISERVER_HOST == 'api.cn.dnanexus.com':
return 'https://auth.cn.dnanexus.com:8001'
elif APISERVER_HOST == "localhost" or APISERVER_HOST == "127.0.0.1":
if "DX_AUTHSERVER_HOST" not in os.environ or "DX_AUTHSERVER_PORT" not in os.environ:
err_msg = "Must set authserver env vars (DX_AUTHSERVER_HOST, DX_AUTHSERVER_PORT) if apiserver is {apiserver}."
raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST))
else:
return os.environ["DX_AUTHSERVER_HOST"] + ":" + os.environ["DX_AUTHSERVER_PORT"]
else:
err_msg = "Could not determine which auth server is associated with {apiserver}."
raise exceptions.DXError(err_msg.format(apiserver=APISERVER_HOST)) | Chooses the auth server name from the currently configured API server name.
Raises DXError if the auth server name cannot be guessed and the overrides
are not provided (or improperly provided). | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L964-L991 |
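A sketch of the lookup and override rules (the production mapping follows the branches above; the override host is illustrative):

    import dxpy
    dxpy.set_api_server_info(host="api.dnanexus.com")
    dxpy.get_auth_server_name()                            # https://auth.dnanexus.com
    dxpy.get_auth_server_name("auth.example.com", 7001)    # https://auth.example.com:7001
    dxpy.get_auth_server_name("auth.example.com")          # raises DXError: host and port must be given together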
dnanexus/dx-toolkit | src/python/dxpy/__init__.py | append_underlying_workflow_describe | def append_underlying_workflow_describe(globalworkflow_desc):
"""
Adds the "workflowDescribe" field to the config for each region of
the global workflow. The value is the description of an underlying
workflow in that region.
"""
if not globalworkflow_desc or \
globalworkflow_desc['class'] != 'globalworkflow' or \
not 'regionalOptions' in globalworkflow_desc:
return globalworkflow_desc
for region, config in globalworkflow_desc['regionalOptions'].items():
workflow_id = config['workflow']
workflow_desc = dxpy.api.workflow_describe(workflow_id)
globalworkflow_desc['regionalOptions'][region]['workflowDescribe'] = workflow_desc
return globalworkflow_desc | python | def append_underlying_workflow_describe(globalworkflow_desc):
"""
Adds the "workflowDescribe" field to the config for each region of
the global workflow. The value is the description of an underlying
workflow in that region.
"""
if not globalworkflow_desc or \
globalworkflow_desc['class'] != 'globalworkflow' or \
not 'regionalOptions' in globalworkflow_desc:
return globalworkflow_desc
for region, config in globalworkflow_desc['regionalOptions'].items():
workflow_id = config['workflow']
workflow_desc = dxpy.api.workflow_describe(workflow_id)
globalworkflow_desc['regionalOptions'][region]['workflowDescribe'] = workflow_desc
return globalworkflow_desc | Adds the "workflowDescribe" field to the config for each region of
the global workflow. The value is the description of an underlying
workflow in that region. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/__init__.py#L1011-L1026 |
dnanexus/dx-toolkit | src/python/dxpy/cli/exec_io.py | _construct_jbor | def _construct_jbor(job_id, field_name_and_maybe_index):
'''
:param job_id: Job ID
:type job_id: string
:param field_name_and_maybe_index: Field name, plus possibly ".N" where N is an array index
:type field_name_and_maybe_index: string
:returns: dict of JBOR
'''
link = {"$dnanexus_link": {"job": job_id}}
if '.' in field_name_and_maybe_index:
split_by_dot = field_name_and_maybe_index.rsplit('.', 1)
link["$dnanexus_link"]["field"] = split_by_dot[0]
link["$dnanexus_link"]["index"] = int(split_by_dot[1])
else:
link["$dnanexus_link"]["field"] = field_name_and_maybe_index
return link | python | def _construct_jbor(job_id, field_name_and_maybe_index):
'''
:param job_id: Job ID
:type job_id: string
:param field_name_and_maybe_index: Field name, plus possibly ".N" where N is an array index
:type field_name_and_maybe_index: string
:returns: dict of JBOR
'''
link = {"$dnanexus_link": {"job": job_id}}
if '.' in field_name_and_maybe_index:
split_by_dot = field_name_and_maybe_index.rsplit('.', 1)
link["$dnanexus_link"]["field"] = split_by_dot[0]
link["$dnanexus_link"]["index"] = int(split_by_dot[1])
else:
link["$dnanexus_link"]["field"] = field_name_and_maybe_index
return link | :param job_id: Job ID
:type job_id: string
:param field_name_and_maybe_index: Field name, plus possibly ".N" where N is an array index
:type field_name_and_maybe_index: string
:returns: dict of JBOR | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/exec_io.py#L78-L93 |
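An illustration of the two JBOR shapes this private helper produces (the job ID is made up):

    _construct_jbor("job-012301230123012301230123", "counts.2")
    # {"$dnanexus_link": {"job": "job-012301230123012301230123", "field": "counts", "index": 2}}
    _construct_jbor("job-012301230123012301230123", "report")
    # {"$dnanexus_link": {"job": "job-012301230123012301230123", "field": "report"}}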
dnanexus/dx-toolkit | src/python/dxpy/cli/exec_io.py | ExecutableInputs.update | def update(self, new_inputs, strip_prefix=True):
"""
Updates the inputs dictionary with the key/value pairs from new_inputs, overwriting existing keys.
"""
if strip_prefix and self.input_name_prefix is not None:
for i in new_inputs:
if i.startswith(self.input_name_prefix):
self.inputs[i[len(self.input_name_prefix):]] = new_inputs[i]
else:
self.inputs.update(new_inputs) | python | def update(self, new_inputs, strip_prefix=True):
"""
Updates the inputs dictionary with the key/value pairs from new_inputs, overwriting existing keys.
"""
if strip_prefix and self.input_name_prefix is not None:
for i in new_inputs:
if i.startswith(self.input_name_prefix):
self.inputs[i[len(self.input_name_prefix):]] = new_inputs[i]
else:
self.inputs.update(new_inputs) | Updates the inputs dictionary with the key/value pairs from new_inputs, overwriting existing keys. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/exec_io.py#L486-L495 |
dnanexus/dx-toolkit | src/python/dxpy/cli/exec_io.py | ExecutableInputs._update_requires_resolution_inputs | def _update_requires_resolution_inputs(self):
"""
Updates self.inputs with resolved input values (the input values that were provided
as paths to items that require resolution, e.g. folder paths or job/analysis IDs)
"""
input_paths = [quad[1] for quad in self.requires_resolution]
results = resolve_multiple_existing_paths(input_paths)
for input_name, input_value, input_class, input_index in self.requires_resolution:
project = results[input_value]['project']
folderpath = results[input_value]['folder']
entity_result = results[input_value]['name']
if input_class is None:
if entity_result is not None:
if isinstance(entity_result, basestring):
# Case: -ifoo=job-012301230123012301230123
# Case: -ifoo=analysis-012301230123012301230123
assert(is_job_id(entity_result) or
(is_analysis_id(entity_result)))
input_value = entity_result
elif is_hashid(input_value):
input_value = {'$dnanexus_link': entity_result['id']}
elif 'describe' in entity_result:
# Then findDataObjects was called (returned describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['describe']['project'],
"id": entity_result['id']}}
else:
# Then resolveDataObjects was called in a batch (no describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['project'],
"id": entity_result['id']}}
if input_index >= 0:
if self.inputs[input_name][input_index] is not None:
raise AssertionError("Expected 'self.inputs' to have saved a spot for 'input_value'.")
self.inputs[input_name][input_index] = input_value
else:
if self.inputs[input_name] is not None:
raise AssertionError("Expected 'self.inputs' to have saved a spot for 'input_value'.")
self.inputs[input_name] = input_value
else:
msg = 'Value provided for input field "' + input_name + '" could not be parsed as ' + \
input_class + ': '
if input_value == '':
raise DXCLIError(msg + 'empty string cannot be resolved')
if entity_result is None:
raise DXCLIError(msg + 'could not resolve \"' + input_value + '\" to a name or ID')
try:
dxpy.bindings.verify_string_dxid(entity_result['id'], input_class)
except DXError as details:
raise DXCLIError(msg + str(details))
if is_hashid(input_value):
input_value = {'$dnanexus_link': entity_result['id']}
elif 'describe' in entity_result:
# Then findDataObjects was called (returned describe hash)
input_value = {'$dnanexus_link': {"project": entity_result['describe']['project'],
"id": entity_result['id']}}
else:
# Then resolveDataObjects was called in a batch (no describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['project'],
"id": entity_result['id']}}
if input_index != -1:
# The class is an array, so append the resolved value
self.inputs[input_name].append(input_value)
else:
self.inputs[input_name] = input_value | python | def _update_requires_resolution_inputs(self):
"""
Updates self.inputs with resolved input values (the input values that were provided
as paths to items that require resolution, e.g. folder paths or job/analysis IDs)
"""
input_paths = [quad[1] for quad in self.requires_resolution]
results = resolve_multiple_existing_paths(input_paths)
for input_name, input_value, input_class, input_index in self.requires_resolution:
project = results[input_value]['project']
folderpath = results[input_value]['folder']
entity_result = results[input_value]['name']
if input_class is None:
if entity_result is not None:
if isinstance(entity_result, basestring):
# Case: -ifoo=job-012301230123012301230123
# Case: -ifoo=analysis-012301230123012301230123
assert(is_job_id(entity_result) or
(is_analysis_id(entity_result)))
input_value = entity_result
elif is_hashid(input_value):
input_value = {'$dnanexus_link': entity_result['id']}
elif 'describe' in entity_result:
# Then findDataObjects was called (returned describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['describe']['project'],
"id": entity_result['id']}}
else:
# Then resolveDataObjects was called in a batch (no describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['project'],
"id": entity_result['id']}}
if input_index >= 0:
if self.inputs[input_name][input_index] is not None:
raise AssertionError("Expected 'self.inputs' to have saved a spot for 'input_value'.")
self.inputs[input_name][input_index] = input_value
else:
if self.inputs[input_name] is not None:
raise AssertionError("Expected 'self.inputs' to have saved a spot for 'input_value'.")
self.inputs[input_name] = input_value
else:
msg = 'Value provided for input field "' + input_name + '" could not be parsed as ' + \
input_class + ': '
if input_value == '':
raise DXCLIError(msg + 'empty string cannot be resolved')
if entity_result is None:
raise DXCLIError(msg + 'could not resolve \"' + input_value + '\" to a name or ID')
try:
dxpy.bindings.verify_string_dxid(entity_result['id'], input_class)
except DXError as details:
raise DXCLIError(msg + str(details))
if is_hashid(input_value):
input_value = {'$dnanexus_link': entity_result['id']}
elif 'describe' in entity_result:
# Then findDataObjects was called (returned describe hash)
input_value = {'$dnanexus_link': {"project": entity_result['describe']['project'],
"id": entity_result['id']}}
else:
# Then resolveDataObjects was called in a batch (no describe hash)
input_value = {"$dnanexus_link": {"project": entity_result['project'],
"id": entity_result['id']}}
if input_index != -1:
# The class is an array, so append the resolved value
self.inputs[input_name].append(input_value)
else:
self.inputs[input_name] = input_value | Updates self.inputs with resolved input values (the input values that were provided
as paths to items that require resolution, e.g. folder paths or job/analysis IDs) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/exec_io.py#L497-L559 |
dnanexus/dx-toolkit | src/python/dxpy/utils/pretty_print.py | escape_unicode_string | def escape_unicode_string(u):
"""
Escapes the nonprintable chars 0-31 and 127, and backslash;
preferably with a friendly equivalent such as '\n' if available, but
otherwise with a Python-style backslashed hex escape.
"""
def replacer(matchobj):
if ord(matchobj.group(1)) == 127:
return "\\x7f"
if ord(matchobj.group(1)) == 92: # backslash
return "\\\\"
return REPLACEMENT_TABLE[ord(matchobj.group(1))]
return re.sub("([\\000-\\037\\134\\177])", replacer, u) | python | def escape_unicode_string(u):
"""
Escapes the nonprintable chars 0-31 and 127, and backslash;
preferably with a friendly equivalent such as '\n' if available, but
otherwise with a Python-style backslashed hex escape.
"""
def replacer(matchobj):
if ord(matchobj.group(1)) == 127:
return "\\x7f"
if ord(matchobj.group(1)) == 92: # backslash
return "\\\\"
return REPLACEMENT_TABLE[ord(matchobj.group(1))]
return re.sub("([\\000-\\037\\134\\177])", replacer, u) | Escapes the nonprintable chars 0-31 and 127, and backslash;
preferably with a friendly equivalent such as '\n' if available, but
otherwise with a Python-style backslashed hex escape. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/pretty_print.py#L61-L73 |
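A small demonstration (assumes REPLACEMENT_TABLE, defined elsewhere in the module, maps control characters to friendly escapes such as \t and \n):

    from dxpy.utils.pretty_print import escape_unicode_string
    escaped = escape_unicode_string("col1\tcol2\nback\\slash")
    # prints: col1\tcol2\nback\\slash  (the escapes are now literal text)
    print(escaped)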
dnanexus/dx-toolkit | src/python/dxpy/utils/pretty_print.py | format_tree | def format_tree(tree, root=None):
''' Tree pretty printer.
Expects trees to be given as mappings (dictionaries). Keys will be printed; values will be traversed if they are
mappings. To preserve order, use collections.OrderedDict.
Example:
print format_tree(collections.OrderedDict({'foo': 0, 'bar': {'xyz': 0}}))
'''
formatted_tree = [root] if root is not None else []
def _format(tree, prefix='    '):
nodes = list(tree.keys())
for i in range(len(nodes)):
node = nodes[i]
if i == len(nodes)-1 and len(prefix) > 1:
my_prefix = prefix[:-4] + '└── '
my_multiline_prefix = prefix[:-4] + '    '
else:
my_prefix = prefix[:-4] + '├── '
my_multiline_prefix = prefix[:-4] + '│   '
n = 0
for line in node.splitlines():
if n == 0:
formatted_tree.append(my_prefix + line)
else:
formatted_tree.append(my_multiline_prefix + line)
n += 1
if isinstance(tree[node], collections.Mapping):
subprefix = prefix
if i < len(nodes)-1 and len(prefix) > 1 and prefix[-4:] == '    ':
subprefix = prefix[:-4] + '│   '
_format(tree[node], subprefix + '    ')
_format(tree)
return '\n'.join(formatted_tree) | python | def format_tree(tree, root=None):
''' Tree pretty printer.
Expects trees to be given as mappings (dictionaries). Keys will be printed; values will be traversed if they are
mappings. To preserve order, use collections.OrderedDict.
Example:
print format_tree(collections.OrderedDict({'foo': 0, 'bar': {'xyz': 0}}))
'''
formatted_tree = [root] if root is not None else []
def _format(tree, prefix='    '):
nodes = list(tree.keys())
for i in range(len(nodes)):
node = nodes[i]
if i == len(nodes)-1 and len(prefix) > 1:
my_prefix = prefix[:-4] + '└── '
my_multiline_prefix = prefix[:-4] + '    '
else:
my_prefix = prefix[:-4] + '├── '
my_multiline_prefix = prefix[:-4] + '│   '
n = 0
for line in node.splitlines():
if n == 0:
formatted_tree.append(my_prefix + line)
else:
formatted_tree.append(my_multiline_prefix + line)
n += 1
if isinstance(tree[node], collections.Mapping):
subprefix = prefix
if i < len(nodes)-1 and len(prefix) > 1 and prefix[-4:] == '    ':
subprefix = prefix[:-4] + '│   '
_format(tree[node], subprefix + '    ')
_format(tree)
return '\n'.join(formatted_tree) | Tree pretty printer.
Expects trees to be given as mappings (dictionaries). Keys will be printed; values will be traversed if they are
mappings. To preserve order, use collections.OrderedDict.
Example:
print format_tree(collections.OrderedDict({'foo': 0, 'bar': {'xyz': 0}})) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/pretty_print.py#L75-L110 |
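A usage sketch extending the docstring's example (output reproduced approximately; note the function relies on collections.Mapping, which was removed from the collections top level in Python 3.10):

    import collections
    from dxpy.utils.pretty_print import format_tree
    tree = collections.OrderedDict([("foo", 0),
                                    ("bar", collections.OrderedDict([("xyz", 0)]))])
    print(format_tree(tree, root="."))
    # .
    # ├── foo
    # └── bar
    #     └── xyz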
dnanexus/dx-toolkit | src/python/dxpy/utils/pretty_print.py | format_table | def format_table(table, column_names=None, column_specs=None, max_col_width=32,
report_dimensions=False):
''' Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
'''
if len(table) > 0:
col_widths = [0] * len(list(table)[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_column_names = []
if column_specs is not None:
column_names = ['Row']
column_names.extend([col['name'] for col in column_specs])
column_specs = [{'name': 'Row', 'type': 'float'}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
my_col = str(column_names[i])
if len(my_col) > max_col_width:
my_col = my_col[:max_col_width-1] + '…'
my_column_names.append(my_col)
col_widths[i] = max(col_widths[i], len(my_col))
my_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = escape_unicode_string(str(row[i]))
if len(my_item) > max_col_width:
my_item = my_item[:max_col_width-1] + '…'
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(my_item))
my_table.append(my_row)
def border(i):
return WHITE() + i + ENDC()
type_colormap = {'boolean': BLUE(),
'integer': YELLOW(),
'float': WHITE(),
'string': GREEN()}
for i in 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64':
type_colormap[i] = type_colormap['integer']
type_colormap['double'] = type_colormap['float']
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border('┌') + border('┬').join(border('─')*i for i in col_widths) + border('┐')]
if len(my_column_names) > 0:
padded_column_names = [col_head(i) + ' '*(col_widths[i]-len(my_column_names[i])) for i in range(len(my_column_names))]
formatted_table.append(border('│') + border('│').join(padded_column_names) + border('│'))
formatted_table.append(border('├') + border('┼').join(border('─')*i for i in col_widths) + border('┤'))
for row in my_table:
padded_row = [row[i] + ' '*(col_widths[i]-len(row[i])) for i in range(len(row))]
formatted_table.append(border('│') + border('│').join(padded_row) + border('│'))
formatted_table.append(border('└') + border('┴').join(border('─')*i for i in col_widths) + border('┘'))
if report_dimensions:
return '\n'.join(formatted_table), len(formatted_table), sum(col_widths) + len(col_widths) + 1
else:
return '\n'.join(formatted_table) | python | def format_table(table, column_names=None, column_specs=None, max_col_width=32,
report_dimensions=False):
''' Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
'''
if len(table) > 0:
col_widths = [0] * len(list(table)[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_column_names = []
if column_specs is not None:
column_names = ['Row']
column_names.extend([col['name'] for col in column_specs])
column_specs = [{'name': 'Row', 'type': 'float'}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
my_col = str(column_names[i])
if len(my_col) > max_col_width:
my_col = my_col[:max_col_width-1] + '…'
my_column_names.append(my_col)
col_widths[i] = max(col_widths[i], len(my_col))
my_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = escape_unicode_string(str(row[i]))
if len(my_item) > max_col_width:
my_item = my_item[:max_col_width-1] + '…'
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(my_item))
my_table.append(my_row)
def border(i):
return WHITE() + i + ENDC()
type_colormap = {'boolean': BLUE(),
'integer': YELLOW(),
'float': WHITE(),
'string': GREEN()}
for i in 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64':
type_colormap[i] = type_colormap['integer']
type_colormap['double'] = type_colormap['float']
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border('┌') + border('┬').join(border('─')*i for i in col_widths) + border('┐')]
if len(my_column_names) > 0:
padded_column_names = [col_head(i) + ' '*(col_widths[i]-len(my_column_names[i])) for i in range(len(my_column_names))]
formatted_table.append(border('│') + border('│').join(padded_column_names) + border('│'))
formatted_table.append(border('├') + border('┼').join(border('─')*i for i in col_widths) + border('┤'))
for row in my_table:
padded_row = [row[i] + ' '*(col_widths[i]-len(row[i])) for i in range(len(row))]
formatted_table.append(border('│') + border('│').join(padded_row) + border('│'))
formatted_table.append(border('└') + border('┴').join(border('─')*i for i in col_widths) + border('┘'))
if report_dimensions:
return '\n'.join(formatted_table), len(formatted_table), sum(col_widths) + len(col_widths) + 1
else:
return '\n'.join(formatted_table) | Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/pretty_print.py#L112-L182 |
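A usage sketch of the docstring's example (the borders and headers are wrapped in ANSI color codes by WHITE(), BOLD(), etc.; shown here with colors stripped):

    from dxpy.utils.pretty_print import format_table
    print(format_table([[1, "2"], [3, "456"]], column_names=["A", "B"]))
    # ┌─┬───┐
    # │A│B  │
    # ├─┼───┤
    # │1│2  │
    # │3│456│
    # └─┴───┘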
dnanexus/dx-toolkit | src/python/dxpy/utils/pretty_print.py | flatten_json_array | def flatten_json_array(json_string, array_name):
"""
Flattens all arrays with the same name in the JSON string
:param json_string: JSON string
:type json_string: str
:param array_name: Array name to flatten
:type array_name: str
"""
result = re.sub('"{}": \\[\r?\n\\s*'.format(array_name), '"{}": ['.format(array_name), json_string, flags=re.MULTILINE)
flatten_regexp = re.compile('"{}": \\[(.*)(?<=,)\r?\n\\s*'.format(array_name), flags=re.MULTILINE)
while flatten_regexp.search(result):
result = flatten_regexp.sub('"{}": [\\1 '.format(array_name), result)
result = re.sub('"{}": \\[(.*)\r?\n\\s*\\]'.format(array_name), '"{}": [\\1]'.format(array_name), result, flags=re.MULTILINE)
return result | python | def flatten_json_array(json_string, array_name):
"""
Flattens all arrays with the same name in the JSON string
:param json_string: JSON string
:type json_string: str
:param array_name: Array name to flatten
:type array_name: str
"""
result = re.sub('"{}": \\[\r?\n\\s*'.format(array_name), '"{}": ['.format(array_name), json_string, flags=re.MULTILINE)
flatten_regexp = re.compile('"{}": \\[(.*)(?<=,)\r?\n\\s*'.format(array_name), flags=re.MULTILINE)
while flatten_regexp.search(result):
result = flatten_regexp.sub('"{}": [\\1 '.format(array_name), result)
result = re.sub('"{}": \\[(.*)\r?\n\\s*\\]'.format(array_name), '"{}": [\\1]'.format(array_name), result, flags=re.MULTILINE)
return result | Flattens all arrays with the same name in the JSON string
:param json_string: JSON string
:type json_string: str
:param array_name: Array name to flatten
:type array_name: str | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/pretty_print.py#L184-L199 |
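A self-contained demonstration, since this is a pure string transformation with no API access needed:

    import json
    from dxpy.utils.pretty_print import flatten_json_array
    pretty = json.dumps({"name": "t", "sizes": [1, 2, 3]}, indent=2)
    print(flatten_json_array(pretty, "sizes"))
    # {
    #   "name": "t",
    #   "sizes": [1, 2, 3]
    # }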
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | new_dxworkflow | def new_dxworkflow(title=None, summary=None, description=None, output_folder=None, init_from=None, **kwargs):
'''
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
:rtype: :class:`DXWorkflow`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`, except `details`.
Creates a new remote workflow object with project set to *project*
and returns the appropriate handler.
Example:
r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")
Note that this function is shorthand for::
dxworkflow = DXWorkflow()
dxworkflow.new(**kwargs)
'''
dxworkflow = DXWorkflow()
dxworkflow.new(title=title, summary=summary, description=description, output_folder=output_folder, init_from=init_from, **kwargs)
return dxworkflow | python | def new_dxworkflow(title=None, summary=None, description=None, output_folder=None, init_from=None, **kwargs):
'''
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
:rtype: :class:`DXWorkflow`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`, except `details`.
Creates a new remote workflow object with project set to *project*
and returns the appropriate handler.
Example:
r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")
Note that this function is shorthand for::
dxworkflow = DXWorkflow()
dxworkflow.new(**kwargs)
'''
dxworkflow = DXWorkflow()
dxworkflow.new(title=title, summary=summary, description=description, output_folder=output_folder, init_from=init_from, **kwargs)
return dxworkflow | :param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
:rtype: :class:`DXWorkflow`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`, except `details`.
Creates a new remote workflow object with project set to *project*
and returns the appropriate handler.
Example:
r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")
Note that this function is shorthand for::
dxworkflow = DXWorkflow()
dxworkflow.new(**kwargs) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L40-L71 |
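A usage sketch (requires an API connection and a current project context; field values are illustrative):

    import dxpy
    wf = dxpy.new_dxworkflow(title="My Workflow",
                             summary="Aligns reads and calls variants",
                             output_folder="/results")
    print(wf.get_id())   # a freshly minted workflow-xxxx ID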
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow._new | def _new(self, dx_hash, **kwargs):
"""
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param stages: Stages of the workflow (optional)
:type stages: array of dictionaries
:param workflow_inputs: Workflow-level input specification (optional)
:type workflow_inputs: array of dictionaries
:param workflow_outputs: Workflow-level output specification (optional)
:type workflow_outputs: array of dictionaries
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
Create a new remote workflow object.
"""
def _set_dx_hash(kwargs, dxhash, key, new_key=None):
new_key = key if new_key is None else new_key
if key in kwargs:
if kwargs[key] is not None:
dxhash[new_key] = kwargs[key]
del kwargs[key]
if "init_from" in kwargs:
if kwargs["init_from"] is not None:
if not (isinstance(kwargs["init_from"], (DXWorkflow, DXAnalysis)) or \
(isinstance(kwargs["init_from"], basestring) and \
re.compile('^analysis-[0-9A-Za-z]{24}$').match(kwargs["init_from"]))):
raise DXError("Expected init_from to be an instance of DXWorkflow or DXAnalysis, or to be a string analysis ID.")
if isinstance(kwargs["init_from"], basestring):
dx_hash["initializeFrom"] = {"id": kwargs["init_from"]}
else:
dx_hash["initializeFrom"] = {"id": kwargs["init_from"].get_id()}
if isinstance(kwargs["init_from"], DXWorkflow):
dx_hash["initializeFrom"]["project"] = kwargs["init_from"].get_proj_id()
del kwargs["init_from"]
_set_dx_hash(kwargs, dx_hash, "title")
_set_dx_hash(kwargs, dx_hash, "summary")
_set_dx_hash(kwargs, dx_hash, "description")
_set_dx_hash(kwargs, dx_hash, "output_folder", "outputFolder")
_set_dx_hash(kwargs, dx_hash, "stages")
_set_dx_hash(kwargs, dx_hash, "workflow_inputs", "inputs")
_set_dx_hash(kwargs, dx_hash, "workflow_outputs", "outputs")
resp = dxpy.api.workflow_new(dx_hash, **kwargs)
self.set_ids(resp["id"], dx_hash["project"]) | python | def _new(self, dx_hash, **kwargs):
"""
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param stages: Stages of the workflow (optional)
:type stages: array of dictionaries
:param workflow_inputs: Workflow-level input specification (optional)
:type workflow_inputs: array of dictionaries
:param workflow_outputs: Workflow-level output specification (optional)
:type workflow_outputs: array of dictionaries
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
Create a new remote workflow object.
"""
def _set_dx_hash(kwargs, dxhash, key, new_key=None):
new_key = key if new_key is None else new_key
if key in kwargs:
if kwargs[key] is not None:
dxhash[new_key] = kwargs[key]
del kwargs[key]
if "init_from" in kwargs:
if kwargs["init_from"] is not None:
if not (isinstance(kwargs["init_from"], (DXWorkflow, DXAnalysis)) or \
(isinstance(kwargs["init_from"], basestring) and \
re.compile('^analysis-[0-9A-Za-z]{24}$').match(kwargs["init_from"]))):
raise DXError("Expected init_from to be an instance of DXWorkflow or DXAnalysis, or to be a string analysis ID.")
if isinstance(kwargs["init_from"], basestring):
dx_hash["initializeFrom"] = {"id": kwargs["init_from"]}
else:
dx_hash["initializeFrom"] = {"id": kwargs["init_from"].get_id()}
if isinstance(kwargs["init_from"], DXWorkflow):
dx_hash["initializeFrom"]["project"] = kwargs["init_from"].get_proj_id()
del kwargs["init_from"]
_set_dx_hash(kwargs, dx_hash, "title")
_set_dx_hash(kwargs, dx_hash, "summary")
_set_dx_hash(kwargs, dx_hash, "description")
_set_dx_hash(kwargs, dx_hash, "output_folder", "outputFolder")
_set_dx_hash(kwargs, dx_hash, "stages")
_set_dx_hash(kwargs, dx_hash, "workflow_inputs", "inputs")
_set_dx_hash(kwargs, dx_hash, "workflow_outputs", "outputs")
resp = dxpy.api.workflow_new(dx_hash, **kwargs)
self.set_ids(resp["id"], dx_hash["project"]) | :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param title: Workflow title (optional)
:type title: string
:param summary: Workflow summary (optional)
:type summary: string
:param description: Workflow description (optional)
:type description: string
:param output_folder: Default output folder of the workflow (optional)
:type output_folder: string
:param stages: Stages of the workflow (optional)
:type stages: array of dictionaries
:param workflow_inputs: Workflow-level input specification (optional)
:type workflow_inputs: array of dictionaries
:param workflow_outputs: Workflow-level output specification (optional)
:type workflow_outputs: array of dictionaries
:param init_from: Another analysis workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
:type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
Create a new remote workflow object. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L94-L148 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow._get_stage_id | def _get_stage_id(self, stage):
'''
:param stage: A stage ID, name, or index (stage index is the number n for the nth stage, starting from 0; can be provided as an int or a string)
:type stage: int or string
:returns: The stage ID (this is a no-op if it was already a stage ID)
:raises: :class:`~dxpy.exceptions.DXError` if *stage* could not be parsed, resolved to a stage ID, or it could not be found in the workflow
'''
# first, if it is a string, see if it is an integer
if isinstance(stage, basestring):
try:
stage = int(stage)
except:
# we'll try parsing it as a string later
pass
if not isinstance(stage, basestring):
# Try to parse as stage index; ensure that if it's not a
# string that it is an integer at this point.
try:
stage_index = int(stage)
except:
raise DXError('DXWorkflow: the given stage identifier was neither a string stage ID nor an integer index')
if stage_index < 0 or stage_index >= len(self.stages):
raise DXError('DXWorkflow: the workflow contains ' + str(len(self.stages)) + \
' stage(s), and the numerical value of the given stage identifier is out of range')
return self.stages[stage_index].get("id")
if re.compile('^([a-zA-Z_]|stage-)[0-9a-zA-Z_]*$').match(stage) is not None:
# Check if there exists a stage with this stage id
stage_id_exists = any([stg['id'] for stg in self.stages if stg.get('id') == stage])
if stage_id_exists:
return stage
# A stage with the provided ID can't be found in the workflow, so look for it as a name
stage_ids_matching_name = [stg['id'] for stg in self.stages if stg.get('name') == stage]
if len(stage_ids_matching_name) == 0:
raise DXError('DXWorkflow: the given stage identifier ' + stage + ' could not be found as either a stage ID or a stage name')
elif len(stage_ids_matching_name) > 1:
raise DXError('DXWorkflow: more than one workflow stage was found to have the name "' + stage + '"')
else:
return stage_ids_matching_name[0] | python | def _get_stage_id(self, stage):
'''
:param stage: A stage ID, name, or index (stage index is the number n for the nth stage, starting from 0; can be provided as an int or a string)
:type stage: int or string
:returns: The stage ID (this is a no-op if it was already a stage ID)
:raises: :class:`~dxpy.exceptions.DXError` if *stage* could not be parsed, resolved to a stage ID, or it could not be found in the workflow
'''
# first, if it is a string, see if it is an integer
if isinstance(stage, basestring):
try:
stage = int(stage)
except:
# we'll try parsing it as a string later
pass
if not isinstance(stage, basestring):
# Try to parse as stage index; ensure that if it's not a
# string that it is an integer at this point.
try:
stage_index = int(stage)
except:
raise DXError('DXWorkflow: the given stage identifier was neither a string stage ID nor an integer index')
if stage_index < 0 or stage_index >= len(self.stages):
raise DXError('DXWorkflow: the workflow contains ' + str(len(self.stages)) + \
' stage(s), and the numerical value of the given stage identifier is out of range')
return self.stages[stage_index].get("id")
if re.compile('^([a-zA-Z_]|stage-)[0-9a-zA-Z_]*$').match(stage) is not None:
# Check if there exists a stage with this stage id
stage_id_exists = any([stg['id'] for stg in self.stages if stg.get('id') == stage])
if stage_id_exists:
return stage
# A stage with the provided ID can't be found in the workflow, so look for it as a name
stage_ids_matching_name = [stg['id'] for stg in self.stages if stg.get('name') == stage]
if len(stage_ids_matching_name) == 0:
raise DXError('DXWorkflow: the given stage identifier ' + stage + ' could not be found as either a stage ID or a stage name')
elif len(stage_ids_matching_name) > 1:
raise DXError('DXWorkflow: more than one workflow stage was found to have the name "' + stage + '"')
else:
return stage_ids_matching_name[0] | :param stage: A stage ID, name, or index (stage index is the number n for the nth stage, starting from 0; can be provided as an int or a string)
:type stage: int or string
:returns: The stage ID (this is a no-op if it was already a stage ID)
:raises: :class:`~dxpy.exceptions.DXError` if *stage* could not be parsed, resolved to a stage ID, or it could not be found in the workflow | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L156-L196 |
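An illustration of the three addressing modes this private helper accepts (workflow, stage name, and stage ID are all hypothetical, and the workflow is assumed to contain these stages):

    import dxpy
    wf = dxpy.DXWorkflow("workflow-012301230123012301230123")
    wf._get_stage_id(0)              # stage index as an int
    wf._get_stage_id("0")            # the same index given as a string
    wf._get_stage_id("align_reads")  # a unique stage name, resolved to its ID
    wf._get_stage_id("stage-012301230123012301230123")  # an existing ID is returned unchanged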
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.add_stage | def add_stage(self, executable, stage_id=None, name=None, folder=None, stage_input=None, instance_type=None,
edit_version=None, **kwargs):
'''
:param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param stage_id: id for the stage (optional)
:type stage_id: string
:param name: name for the stage (optional)
:type name: string
:param folder: default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: ID of the added stage
:rtype: string
:raises: :class:`~dxpy.exceptions.DXError` if *executable* is not an expected type; :class:`~dxpy.exceptions.DXAPIError` for errors thrown from the API call
Adds the specified executable as a new stage in the workflow.
'''
if isinstance(executable, basestring):
exec_id = executable
elif isinstance(executable, DXExecutable):
exec_id = executable.get_id()
else:
raise DXError("dxpy.DXWorkflow.add_stage: executable must be a string or an instance of DXApplet or DXApp")
add_stage_input = {"executable": exec_id}
if stage_id is not None:
add_stage_input["id"] = stage_id
if name is not None:
add_stage_input["name"] = name
if folder is not None:
add_stage_input["folder"] = folder
if stage_input is not None:
add_stage_input["input"] = stage_input
if instance_type is not None:
add_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
self._add_edit_version_to_request(add_stage_input, edit_version)
try:
result = dxpy.api.workflow_add_stage(self._dxid, add_stage_input, **kwargs)
finally:
self.describe() # update cached describe
return result['stage'] | python | def add_stage(self, executable, stage_id=None, name=None, folder=None, stage_input=None, instance_type=None,
edit_version=None, **kwargs):
'''
:param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param stage_id: id for the stage (optional)
:type stage_id: string
:param name: name for the stage (optional)
:type name: string
:param folder: default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: ID of the added stage
:rtype: string
:raises: :class:`~dxpy.exceptions.DXError` if *executable* is not an expected type; :class:`~dxpy.exceptions.DXAPIError` for errors thrown from the API call
Adds the specified executable as a new stage in the workflow.
'''
if isinstance(executable, basestring):
exec_id = executable
elif isinstance(executable, DXExecutable):
exec_id = executable.get_id()
else:
raise DXError("dxpy.DXWorkflow.add_stage: executable must be a string or an instance of DXApplet or DXApp")
add_stage_input = {"executable": exec_id}
if stage_id is not None:
add_stage_input["id"] = stage_id
if name is not None:
add_stage_input["name"] = name
if folder is not None:
add_stage_input["folder"] = folder
if stage_input is not None:
add_stage_input["input"] = stage_input
if instance_type is not None:
add_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
self._add_edit_version_to_request(add_stage_input, edit_version)
try:
result = dxpy.api.workflow_add_stage(self._dxid, add_stage_input, **kwargs)
finally:
self.describe() # update cached describe
return result['stage'] | :param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param stage_id: id for the stage (optional)
:type stage_id: string
:param name: name for the stage (optional)
:type name: string
:param folder: default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: ID of the added stage
:rtype: string
:raises: :class:`~dxpy.exceptions.DXError` if *executable* is not an expected type; :class:`~dxpy.exceptions.DXAPIError` for errors thrown from the API call
Adds the specified executable as a new stage in the workflow. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L198-L243 |
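A usage sketch continuing the hypothetical wf handler from the earlier sketches (the applet and file IDs are made up; requires API access):

    applet = dxpy.DXApplet("applet-012301230123012301230123")
    stage_id = wf.add_stage(applet,
                            name="align_reads",
                            folder="/alignments",
                            stage_input={"reads": dxpy.dxlink("file-012301230123012301230123")},
                            instance_type="mem1_ssd1_x4")
    wf.get_stage(stage_id)   # returns the new stage's descriptor hash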
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.get_stage | def get_stage(self, stage, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:returns: Hash of stage descriptor in workflow
'''
stage_id = self._get_stage_id(stage)
result = next((stage for stage in self.stages if stage['id'] == stage_id), None)
if result is None:
raise DXError('The stage ID ' + stage_id + ' could not be found')
return result | python | def get_stage(self, stage, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:returns: Hash of stage descriptor in workflow
'''
stage_id = self._get_stage_id(stage)
result = next((stage for stage in self.stages if stage['id'] == stage_id), None)
if result is None:
raise DXError('The stage ID ' + stage_id + ' could not be found')
return result | :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:returns: Hash of stage descriptor in workflow | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L245-L255 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.remove_stage | def remove_stage(self, stage, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: Stage ID that was removed
:rtype: string
Removes the specified stage from the workflow
'''
stage_id = self._get_stage_id(stage)
remove_stage_input = {"stage": stage_id}
self._add_edit_version_to_request(remove_stage_input, edit_version)
try:
dxpy.api.workflow_remove_stage(self._dxid, remove_stage_input, **kwargs)
finally:
self.describe() # update cached describe
return stage_id | python | def remove_stage(self, stage, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: Stage ID that was removed
:rtype: string
Removes the specified stage from the workflow
'''
stage_id = self._get_stage_id(stage)
remove_stage_input = {"stage": stage_id}
self._add_edit_version_to_request(remove_stage_input, edit_version)
try:
dxpy.api.workflow_remove_stage(self._dxid, remove_stage_input, **kwargs)
finally:
self.describe() # update cached describe
return stage_id | :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
:returns: Stage ID that was removed
:rtype: string
Removes the specified stage from the workflow | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L257-L275 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.move_stage | def move_stage(self, stage, new_index, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage)
:type new_index: int
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Moves the specified stage to a new position in the workflow's stage list
'''
stage_id = self._get_stage_id(stage)
move_stage_input = {"stage": stage_id,
"newIndex": new_index}
self._add_edit_version_to_request(move_stage_input, edit_version)
try:
dxpy.api.workflow_move_stage(self._dxid, move_stage_input, **kwargs)
finally:
self.describe() | python | def move_stage(self, stage, new_index, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage)
:type new_index: int
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Moves the specified stage to a new position in the workflow's stage list
'''
stage_id = self._get_stage_id(stage)
move_stage_input = {"stage": stage_id,
"newIndex": new_index}
self._add_edit_version_to_request(move_stage_input, edit_version)
try:
dxpy.api.workflow_move_stage(self._dxid, move_stage_input, **kwargs)
finally:
self.describe() | :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage)
:type new_index: int
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Moves the specified stage to a new position in the workflow's stage list | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L277-L295 |
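A combined sketch of stage reordering and removal, continuing the hypothetical wf handler from above:

    wf.move_stage(2, 0)                          # move the third stage to the front of the stage list
    removed_id = wf.remove_stage("align_reads")  # a name, index, or stage ID all work
    print(removed_id)                            # the stage-xxxx ID that was removed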
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.update | def update(self, title=None, unset_title=False, summary=None, description=None,
output_folder=None, unset_output_folder=False,
workflow_inputs=None, unset_workflow_inputs=False,
workflow_outputs=None, unset_workflow_outputs=False,
stages=None, edit_version=None, **kwargs):
'''
:param title: workflow title to set; cannot be provided with *unset_title* set to True
:type title: string
:param unset_title: whether to unset the title; cannot be provided with string value for *title*
:type unset_title: boolean
:param summary: workflow summary to set
:type summary: string
:param description: workflow description to set
:type description: string
:param output_folder: new default output folder for the workflow
:type output_folder: string
:param unset_output_folder: whether to unset the default output folder; cannot be True with string value for *output_folder*
:type unset_folder: boolean
:param stages: updates to the stages to make; see API documentation for /workflow-xxxx/update for syntax of this field; use :meth:`update_stage()` to update a single stage
:type stages: dict
:param workflow_inputs: updates to the workflow input to make; see API documentation for /workflow-xxxx/update for syntax of this field
:type workflow_inputs: dict
:param workflow_outputs: updates to the workflow output to make; see API documentation for /workflow-xxxx/update for syntax of this field
:type workflow_outputs: dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Make general metadata updates to the workflow
'''
update_input = {}
if title is not None and unset_title:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "title" and set "unset_title"')
if output_folder is not None and unset_output_folder:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "output_folder" and set "unset_output_folder"')
if workflow_inputs is not None and unset_workflow_inputs:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_inputs" and set "unset_workflow_inputs"')
if workflow_outputs is not None and unset_workflow_outputs:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_outputs" and set "unset_workflow_outputs"')
if title is not None:
update_input["title"] = title
elif unset_title:
update_input["title"] = None
if summary is not None:
update_input["summary"] = summary
if description is not None:
update_input["description"] = description
if output_folder is not None:
update_input["outputFolder"] = output_folder
elif unset_output_folder:
update_input["outputFolder"] = None
if stages is not None:
update_input["stages"] = stages
if workflow_inputs is not None:
update_input["inputs"] = workflow_inputs
elif unset_workflow_inputs:
update_input["inputs"] = None
if workflow_outputs is not None:
update_input["outputs"] = workflow_outputs
elif unset_workflow_outputs:
update_input["outputs"] = None
# only perform update if there are changes to make
if update_input:
self._add_edit_version_to_request(update_input, edit_version)
try:
dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
finally:
self.describe() | python | def update(self, title=None, unset_title=False, summary=None, description=None,
output_folder=None, unset_output_folder=False,
workflow_inputs=None, unset_workflow_inputs=False,
workflow_outputs=None, unset_workflow_outputs=False,
stages=None, edit_version=None, **kwargs):
'''
:param title: workflow title to set; cannot be provided with *unset_title* set to True
:type title: string
:param unset_title: whether to unset the title; cannot be provided with string value for *title*
:type unset_title: boolean
:param summary: workflow summary to set
:type summary: string
:param description: workflow description to set
:type description: string
:param output_folder: new default output folder for the workflow
:type output_folder: string
:param unset_output_folder: whether to unset the default output folder; cannot be True with string value for *output_folder*
:type unset_folder: boolean
:param stages: updates to the stages to make; see API documentation for /workflow-xxxx/update for syntax of this field; use :meth:`update_stage()` to update a single stage
:type stages: dict
:param workflow_inputs: updates to the workflow input to make; see API documentation for /workflow-xxxx/update for syntax of this field
:type workflow_inputs: dict
:param workflow_outputs: updates to the workflow output to make; see API documentation for /workflow-xxxx/update for syntax of this field
:type workflow_outputs: dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Make general metadata updates to the workflow
'''
update_input = {}
if title is not None and unset_title:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "title" and set "unset_title"')
if output_folder is not None and unset_output_folder:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "output_folder" and set "unset_output_folder"')
if workflow_inputs is not None and unset_workflow_inputs:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_inputs" and set "unset_workflow_inputs"')
if workflow_outputs is not None and unset_workflow_outputs:
raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_outputs" and set "unset_workflow_outputs"')
if title is not None:
update_input["title"] = title
elif unset_title:
update_input["title"] = None
if summary is not None:
update_input["summary"] = summary
if description is not None:
update_input["description"] = description
if output_folder is not None:
update_input["outputFolder"] = output_folder
elif unset_output_folder:
update_input["outputFolder"] = None
if stages is not None:
update_input["stages"] = stages
if workflow_inputs is not None:
update_input["inputs"] = workflow_inputs
elif unset_workflow_inputs:
update_input["inputs"] = None
if workflow_outputs is not None:
update_input["outputs"] = workflow_outputs
elif unset_workflow_outputs:
update_input["outputs"] = None
# only perform update if there are changes to make
if update_input:
self._add_edit_version_to_request(update_input, edit_version)
try:
dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
finally:
self.describe() | :param title: workflow title to set; cannot be provided with *unset_title* set to True
:type title: string
:param unset_title: whether to unset the title; cannot be provided with string value for *title*
:type unset_title: boolean
:param summary: workflow summary to set
:type summary: string
:param description: workflow description to set
:type description: string
:param output_folder: new default output folder for the workflow
:type output_folder: string
        :param unset_output_folder: whether to unset the default output folder; cannot be True with string value for *output_folder*
        :type unset_output_folder: boolean
:param stages: updates to the stages to make; see API documentation for /workflow-xxxx/update for syntax of this field; use :meth:`update_stage()` to update a single stage
:type stages: dict
        :param workflow_inputs: updates to the workflow input to make; see API documentation for /workflow-xxxx/update for syntax of this field
        :type workflow_inputs: dict
        :param unset_workflow_inputs: whether to unset the workflow inputs; cannot be True with a value for *workflow_inputs*
        :type unset_workflow_inputs: boolean
        :param workflow_outputs: updates to the workflow output to make; see API documentation for /workflow-xxxx/update for syntax of this field
        :type workflow_outputs: dict
        :param unset_workflow_outputs: whether to unset the workflow outputs; cannot be True with a value for *workflow_outputs*
        :type unset_workflow_outputs: boolean
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
Make general metadata updates to the workflow | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L297-L365 |
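A minimal usage sketch for `DXWorkflow.update` (not from the source; the workflow ID is a 24-character placeholder, and the calls assume you may modify the workflow):

```python
import dxpy

wf = dxpy.DXWorkflow("workflow-000000000000000000000000")  # placeholder ID
# Set a new title and default output folder in one call
wf.update(title="RNA-seq pipeline v2", output_folder="/results")
# Clear the title again; an unset_* flag cannot be combined with a value
wf.update(unset_title=True)
```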
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.update_stage | def update_stage(self, stage, executable=None, force=False,
name=None, unset_name=False, folder=None, unset_folder=False, stage_input=None,
instance_type=None, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string stage index, name, or ID
:type stage: int or string
:param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param force: whether to use *executable* even if it is incompatible with the previous executable's spec
:type force: boolean
:param name: new name for the stage; cannot be provided with *unset_name* set to True
:type name: string
:param unset_name: whether to unset the stage name; cannot be True with string value for *name*
:type unset_name: boolean
:param folder: new default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param unset_folder: whether to unset the stage folder; cannot be True with string value for *folder*
:type unset_folder: boolean
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
        Updates the specified stage of the workflow
'''
stage_id = self._get_stage_id(stage)
if name is not None and unset_name:
raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "name" and set "unset_name"')
if folder is not None and unset_folder:
raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "folder" and set "unset_folder"')
if executable is not None:
if isinstance(executable, basestring):
exec_id = executable
elif isinstance(executable, DXExecutable):
exec_id = executable.get_id()
else:
raise DXError("dxpy.DXWorkflow.update_stage: executable (if provided) must be a string or an instance of DXApplet or DXApp")
update_stage_exec_input = {"stage": stage_id,
"executable": exec_id,
"force": force}
self._add_edit_version_to_request(update_stage_exec_input, edit_version)
try:
dxpy.api.workflow_update_stage_executable(self._dxid, update_stage_exec_input, **kwargs)
finally:
self.describe() # update cached describe
# Construct hash and update the workflow's stage if necessary
update_stage_input = {}
if name is not None:
update_stage_input["name"] = name
elif unset_name:
update_stage_input["name"] = None
if folder:
update_stage_input["folder"] = folder
elif unset_folder:
update_stage_input["folder"] = None
if stage_input:
update_stage_input["input"] = stage_input
if instance_type is not None:
update_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
if update_stage_input:
update_input = {"stages": {stage_id: update_stage_input}}
self._add_edit_version_to_request(update_input, edit_version)
try:
dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
finally:
self.describe() | python | def update_stage(self, stage, executable=None, force=False,
name=None, unset_name=False, folder=None, unset_folder=False, stage_input=None,
instance_type=None, edit_version=None, **kwargs):
'''
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string stage index, name, or ID
:type stage: int or string
:param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param force: whether to use *executable* even if it is incompatible with the previous executable's spec
:type force: boolean
:param name: new name for the stage; cannot be provided with *unset_name* set to True
:type name: string
:param unset_name: whether to unset the stage name; cannot be True with string value for *name*
:type unset_name: boolean
:param folder: new default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param unset_folder: whether to unset the stage folder; cannot be True with string value for *folder*
:type unset_folder: boolean
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
        Updates the specified stage of the workflow
'''
stage_id = self._get_stage_id(stage)
if name is not None and unset_name:
raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "name" and set "unset_name"')
if folder is not None and unset_folder:
raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "folder" and set "unset_folder"')
if executable is not None:
if isinstance(executable, basestring):
exec_id = executable
elif isinstance(executable, DXExecutable):
exec_id = executable.get_id()
else:
raise DXError("dxpy.DXWorkflow.update_stage: executable (if provided) must be a string or an instance of DXApplet or DXApp")
update_stage_exec_input = {"stage": stage_id,
"executable": exec_id,
"force": force}
self._add_edit_version_to_request(update_stage_exec_input, edit_version)
try:
dxpy.api.workflow_update_stage_executable(self._dxid, update_stage_exec_input, **kwargs)
finally:
self.describe() # update cached describe
# Construct hash and update the workflow's stage if necessary
update_stage_input = {}
if name is not None:
update_stage_input["name"] = name
elif unset_name:
update_stage_input["name"] = None
if folder:
update_stage_input["folder"] = folder
elif unset_folder:
update_stage_input["folder"] = None
if stage_input:
update_stage_input["input"] = stage_input
if instance_type is not None:
update_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
if update_stage_input:
update_input = {"stages": {stage_id: update_stage_input}}
self._add_edit_version_to_request(update_input, edit_version)
try:
dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
finally:
self.describe() | :param stage: A number for the stage index (for the nth stage, starting from 0), or a string stage index, name, or ID
:type stage: int or string
:param executable: string or a handler for an app or applet
:type executable: string, DXApplet, or DXApp
:param force: whether to use *executable* even if it is incompatible with the previous executable's spec
:type force: boolean
:param name: new name for the stage; cannot be provided with *unset_name* set to True
:type name: string
:param unset_name: whether to unset the stage name; cannot be True with string value for *name*
:type unset_name: boolean
:param folder: new default output folder for the stage; either a relative or absolute path (optional)
:type folder: string
:param unset_folder: whether to unset the stage folder; cannot be True with string value for *folder*
:type unset_folder: boolean
:param stage_input: input fields to bind as default inputs for the executable (optional)
:type stage_input: dict
:param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
:type edit_version: int
        Updates the specified stage of the workflow | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L367-L437 |
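A hedged sketch of `update_stage` (both IDs are placeholders; assumes the workflow has a first stage and the new applet is accessible):

```python
import dxpy

wf = dxpy.DXWorkflow("workflow-000000000000000000000000")      # placeholder ID
new_applet = dxpy.DXApplet("applet-000000000000000000000000")  # placeholder ID
# Swap the executable of stage 0, forcing past any spec incompatibility,
# and give the stage a new name and relative output folder
wf.update_stage(0, executable=new_applet, force=True,
                name="alignment", folder="stage-output")
```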
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow._get_input_name | def _get_input_name(self, input_str, region=None, describe_output=None):
'''
:param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
:type input_str: string
        :returns: *input_str*, with any stage index or stage name prefix translated to the corresponding stage ID for use in the API call (a stage name takes precedence over a stage index)
'''
if '.' in input_str:
stage_identifier, input_name = input_str.split('.', 1)
# Try to parse as a stage ID or name
return self._get_stage_id(stage_identifier) + '.' + input_name
return input_str | python | def _get_input_name(self, input_str, region=None, describe_output=None):
'''
:param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
:type input_str: string
        :returns: *input_str*, with any stage index or stage name prefix translated to the corresponding stage ID for use in the API call (a stage name takes precedence over a stage index)
'''
if '.' in input_str:
stage_identifier, input_name = input_str.split('.', 1)
# Try to parse as a stage ID or name
return self._get_stage_id(stage_identifier) + '.' + input_name
return input_str | :param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
:type input_str: string
        :returns: *input_str*, with any stage index or stage name prefix translated to the corresponding stage ID for use in the API call (a stage name takes precedence over a stage index) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L442-L453 |
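Illustrative behavior of this private helper, assuming `wf` is a handler for a real workflow whose first stage is named "align" and has the hypothetical ID "stage-aaaa":

```python
# Each of these resolves to "stage-aaaa.reads":
wf._get_input_name("0.reads")            # stage index
wf._get_input_name("align.reads")        # stage name
wf._get_input_name("stage-aaaa.reads")   # stage ID
# A key with no "." passes through unchanged:
wf._get_input_name("reference")          # -> "reference"
```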
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxworkflow.py | DXWorkflow.run | def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the associated workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
        When providing input for the workflow, keys should take one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXWorkflow, self).run(workflow_input, *args, **kwargs) | python | def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the associated workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
        When providing input for the workflow, keys should take one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXWorkflow, self).run(workflow_input, *args, **kwargs) | :param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the associated workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
        When providing input for the workflow, keys should take one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L503-L541 |
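A sketch of running a workflow with both stage-qualified and workflow-level inputs (all IDs are placeholders; "mem1_ssd1_x8" stands in for any valid instance type):

```python
import dxpy

wf = dxpy.DXWorkflow("workflow-000000000000000000000000")        # placeholder ID
analysis = wf.run(
    {"0.reads": dxpy.dxlink("file-000000000000000000000000"),    # stage 0 input
     "genome": dxpy.dxlink("file-000000000000000000000000")},    # workflow-level input
    stage_instance_types={0: "mem1_ssd1_x8"},  # override stage 0 only
    rerun_stages=["*"])                        # ignore all cached results
analysis.wait_on_done()
```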
dnanexus/dx-toolkit | src/python/dxpy/utils/completer.py | get_folder_matches | def get_folder_matches(text, delim_pos, dxproj, folderpath):
'''
:param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for folder matches
:type folderpath: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line.
'''
try:
folders = dxproj.list_folder(folder=folderpath, only='folders')['folders']
folder_names = [name[name.rfind('/') + 1:] for name in folders]
if text != '' and delim_pos != len(text) - 1:
folder_names += ['.', '..']
prefix = text[:delim_pos + 1]
return [prefix + f + '/' for f in folder_names if (prefix + f + '/').startswith(text)]
except:
return [] | python | def get_folder_matches(text, delim_pos, dxproj, folderpath):
'''
:param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for folder matches
:type folderpath: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line.
'''
try:
folders = dxproj.list_folder(folder=folderpath, only='folders')['folders']
folder_names = [name[name.rfind('/') + 1:] for name in folders]
if text != '' and delim_pos != len(text) - 1:
folder_names += ['.', '..']
prefix = text[:delim_pos + 1]
return [prefix + f + '/' for f in folder_names if (prefix + f + '/').startswith(text)]
except:
return [] | :param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for folder matches
:type folderpath: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/completer.py#L87-L111 |
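A sketch of how a completer might call this helper, assuming a default workspace is configured and "/data" exists in it:

```python
import dxpy
from dxpy.utils.completer import get_folder_matches

proj = dxpy.DXProject(dxpy.WORKSPACE_ID)
# Complete "/data/rea" against subfolders of "/data"; delim_pos=5 is the
# index of the last "/" in the text, so matches keep the "/data/" prefix
matches = get_folder_matches("/data/rea", 5, proj, "/data")
# e.g. ["/data/reads/", "/data/reads_trimmed/"] if such folders exist
```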
dnanexus/dx-toolkit | src/python/dxpy/utils/completer.py | get_data_matches | def get_data_matches(text, delim_pos, dxproj, folderpath, classname=None,
typespec=None, visibility=None):
'''
:param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" or ":" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for data object matches
:type folderpath: string
:param classname: Data object class by which to restrict the search (None for no restriction on class)
    :type classname: string
    :param typespec: if provided, restrict the matches to data objects with the given type
    :type typespec: string or dict
:param visibility: Visibility to constrain the results to; default is "visible" for empty strings, "either" for nonempty
:type visibility: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line.
'''
#unescaped_text = unescape_completion_name_str(text[delim_pos + 1:])
unescaped_text = text[delim_pos + 1:]
if visibility is None:
if text != '' and delim_pos != len(text) - 1:
visibility = "either"
else:
visibility = "visible"
try:
results = list(dxpy.find_data_objects(project=dxproj.get_id(),
folder=folderpath,
name=unescaped_text + "*",
name_mode="glob",
recurse=False,
visibility=visibility,
classname=classname,
limit=100,
describe=dict(fields=dict(name=True)),
typename=typespec))
prefix = '' if text == '' else text[:delim_pos + 1]
return [prefix + escape_name(result['describe']['name']) for result in results]
except:
return [] | python | def get_data_matches(text, delim_pos, dxproj, folderpath, classname=None,
typespec=None, visibility=None):
'''
:param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" or ":" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for data object matches
:type folderpath: string
:param classname: Data object class by which to restrict the search (None for no restriction on class)
    :type classname: string
    :param typespec: if provided, restrict the matches to data objects with the given type
    :type typespec: string or dict
:param visibility: Visibility to constrain the results to; default is "visible" for empty strings, "either" for nonempty
:type visibility: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line.
'''
#unescaped_text = unescape_completion_name_str(text[delim_pos + 1:])
unescaped_text = text[delim_pos + 1:]
if visibility is None:
if text != '' and delim_pos != len(text) - 1:
visibility = "either"
else:
visibility = "visible"
try:
results = list(dxpy.find_data_objects(project=dxproj.get_id(),
folder=folderpath,
name=unescaped_text + "*",
name_mode="glob",
recurse=False,
visibility=visibility,
classname=classname,
limit=100,
describe=dict(fields=dict(name=True)),
typename=typespec))
prefix = '' if text == '' else text[:delim_pos + 1]
return [prefix + escape_name(result['describe']['name']) for result in results]
except:
return [] | :param text: String to be tab-completed; still in escaped form
:type text: string
:param delim_pos: index of last unescaped "/" or ":" in text
:type delim_pos: int
:param dxproj: DXProject handler to use
:type dxproj: DXProject
:param folderpath: Unescaped path in which to search for data object matches
:type folderpath: string
:param classname: Data object class by which to restrict the search (None for no restriction on class)
    :type classname: string
    :param typespec: if provided, restrict the matches to data objects with the given type
    :type typespec: string or dict
:param visibility: Visibility to constrain the results to; default is "visible" for empty strings, "either" for nonempty
:type visibility: string
:returns: List of matches
:rtype: list of strings
Members of the returned list are guaranteed to start with *text*
and be in escaped form for consumption by the command-line. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/completer.py#L113-L157 |
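A companion sketch for data objects (same assumptions as the folder example above; `classname` narrows the search to files):

```python
import dxpy
from dxpy.utils.completer import get_data_matches

proj = dxpy.DXProject(dxpy.WORKSPACE_ID)
# Complete "/data/rea" against visible files directly under "/data";
# the glob "rea*" is matched server-side and at most 100 names return
matches = get_data_matches("/data/rea", 5, proj, "/data", classname="file")
```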
dnanexus/dx-toolkit | src/python/dxpy/utils/completer.py | path_completer | def path_completer(text, expected=None, classes=None, perm_level=None,
include_current_proj=False, typespec=None, visibility=None):
'''
:param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name
:type text: string
:param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for
:type expected: string
:param classes: if expected="entity", the possible data object classes that are acceptable
:type classes: list of strings
:param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE"
:type perm_level: string
:param include_current_proj: Indicate whether the current project's name should be a potential result
    :type include_current_proj: boolean
    :param typespec: if provided, restrict data object matches to the given type (passed through to the data object search)
    :type typespec: string or dict
:param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*)
Returns a list of matches to the text and restricted by the
requested parameters.
'''
colon_pos = get_last_pos_of_char(':', text)
slash_pos = get_last_pos_of_char('/', text)
delim_pos = max(colon_pos, slash_pos)
# First get projects if necessary
matches = []
if expected == 'project' and colon_pos > 0 and colon_pos == len(text) - 1:
if dxpy.find_one_project(zero_ok=True, name=text[:colon_pos]) is not None:
return [text + " "]
if colon_pos < 0 and slash_pos < 0:
# Might be tab-completing a project, but don't ever include
# whatever's set as dxpy.WORKSPACE_ID unless expected == "project"
# Also, don't bother if text=="" and expected is NOT "project"
# Also, add space if expected == "project"
if text != "" or expected == 'project':
results = dxpy.find_projects(describe=True, level=perm_level)
if not include_current_proj:
results = [r for r in results if r['id'] != dxpy.WORKSPACE_ID]
matches += [escape_colon(r['describe']['name'])+':' for r in results if r['describe']['name'].startswith(text)]
if expected == 'project':
return matches
# Attempt to tab-complete to a folder or data object name
if colon_pos < 0 and slash_pos >= 0:
# Not tab-completing a project, and the project is unambiguous
# (use dxpy.WORKSPACE_ID)
if dxpy.WORKSPACE_ID is not None:
# try-catch block in case dxpy.WORKSPACE_ID is garbage
try:
dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
folderpath, entity_name = clean_folder_path(text)
matches += get_folder_matches(text, slash_pos, dxproj, folderpath)
if expected != 'folder':
if classes is not None:
for classname in classes:
matches += get_data_matches(text, slash_pos, dxproj,
folderpath, classname=classname,
typespec=typespec,
visibility=visibility)
else:
matches += get_data_matches(text, slash_pos, dxproj,
folderpath, typespec=typespec,
visibility=visibility)
except:
pass
else:
# project is given by a path, but attempt to resolve to an
# object or folder anyway
try:
proj_ids, folderpath, entity_name = resolve_path(text, multi_projects=True)
except ResolutionError as details:
sys.stderr.write("\n" + fill(str(details)))
return matches
for proj in proj_ids:
# protects against dxpy.WORKSPACE_ID being garbage
try:
dxproj = dxpy.get_handler(proj)
matches += get_folder_matches(text, delim_pos, dxproj, folderpath)
if expected != 'folder':
if classes is not None:
for classname in classes:
matches += get_data_matches(text, delim_pos, dxproj,
folderpath, classname=classname,
typespec=typespec, visibility=visibility)
else:
matches += get_data_matches(text, delim_pos, dxproj,
folderpath, typespec=typespec,
visibility=visibility)
except:
pass
return matches | python | def path_completer(text, expected=None, classes=None, perm_level=None,
include_current_proj=False, typespec=None, visibility=None):
'''
:param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name
:type text: string
:param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for
:type expected: string
:param classes: if expected="entity", the possible data object classes that are acceptable
:type classes: list of strings
:param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE"
:type perm_level: string
:param include_current_proj: Indicate whether the current project's name should be a potential result
    :type include_current_proj: boolean
    :param typespec: if provided, restrict data object matches to the given type (passed through to the data object search)
    :type typespec: string or dict
:param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*)
Returns a list of matches to the text and restricted by the
requested parameters.
'''
colon_pos = get_last_pos_of_char(':', text)
slash_pos = get_last_pos_of_char('/', text)
delim_pos = max(colon_pos, slash_pos)
# First get projects if necessary
matches = []
if expected == 'project' and colon_pos > 0 and colon_pos == len(text) - 1:
if dxpy.find_one_project(zero_ok=True, name=text[:colon_pos]) is not None:
return [text + " "]
if colon_pos < 0 and slash_pos < 0:
# Might be tab-completing a project, but don't ever include
# whatever's set as dxpy.WORKSPACE_ID unless expected == "project"
# Also, don't bother if text=="" and expected is NOT "project"
# Also, add space if expected == "project"
if text != "" or expected == 'project':
results = dxpy.find_projects(describe=True, level=perm_level)
if not include_current_proj:
results = [r for r in results if r['id'] != dxpy.WORKSPACE_ID]
matches += [escape_colon(r['describe']['name'])+':' for r in results if r['describe']['name'].startswith(text)]
if expected == 'project':
return matches
# Attempt to tab-complete to a folder or data object name
if colon_pos < 0 and slash_pos >= 0:
# Not tab-completing a project, and the project is unambiguous
# (use dxpy.WORKSPACE_ID)
if dxpy.WORKSPACE_ID is not None:
# try-catch block in case dxpy.WORKSPACE_ID is garbage
try:
dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
folderpath, entity_name = clean_folder_path(text)
matches += get_folder_matches(text, slash_pos, dxproj, folderpath)
if expected != 'folder':
if classes is not None:
for classname in classes:
matches += get_data_matches(text, slash_pos, dxproj,
folderpath, classname=classname,
typespec=typespec,
visibility=visibility)
else:
matches += get_data_matches(text, slash_pos, dxproj,
folderpath, typespec=typespec,
visibility=visibility)
except:
pass
else:
# project is given by a path, but attempt to resolve to an
# object or folder anyway
try:
proj_ids, folderpath, entity_name = resolve_path(text, multi_projects=True)
except ResolutionError as details:
sys.stderr.write("\n" + fill(str(details)))
return matches
for proj in proj_ids:
# protects against dxpy.WORKSPACE_ID being garbage
try:
dxproj = dxpy.get_handler(proj)
matches += get_folder_matches(text, delim_pos, dxproj, folderpath)
if expected != 'folder':
if classes is not None:
for classname in classes:
matches += get_data_matches(text, delim_pos, dxproj,
folderpath, classname=classname,
typespec=typespec, visibility=visibility)
else:
matches += get_data_matches(text, delim_pos, dxproj,
folderpath, typespec=typespec,
visibility=visibility)
except:
pass
return matches | :param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name
:type text: string
:param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for
:type expected: string
:param classes: if expected="entity", the possible data object classes that are acceptable
:type classes: list of strings
:param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE"
:type perm_level: string
:param include_current_proj: Indicate whether the current project's name should be a potential result
    :type include_current_proj: boolean
    :param typespec: if provided, restrict data object matches to the given type (passed through to the data object search)
    :type typespec: string or dict
:param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*)
Returns a list of matches to the text and restricted by the
requested parameters. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/completer.py#L159-L249 |
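A sketch of a top-level completion call (the project and path are hypothetical):

```python
from dxpy.utils.completer import path_completer

# Complete a partially typed project-qualified path to file objects the
# user can at least VIEW; the helper falls back to [] on any failure
matches = path_completer("my-project:/data/rea",
                         expected="entity",
                         classes=["file"],
                         perm_level="VIEW")
```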
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | verify_string_dxid | def verify_string_dxid(dxid, expected_classes):
'''
    :param dxid: Value to verify as a DNAnexus ID of one of the classes in *expected_classes*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
'''
if isinstance(expected_classes, basestring):
expected_classes = [expected_classes]
if not isinstance(expected_classes, list) or len(expected_classes) == 0:
raise DXError('verify_string_dxid: expected_classes should be a string or list of strings')
if not (isinstance(dxid, basestring) and
re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)):
if len(expected_classes) == 1:
str_expected_classes = expected_classes[0]
elif len(expected_classes) == 2:
str_expected_classes = ' or '.join(expected_classes)
else:
str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1]
raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid)) | python | def verify_string_dxid(dxid, expected_classes):
'''
    :param dxid: Value to verify as a DNAnexus ID of one of the classes in *expected_classes*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class
'''
if isinstance(expected_classes, basestring):
expected_classes = [expected_classes]
if not isinstance(expected_classes, list) or len(expected_classes) == 0:
raise DXError('verify_string_dxid: expected_classes should be a string or list of strings')
if not (isinstance(dxid, basestring) and
re.match('^(' + '|'.join(expected_classes) + ')-[0-9a-zA-Z]{24}$', dxid)):
if len(expected_classes) == 1:
str_expected_classes = expected_classes[0]
elif len(expected_classes) == 2:
str_expected_classes = ' or '.join(expected_classes)
else:
str_expected_classes = ', '.join(expected_classes[:-1]) + ', or ' + expected_classes[-1]
        raise DXError('Invalid ID of class %s: %r' % (str_expected_classes, dxid)) | :param dxid: Value to verify as a DNAnexus ID of one of the classes in *expected_classes*
:param expected_classes: Single string or list of strings of allowed classes of the ID, e.g. "file" or ["project", "container"]
:type expected_classes: string or list of strings
:raises: :exc:`~dxpy.exceptions.DXError` if *dxid* is not a string or is not a valid DNAnexus ID of the expected class | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L32-L52 |
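Quick illustrations; the IDs below are format-valid 24-character placeholders, not real objects:

```python
from dxpy.bindings import verify_string_dxid

verify_string_dxid("file-000000000000000000000000", "file")  # passes silently
verify_string_dxid("record-000000000000000000000000",
                   ["project", "container"])  # raises DXError: wrong class
```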
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXObject.set_id | def set_id(self, dxid):
'''
:param dxid: New ID to be associated with the handler
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid | python | def set_id(self, dxid):
'''
:param dxid: New ID to be associated with the handler
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid | :param dxid: New ID to be associated with the handler
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid* | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L113-L123 |
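A sketch with a project handler (placeholder IDs; only the local format check runs here, no API call is made):

```python
import dxpy

proj = dxpy.DXProject("project-000000000000000000000000")
proj.set_id("project-111111111111111111111111")  # re-point the handler
proj.set_id(None)                                # or detach it entirely
```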
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.new | def new(self, **kwargs):
'''
:param project: Project ID in which to create the new remote object
:type project: string
:param name: Name for the object
:type name: string
:param tags: Tags to add for the object
:type tags: list of strings
:param types: Types to add to the object
:type types: list of strings
:param hidden: Whether the object is to be hidden
:type hidden: boolean
:param properties: Properties given as key-value pairs of strings
:type properties: dict
:param details: Details to set for the object
:type details: dict or list
:param folder: Full path to the destination folder
:type folder: string
:param parents: If True, recursively create all parent folders if they are missing
:type parents: boolean
:rtype: :class:`DXDataObject`
Creates a data object with the given fields. Only *project* is
required, and only if no default project or workspace is set;
the remaining arguments are optional and have default behavior
as specified in the API documentation for the ``/new`` method of
each data object class.
'''
if not hasattr(self, '_class'):
raise NotImplementedError(
"DXDataObject is an abstract class; a subclass should" + \
"be initialized instead.")
dx_hash, remaining_kwargs = self._get_creation_params(kwargs)
self._new(dx_hash, **remaining_kwargs) | python | def new(self, **kwargs):
'''
:param project: Project ID in which to create the new remote object
:type project: string
:param name: Name for the object
:type name: string
:param tags: Tags to add for the object
:type tags: list of strings
:param types: Types to add to the object
:type types: list of strings
:param hidden: Whether the object is to be hidden
:type hidden: boolean
:param properties: Properties given as key-value pairs of strings
:type properties: dict
:param details: Details to set for the object
:type details: dict or list
:param folder: Full path to the destination folder
:type folder: string
:param parents: If True, recursively create all parent folders if they are missing
:type parents: boolean
:rtype: :class:`DXDataObject`
Creates a data object with the given fields. Only *project* is
required, and only if no default project or workspace is set;
the remaining arguments are optional and have default behavior
as specified in the API documentation for the ``/new`` method of
each data object class.
'''
if not hasattr(self, '_class'):
raise NotImplementedError(
"DXDataObject is an abstract class; a subclass should" + \
"be initialized instead.")
dx_hash, remaining_kwargs = self._get_creation_params(kwargs)
self._new(dx_hash, **remaining_kwargs) | :param project: Project ID in which to create the new remote object
:type project: string
:param name: Name for the object
:type name: string
:param tags: Tags to add for the object
:type tags: list of strings
:param types: Types to add to the object
:type types: list of strings
:param hidden: Whether the object is to be hidden
:type hidden: boolean
:param properties: Properties given as key-value pairs of strings
:type properties: dict
:param details: Details to set for the object
:type details: dict or list
:param folder: Full path to the destination folder
:type folder: string
:param parents: If True, recursively create all parent folders if they are missing
:type parents: boolean
:rtype: :class:`DXDataObject`
Creates a data object with the given fields. Only *project* is
required, and only if no default project or workspace is set;
the remaining arguments are optional and have default behavior
as specified in the API documentation for the ``/new`` method of
each data object class. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L222-L258 |
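A sketch using the `DXRecord` subclass (the project ID is a placeholder; the handler is created empty and then bound to a new remote record):

```python
import dxpy

record = dxpy.DXRecord()
record.new(project="project-000000000000000000000000",
           name="sample-metadata",
           folder="/inputs", parents=True,
           types=["SampleSheet"],
           details={"version": 1})
record.close()  # records can be closed immediately after creation
```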
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.set_ids | def set_ids(self, dxid, project=None):
'''
        :param dxid: Object ID or a DNAnexus link (a dict with key "$dnanexus_link"); if a project ID is provided in the DNAnexus link, it will be used as *project* unless *project* has been explicitly provided
:type dxid: string or dict
:param project: Project ID
:type project: string
Discards the currently stored ID and associates the handler with
*dxid*. Associates the handler with the copy of the object in
*project* (if no project is explicitly specified, the default
data container is used).
'''
if is_dxlink(dxid):
dxid, project_from_link = get_dxlink_ids(dxid)
if project is None:
project = project_from_link
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid
if project is None:
self._proj = dxpy.WORKSPACE_ID
        else:
verify_string_dxid(project, ['project', 'container'])
self._proj = project | python | def set_ids(self, dxid, project=None):
'''
        :param dxid: Object ID or a DNAnexus link (a dict with key "$dnanexus_link"); if a project ID is provided in the DNAnexus link, it will be used as *project* unless *project* has been explicitly provided
:type dxid: string or dict
:param project: Project ID
:type project: string
Discards the currently stored ID and associates the handler with
*dxid*. Associates the handler with the copy of the object in
*project* (if no project is explicitly specified, the default
data container is used).
'''
if is_dxlink(dxid):
dxid, project_from_link = get_dxlink_ids(dxid)
if project is None:
project = project_from_link
if dxid is not None:
verify_string_dxid(dxid, self._class)
self._dxid = dxid
if project is None:
self._proj = dxpy.WORKSPACE_ID
        else:
verify_string_dxid(project, ['project', 'container'])
            self._proj = project | :param dxid: Object ID or a DNAnexus link (a dict with key "$dnanexus_link"); if a project ID is provided in the DNAnexus link, it will be used as *project* unless *project* has been explicitly provided
:type dxid: string or dict
:param project: Project ID
:type project: string
Discards the currently stored ID and associates the handler with
*dxid*. Associates the handler with the copy of the object in
*project* (if no project is explicitly specified, the default
data container is used). | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L271-L297 |
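A sketch of both accepted forms (placeholder IDs):

```python
import dxpy

dxfile = dxpy.DXFile()
# Explicit IDs:
dxfile.set_ids("file-000000000000000000000000",
               "project-000000000000000000000000")
# Or a DNAnexus link; its embedded project is used when none is passed:
dxfile.set_ids({"$dnanexus_link": {"id": "file-000000000000000000000000",
                                   "project": "project-000000000000000000000000"}})
```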
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.describe | def describe(self, incl_properties=False, incl_details=False, fields=None, default_fields=None, **kwargs):
"""
:param fields: set of fields to include in the output, for
example ``{'name', 'modified'}``. The field ``id`` is always
implicitly included. If ``fields`` is specified, the default
fields are not included (that is, only the fields specified
here, and ``id``, are included) unless ``default_fields`` is
additionally set to True.
:type fields: set or sequence of str
:param default_fields: if True, include the default fields in
addition to fields requested in ``fields``, if any; if
False, only the fields specified in ``fields``, if any, are
returned (defaults to False if ``fields`` is specified, True
otherwise)
:type default_fields: bool
:param incl_properties: if true, includes the properties of the
object in the output (deprecated; use
``fields={'properties'}, default_fields=True`` instead)
:type incl_properties: bool
:param incl_details: if true, includes the details of the object
in the output (deprecated; use ``fields={'details'},
default_fields=True`` instead)
:type incl_details: bool
:returns: Description of the remote object
:rtype: dict
Return a dict with a description of the remote data object.
The result includes the key-value pairs as specified in the API
documentation for the ``/describe`` method of each data object
class. The API defines some default set of fields that will be
included (at a minimum, "id", "class", etc. should be available,
and there may be additional fields that vary based on the
class); the set of fields may be customized using ``fields`` and
``default_fields``.
Any project-specific metadata fields (name, properties, and
tags) are obtained from the copy of the object in the project
associated with the handler, if possible.
"""
if self._dxid is None:
raise DXError('This {handler} handler has not been initialized with a {_class} ID and cannot be described'.format(
handler=self.__class__.__name__,
_class=self._class)
)
if (incl_properties or incl_details) and (fields is not None or default_fields is not None):
raise ValueError('Cannot specify properties or details in conjunction with fields or default_fields')
if incl_properties or incl_details:
describe_input = dict(properties=incl_properties, details=incl_details)
else:
describe_input = {}
if default_fields is not None:
describe_input['defaultFields'] = default_fields
if fields is not None:
describe_input['fields'] = {field_name: True for field_name in fields}
if self._proj is not None:
describe_input["project"] = self._proj
self._desc = self._describe(self._dxid, describe_input, **kwargs)
return self._desc | python | def describe(self, incl_properties=False, incl_details=False, fields=None, default_fields=None, **kwargs):
"""
:param fields: set of fields to include in the output, for
example ``{'name', 'modified'}``. The field ``id`` is always
implicitly included. If ``fields`` is specified, the default
fields are not included (that is, only the fields specified
here, and ``id``, are included) unless ``default_fields`` is
additionally set to True.
:type fields: set or sequence of str
:param default_fields: if True, include the default fields in
addition to fields requested in ``fields``, if any; if
False, only the fields specified in ``fields``, if any, are
returned (defaults to False if ``fields`` is specified, True
otherwise)
:type default_fields: bool
:param incl_properties: if true, includes the properties of the
object in the output (deprecated; use
``fields={'properties'}, default_fields=True`` instead)
:type incl_properties: bool
:param incl_details: if true, includes the details of the object
in the output (deprecated; use ``fields={'details'},
default_fields=True`` instead)
:type incl_details: bool
:returns: Description of the remote object
:rtype: dict
Return a dict with a description of the remote data object.
The result includes the key-value pairs as specified in the API
documentation for the ``/describe`` method of each data object
class. The API defines some default set of fields that will be
included (at a minimum, "id", "class", etc. should be available,
and there may be additional fields that vary based on the
class); the set of fields may be customized using ``fields`` and
``default_fields``.
Any project-specific metadata fields (name, properties, and
tags) are obtained from the copy of the object in the project
associated with the handler, if possible.
"""
if self._dxid is None:
raise DXError('This {handler} handler has not been initialized with a {_class} ID and cannot be described'.format(
handler=self.__class__.__name__,
_class=self._class)
)
if (incl_properties or incl_details) and (fields is not None or default_fields is not None):
raise ValueError('Cannot specify properties or details in conjunction with fields or default_fields')
if incl_properties or incl_details:
describe_input = dict(properties=incl_properties, details=incl_details)
else:
describe_input = {}
if default_fields is not None:
describe_input['defaultFields'] = default_fields
if fields is not None:
describe_input['fields'] = {field_name: True for field_name in fields}
if self._proj is not None:
describe_input["project"] = self._proj
self._desc = self._describe(self._dxid, describe_input, **kwargs)
return self._desc | :param fields: set of fields to include in the output, for
example ``{'name', 'modified'}``. The field ``id`` is always
implicitly included. If ``fields`` is specified, the default
fields are not included (that is, only the fields specified
here, and ``id``, are included) unless ``default_fields`` is
additionally set to True.
:type fields: set or sequence of str
:param default_fields: if True, include the default fields in
addition to fields requested in ``fields``, if any; if
False, only the fields specified in ``fields``, if any, are
returned (defaults to False if ``fields`` is specified, True
otherwise)
:type default_fields: bool
:param incl_properties: if true, includes the properties of the
object in the output (deprecated; use
``fields={'properties'}, default_fields=True`` instead)
:type incl_properties: bool
:param incl_details: if true, includes the details of the object
in the output (deprecated; use ``fields={'details'},
default_fields=True`` instead)
:type incl_details: bool
:returns: Description of the remote object
:rtype: dict
Return a dict with a description of the remote data object.
The result includes the key-value pairs as specified in the API
documentation for the ``/describe`` method of each data object
class. The API defines some default set of fields that will be
included (at a minimum, "id", "class", etc. should be available,
and there may be additional fields that vary based on the
class); the set of fields may be customized using ``fields`` and
``default_fields``.
Any project-specific metadata fields (name, properties, and
tags) are obtained from the copy of the object in the project
associated with the handler, if possible. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L311-L376 |
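A sketch of field selection (IDs are placeholders; the calls assume the file exists):

```python
import dxpy

dxfile = dxpy.DXFile("file-000000000000000000000000",
                     project="project-000000000000000000000000")
# Only "name" and "size" (plus the implicit "id") come back:
desc = dxfile.describe(fields={"name", "size"})
# The default field set, plus "properties":
desc = dxfile.describe(fields={"properties"}, default_fields=True)
```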
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.add_types | def add_types(self, types, **kwargs):
"""
:param types: Types to add to the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Adds each of the specified types to the remote object. Takes no
action for types that are already listed for the object.
"""
self._add_types(self._dxid, {"types": types}, **kwargs) | python | def add_types(self, types, **kwargs):
"""
:param types: Types to add to the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Adds each of the specified types to the remote object. Takes no
action for types that are already listed for the object.
"""
self._add_types(self._dxid, {"types": types}, **kwargs) | :param types: Types to add to the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
Adds each of the specified types to the remote object. Takes no
action for types that are already listed for the object. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L378-L389 |
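A sketch on an open record (placeholder ID; types may only be changed while the object is open):

```python
import dxpy

record = dxpy.DXRecord("record-000000000000000000000000")
record.add_types(["SampleSheet", "csv"])  # no-op for types already present
```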
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.remove_types | def remove_types(self, types, **kwargs):
"""
:param types: Types to remove from the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
        Removes each of the specified types from the remote object. Takes
no action for types that the object does not currently have.
"""
self._remove_types(self._dxid, {"types": types}, **kwargs) | python | def remove_types(self, types, **kwargs):
"""
:param types: Types to remove from the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
        Removes each of the specified types from the remote object. Takes
no action for types that the object does not currently have.
"""
self._remove_types(self._dxid, {"types": types}, **kwargs) | :param types: Types to remove from the object
:type types: list of strings
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
    Removes each of the specified types from the remote object. Takes
no action for types that the object does not currently have. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L391-L402 |
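And the inverse operation, again on an open record (placeholder ID):

```python
import dxpy

record = dxpy.DXRecord("record-000000000000000000000000")
record.remove_types(["csv"])  # no-op if the object lacks the type
```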
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.set_details | def set_details(self, details, **kwargs):
"""
:param details: Details to set for the object
:type details: dict or list
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
        Sets the details of the remote object to the specified value.
If the input contains the string ``"$dnanexus_link"`` as a key
in a hash, it must be the only key in the hash, and its value
must be a valid ID of an existing object.
"""
return self._set_details(self._dxid, details, **kwargs) | python | def set_details(self, details, **kwargs):
"""
:param details: Details to set for the object
:type details: dict or list
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
        Sets the details of the remote object to the specified value.
If the input contains the string ``"$dnanexus_link"`` as a key
in a hash, it must be the only key in the hash, and its value
must be a valid ID of an existing object.
"""
return self._set_details(self._dxid, details, **kwargs) | :param details: Details to set for the object
:type details: dict or list
:raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in the "open" state
    Sets the details of the remote object to the specified value.
If the input contains the string ``"$dnanexus_link"`` as a key
in a hash, it must be the only key in the hash, and its value
must be a valid ID of an existing object. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L413-L426 |
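A sketch embedding a link inside the details (placeholder IDs; note the `$dnanexus_link` hash may contain only that one key):

```python
import dxpy

record = dxpy.DXRecord("record-000000000000000000000000")
record.set_details({"pipeline": "rna-seq",
                    "reference": {"$dnanexus_link": "file-000000000000000000000000"}})
```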
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.rename | def rename(self, name, **kwargs):
"""
:param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler.
"""
return self._rename(self._dxid, {"project": self._proj,
"name": name}, **kwargs) | python | def rename(self, name, **kwargs):
"""
:param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler.
"""
return self._rename(self._dxid, {"project": self._proj,
"name": name}, **kwargs) | :param name: New name for the object
:type name: string
Renames the remote object.
The name is changed on the copy of the object in the project
associated with the handler. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L448-L461 |
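A sketch (placeholder IDs; the rename applies only in the handler's project):

```python
import dxpy

dxfile = dxpy.DXFile("file-000000000000000000000000",
                     project="project-000000000000000000000000")
dxfile.rename("reads_trimmed.fastq.gz")
```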
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.set_properties | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the object for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
The properties are written to the copy of the object in the
project associated with the handler.
The following example sets the properties for "name" and
"project" for a remote file::
dxfile.set_properties({"name": "George", "project": "cancer"})
Subsequently, the following would delete the property "project"::
dxfile.set_properties({"project": None})
"""
self._set_properties(self._dxid, {"project": self._proj,
"properties": properties},
**kwargs) | python | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the object for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
The properties are written to the copy of the object in the
project associated with the handler.
The following example sets the properties for "name" and
"project" for a remote file::
dxfile.set_properties({"name": "George", "project": "cancer"})
Subsequently, the following would delete the property "project"::
dxfile.set_properties({"project": None})
"""
self._set_properties(self._dxid, {"project": self._proj,
"properties": properties},
**kwargs) | :param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the object for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
The properties are written to the copy of the object in the
project associated with the handler.
The following example sets the properties for "name" and
"project" for a remote file::
dxfile.set_properties({"name": "George", "project": "cancer"})
Subsequently, the following would delete the property "project"::
dxfile.set_properties({"project": None}) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L476-L505 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.add_tags | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the object
:type tags: list of strings
Adds each of the specified tags to the remote object. Takes no
action for tags that are already listed for the object.
The tags are added to the copy of the object in the project
associated with the handler.
"""
self._add_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs) | python | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the object
:type tags: list of strings
Adds each of the specified tags to the remote object. Takes no
action for tags that are already listed for the object.
The tags are added to the copy of the object in the project
associated with the handler.
"""
self._add_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs) | :param tags: Tags to add to the object
:type tags: list of strings
Adds each of the specified tags to the remote object. Takes no
action for tags that are already listed for the object.
The tags are added to the copy of the object in the project
associated with the handler. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L507-L521 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.remove_tags | def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the object
:type tags: list of strings
Removes each of the specified tags from the remote object. Takes
no action for tags that the object does not currently have.
The tags are removed from the copy of the object in the project
associated with the handler.
"""
self._remove_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs) | python | def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the object
:type tags: list of strings
Removes each of the specified tags from the remote object. Takes
no action for tags that the object does not currently have.
The tags are removed from the copy of the object in the project
associated with the handler.
"""
self._remove_tags(self._dxid, {"project": self._proj, "tags": tags},
**kwargs) | :param tags: Tags to remove from the object
:type tags: list of strings
Removes each of the specified tags from the remote object. Takes
no action for tags that the object does not currently have.
The tags are removed from the copy of the object in the project
associated with the handler. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L523-L537 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.remove | def remove(self, **kwargs):
'''
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Permanently removes the associated remote object from the
associated project.
'''
if self._proj is None:
raise DXError("Remove called when a project ID was not associated with this object handler")
dxpy.api.project_remove_objects(self._proj, {"objects": [self._dxid]},
**kwargs)
# Reset internal state
self._dxid = None
self._proj = None
self._desc = {} | python | def remove(self, **kwargs):
'''
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Permanently removes the associated remote object from the
associated project.
'''
if self._proj is None:
raise DXError("Remove called when a project ID was not associated with this object handler")
dxpy.api.project_remove_objects(self._proj, {"objects": [self._dxid]},
**kwargs)
# Reset internal state
self._dxid = None
self._proj = None
self._desc = {} | :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Permanently removes the associated remote object from the
associated project. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L559-L576 |
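A minimal sketch of the removal flow above, assuming a handler bound to a project; the IDs are placeholders:

import dxpy

dxfile = dxpy.DXFile("file-xxxx", project="project-yyyy")
dxfile.remove()  # removes the project's copy and resets the handler's ID state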
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.move | def move(self, folder, **kwargs):
'''
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Moves the associated remote object to *folder*.
'''
if self._proj is None:
raise DXError("Move called when a project ID was not associated with this object handler")
dxpy.api.project_move(self._proj, {"objects": [self._dxid],
"destination": folder},
**kwargs) | python | def move(self, folder, **kwargs):
'''
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Moves the associated remote object to *folder*.
'''
if self._proj is None:
raise DXError("Move called when a project ID was not associated with this object handler")
dxpy.api.project_move(self._proj, {"objects": [self._dxid],
"destination": folder},
**kwargs) | :param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
Moves the associated remote object to *folder*. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L578-L593 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/__init__.py | DXDataObject.clone | def clone(self, project, folder="/", **kwargs):
'''
:param project: Destination project ID
:type project: string
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
:returns: An object handler for the new cloned object
:rtype: :class:`DXDataObject`
Clones the associated remote object to *folder* in *project* and
returns an object handler for the new object in the destination
project.
'''
if self._proj is None:
raise DXError("Clone called when a project ID was not associated with this object handler")
dxpy.api.project_clone(self._proj,
{"objects": [self._dxid],
"project": project,
"destination": folder},
**kwargs)
cloned_copy = copy.copy(self)
cloned_copy.set_ids(cloned_copy.get_id(), project)
return cloned_copy | python | def clone(self, project, folder="/", **kwargs):
'''
:param project: Destination project ID
:type project: string
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
:returns: An object handler for the new cloned object
:rtype: :class:`DXDataObject`
Clones the associated remote object to *folder* in *project* and
returns an object handler for the new object in the destination
project.
'''
if self._proj is None:
raise DXError("Clone called when a project ID was not associated with this object handler")
dxpy.api.project_clone(self._proj,
{"objects": [self._dxid],
"project": project,
"destination": folder},
**kwargs)
cloned_copy = copy.copy(self)
cloned_copy.set_ids(cloned_copy.get_id(), project)
return cloned_copy | :param project: Destination project ID
:type project: string
:param folder: Folder route to which to move the object
:type folder: string
:raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object
:returns: An object handler for the new cloned object
:rtype: :class:`DXDataObject`
Clones the associated remote object to *folder* in *project* and
returns an object handler for the new object in the destination
project. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L596-L622 |
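A minimal sketch of cloning a closed object between projects; the IDs are placeholders:

import dxpy

dxfile = dxpy.DXFile("file-xxxx", project="project-aaaa")
# Clone into the root folder of the destination project; the returned
# handler is bound to the copy in "project-bbbb".
cloned = dxfile.clone("project-bbbb", folder="/")
print(cloned.get_proj_id())  # "project-bbbb"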
dnanexus/dx-toolkit | src/python/dxpy/utils/config.py | DXConfig.get_session_conf_dir | def get_session_conf_dir(self, cleanup=False):
"""
Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes.
"""
sessions_dir = os.path.join(self._user_conf_dir, "sessions")
try:
from psutil import Process, pid_exists
if cleanup:
try:
session_dirs = os.listdir(sessions_dir)
except OSError as e:
# Silently skip cleanup and continue if we are unable to
# enumerate the session directories for any reason
# (including, most commonly, because the sessions dir
# doesn't exist)
session_dirs = []
for session_dir in session_dirs:
try:
session_pid = int(session_dir)
except ValueError:
# If dir name doesn't look like an int, leave it
# alone
continue
if not pid_exists(session_pid):
rmtree(os.path.join(sessions_dir, session_dir), ignore_errors=True)
parent_process = Process(os.getpid()).parent()
default_session_dir = os.path.join(sessions_dir, str(parent_process.pid))
while parent_process is not None and parent_process.pid != 0:
session_dir = os.path.join(sessions_dir, str(parent_process.pid))
if os.path.exists(session_dir):
return session_dir
parent_process = parent_process.parent()
return default_session_dir
except (ImportError, IOError, AttributeError) as e:
# We don't bundle psutil with Windows, so failure to import
# psutil would be expected.
if platform.system() != 'Windows':
warn(fill("Error while retrieving session configuration: " + format_exception(e)))
except Exception as e:
warn(fill("Unexpected error while retrieving session configuration: " + format_exception(e)))
return self._get_ppid_session_conf_dir(sessions_dir) | python | def get_session_conf_dir(self, cleanup=False):
"""
Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes.
"""
sessions_dir = os.path.join(self._user_conf_dir, "sessions")
try:
from psutil import Process, pid_exists
if cleanup:
try:
session_dirs = os.listdir(sessions_dir)
except OSError as e:
# Silently skip cleanup and continue if we are unable to
# enumerate the session directories for any reason
# (including, most commonly, because the sessions dir
# doesn't exist)
session_dirs = []
for session_dir in session_dirs:
try:
session_pid = int(session_dir)
except ValueError:
# If dir name doesn't look like an int, leave it
# alone
continue
if not pid_exists(session_pid):
rmtree(os.path.join(sessions_dir, session_dir), ignore_errors=True)
parent_process = Process(os.getpid()).parent()
default_session_dir = os.path.join(sessions_dir, str(parent_process.pid))
while parent_process is not None and parent_process.pid != 0:
session_dir = os.path.join(sessions_dir, str(parent_process.pid))
if os.path.exists(session_dir):
return session_dir
parent_process = parent_process.parent()
return default_session_dir
except (ImportError, IOError, AttributeError) as e:
# We don't bundle psutil with Windows, so failure to import
# psutil would be expected.
if platform.system() != 'Windows':
warn(fill("Error while retrieving session configuration: " + format_exception(e)))
except Exception as e:
warn(fill("Unexpected error while retrieving session configuration: " + format_exception(e)))
return self._get_ppid_session_conf_dir(sessions_dir) | Tries to find the session configuration directory by looking in ~/.dnanexus_config/sessions/<PID>,
where <PID> is the PID of the parent of this process, then its parent, and so on.
If none of those exist, the path for the immediate parent is given, even if it doesn't exist.
If *cleanup* is True, looks up and deletes all session configuration directories that belong to nonexistent
processes. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/config.py#L161-L208 |
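A standalone sketch of the parent-PID walk this method performs, assuming psutil is available; the sessions path mirrors the default location described above:

import os
from psutil import Process

sessions_dir = os.path.expanduser("~/.dnanexus_config/sessions")

# Walk up the process tree until a session directory for an ancestor exists.
parent = Process(os.getpid()).parent()
default_dir = os.path.join(sessions_dir, str(parent.pid))
while parent is not None and parent.pid != 0:
    candidate = os.path.join(sessions_dir, str(parent.pid))
    if os.path.exists(candidate):
        break
    parent = parent.parent()
else:
    # No ancestor has a session dir; fall back to the immediate parent's path.
    candidate = default_dir
print(candidate)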
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | dxlink | def dxlink(object_id, project_id=None, field=None):
'''
:param object_id: Object ID or the object handler itself
:type object_id: string or :class:`~dxpy.bindings.DXDataObject`
:param project_id: A project ID, if creating a cross-project DXLink
:type project_id: string
:param field: A field name, if creating a job-based object reference
:type field: string
:returns: A dict formatted as a symbolic DNAnexus object reference
:rtype: dict
Creates a DXLink to the specified object.
If `object_id` is already a link, it is returned without modification.
If `object_id` is a :class:`~dxpy.bindings.DXDataObject`, the object ID is
retrieved via its `get_id()` method.
If `field` is not `None`, `object_id` is expected to be of class 'job'
and the link created is a Job Based Object Reference (JBOR), which is
of the form::
{'$dnanexus_link': {'job': object_id, 'field': field}}
If `field` is `None` and `project_id` is not `None`, the link created
is a project-specific link of the form::
{'$dnanexus_link': {'project': project_id, 'id': object_id}}
'''
if is_dxlink(object_id):
return object_id
if isinstance(object_id, DXDataObject):
object_id = object_id.get_id()
if not any((project_id, field)):
return {'$dnanexus_link': object_id}
elif field:
dxpy.verify_string_dxid(object_id, "job")
return {'$dnanexus_link': {'job': object_id, 'field': field}}
else:
return {'$dnanexus_link': {'project': project_id, 'id': object_id}} | python | def dxlink(object_id, project_id=None, field=None):
'''
:param object_id: Object ID or the object handler itself
:type object_id: string or :class:`~dxpy.bindings.DXDataObject`
:param project_id: A project ID, if creating a cross-project DXLink
:type project_id: string
:param field: A field name, if creating a job-based object reference
:type field: string
:returns: A dict formatted as a symbolic DNAnexus object reference
:rtype: dict
Creates a DXLink to the specified object.
If `object_id` is already a link, it is returned without modification.
If `object_id` is a :class:`~dxpy.bindings.DXDataObject`, the object ID is
retrieved via its `get_id()` method.
If `field` is not `None`, `object_id` is expected to be of class 'job'
and the link created is a Job Based Object Reference (JBOR), which is
of the form::
{'$dnanexus_link': {'job': object_id, 'field': field}}
If `field` is `None` and `project_id` is not `None`, the link created
is a project-specific link of the form::
{'$dnanexus_link': {'project': project_id, 'id': object_id}}
'''
if is_dxlink(object_id):
return object_id
if isinstance(object_id, DXDataObject):
object_id = object_id.get_id()
if not any((project_id, field)):
return {'$dnanexus_link': object_id}
elif field:
dxpy.verify_string_dxid(object_id, "job")
return {'$dnanexus_link': {'job': object_id, 'field': field}}
else:
return {'$dnanexus_link': {'project': project_id, 'id': object_id}} | :param object_id: Object ID or the object handler itself
:type object_id: string or :class:`~dxpy.bindings.DXDataObject`
:param project_id: A project ID, if creating a cross-project DXLink
:type project_id: string
:param field: A field name, if creating a job-based object reference
:type field: string
:returns: A dict formatted as a symbolic DNAnexus object reference
:rtype: dict
Creates a DXLink to the specified object.
If `object_id` is already a link, it is returned without modification.
If `object_id` is a :class:`~dxpy.bindings.DXDataObject`, the object ID is
retrieved via its `get_id()` method.
If `field` is not `None`, `object_id` is expected to be of class 'job'
and the link created is a Job Based Object Reference (JBOR), which is
of the form::
{'$dnanexus_link': {'job': object_id, 'field': field}}
If `field` is `None` and `project_id` is not `None`, the link created
is a project-specific link of the form::
{'$dnanexus_link': {'project': project_id, 'id': object_id}} | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L37-L76 |
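A minimal sketch of the three link forms; real DNAnexus IDs have 24-character suffixes after the class prefix, so the placeholders below would not pass ID validation in the JBOR branch:

import dxpy

dxpy.dxlink("file-xxxx")
# -> {'$dnanexus_link': 'file-xxxx'}

dxpy.dxlink("file-xxxx", project_id="project-yyyy")
# -> {'$dnanexus_link': {'project': 'project-yyyy', 'id': 'file-xxxx'}}

dxpy.dxlink("job-zzzz", field="output_name")  # object_id must be a job ID here
# -> {'$dnanexus_link': {'job': 'job-zzzz', 'field': 'output_name'}}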
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | is_dxlink | def is_dxlink(x):
'''
:param x: A potential DNAnexus link
Returns whether *x* appears to be a DNAnexus link (is a dict with
key ``"$dnanexus_link"``) with a referenced data object.
'''
if not isinstance(x, dict):
return False
if '$dnanexus_link' not in x:
return False
link = x['$dnanexus_link']
if isinstance(link, basestring):
return True
elif isinstance(link, dict):
return any(key in link for key in ('id', 'job'))
return False | python | def is_dxlink(x):
'''
:param x: A potential DNAnexus link
Returns whether *x* appears to be a DNAnexus link (is a dict with
key ``"$dnanexus_link"``) with a referenced data object.
'''
if not isinstance(x, dict):
return False
if '$dnanexus_link' not in x:
return False
link = x['$dnanexus_link']
if isinstance(link, basestring):
return True
elif isinstance(link, dict):
return any(key in link for key in ('id', 'job'))
return False | :param x: A potential DNAnexus link
Returns whether *x* appears to be a DNAnexus link (is a dict with
key ``"$dnanexus_link"``) with a referenced data object. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L78-L94 |
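A few illustrative checks (placeholder IDs):

import dxpy

dxpy.is_dxlink({"$dnanexus_link": "file-xxxx"})                          # True
dxpy.is_dxlink({"$dnanexus_link": {"id": "file-xxxx"}})                  # True
dxpy.is_dxlink({"$dnanexus_link": {"job": "job-xxxx", "field": "out"}})  # True
dxpy.is_dxlink({"$dnanexus_link": {}})   # False: neither 'id' nor 'job'
dxpy.is_dxlink("file-xxxx")              # False: not a dict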
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | get_dxlink_ids | def get_dxlink_ids(link):
'''
:param link: A DNAnexus link
:type link: dict
:returns: (Object ID, Project ID) if the link is to a data object (or :const:`None`
if no project specified in the link), or (Job ID, Field) if the link is
a job-based object reference (JBOR).
:rtype: tuple
Get the object ID and detail from a link. There are three types of links:
* Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns
``("file-XXXX", None)``.
* Data object link of the form ``{"$dnanexus_link": {"id": "file-XXXX",
"project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``.
* Job-based object reference (JBOR) of the form ``{"$dnanexus_link":
{"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``.
'''
if not is_dxlink(link):
raise DXError('Invalid link: %r' % link)
if isinstance(link['$dnanexus_link'], basestring):
return link['$dnanexus_link'], None
elif 'id' in link['$dnanexus_link']:
return link['$dnanexus_link']['id'], link['$dnanexus_link'].get('project')
else:
return link['$dnanexus_link']['job'], link['$dnanexus_link']['field'] | python | def get_dxlink_ids(link):
'''
:param link: A DNAnexus link
:type link: dict
:returns: (Object ID, Project ID) if the link is to a data object (or :const:`None`
if no project specified in the link), or (Job ID, Field) if the link is
a job-based object reference (JBOR).
:rtype: tuple
Get the object ID and detail from a link. There are three types of links:
* Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns
``("file-XXXX", None)``.
* Data object link of the form ``{"$dnanexus_link": {"id": "file-XXXX",
"project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``.
* Job-based object reference (JBOR) of the form ``{"$dnanexus_link":
{"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``.
'''
if not is_dxlink(link):
raise DXError('Invalid link: %r' % link)
if isinstance(link['$dnanexus_link'], basestring):
return link['$dnanexus_link'], None
elif 'id' in link['$dnanexus_link']:
return link['$dnanexus_link']['id'], link['$dnanexus_link'].get('project')
else:
return link['$dnanexus_link']['job'], link['$dnanexus_link']['field'] | :param link: A DNAnexus link
:type link: dict
:returns: (Object ID, Project ID) if the link is to a data object (or :const:`None`
if no project specified in the link), or (Job ID, Field) if the link is
a job-based object reference (JBOR).
:rtype: tuple
Get the object ID and detail from a link. There are three types of links:
* Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns
``("file-XXXX", None)``.
* Data object link of the form ``{"$dnanexus_link": {"id": "file-XXXX",
"project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``.
* Job-based object reference (JBOR) of the form ``{"$dnanexus_link":
{"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L96-L121 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | get_handler | def get_handler(id_or_link, project=None):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
'''
try:
cls = _guess_link_target_type(id_or_link)
except Exception as e:
raise DXError("Could not parse link {}: {}".format(id_or_link, e))
if cls in [dxpy.DXApp, dxpy.DXGlobalWorkflow]:
# This special case should translate identifiers of the form
# "app-name" or "app-name/version_or_tag" to the appropriate
# arguments
if dxpy.utils.resolver.is_hashid(id_or_link):
return cls(id_or_link)
else:
slash_pos = id_or_link.find('/')
dash_pos = id_or_link.find('-')
if slash_pos == -1:
return cls(name=id_or_link[dash_pos+1:])
else:
return cls(name=id_or_link[dash_pos+1:slash_pos],
alias=id_or_link[slash_pos + 1:])
elif project is None or cls in [dxpy.DXJob, dxpy.DXAnalysis, dxpy.DXProject, dxpy.DXContainer]:
# This case is important for the handlers which do not
# take a project field
return cls(id_or_link)
else:
return cls(id_or_link, project=project) | python | def get_handler(id_or_link, project=None):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
'''
try:
cls = _guess_link_target_type(id_or_link)
except Exception as e:
raise DXError("Could not parse link {}: {}".format(id_or_link, e))
if cls in [dxpy.DXApp, dxpy.DXGlobalWorkflow]:
# This special case should translate identifiers of the form
# "app-name" or "app-name/version_or_tag" to the appropriate
# arguments
if dxpy.utils.resolver.is_hashid(id_or_link):
return cls(id_or_link)
else:
slash_pos = id_or_link.find('/')
dash_pos = id_or_link.find('-')
if slash_pos == -1:
return cls(name=id_or_link[dash_pos+1:])
else:
return cls(name=id_or_link[dash_pos+1:slash_pos],
alias=id_or_link[slash_pos + 1:])
elif project is None or cls in [dxpy.DXJob, dxpy.DXAnalysis, dxpy.DXProject, dxpy.DXContainer]:
# This case is important for the handlers which do not
# take a project field
return cls(id_or_link)
else:
return cls(id_or_link, project=project) | :param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234") | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L136-L174 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | describe | def describe(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method.
'''
# If this is a list, extract the ids.
# TODO: modify the procedure to use project ID when possible
if isinstance(id_or_link, basestring) or is_dxlink(id_or_link):
handler = get_handler(id_or_link)
return handler.describe(**kwargs)
else:
links = []
for link in id_or_link:
# If this entry is a dxlink, then get the id.
if is_dxlink(link):
# Guaranteed by is_dxlink that one of the following will work
if isinstance(link['$dnanexus_link'], basestring):
link = link['$dnanexus_link']
else:
link = link['$dnanexus_link']['id']
links.append(link)
# Prepare input to system_describe_data_objects, the same fields will be passed
# for all data object classes; if a class doesn't include a field in its describe
# output, it will be ignored
describe_input = \
dict([(field, True) for field in kwargs['fields']]) if kwargs.get('fields', []) else True
describe_links_input = [{'id': link, 'describe': describe_input} for link in links]
bulk_describe_input = {'objects': describe_links_input}
if 'classDescribeOptions' in kwargs:
bulk_describe_input['classDescribeOptions'] = kwargs['classDescribeOptions']
data_object_descriptions = dxpy.api.system_describe_data_objects(bulk_describe_input)
return [desc['describe'] for desc in data_object_descriptions['results']] | python | def describe(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method.
'''
# If this is a list, extract the ids.
# TODO: modify the procedure to use project ID when possible
if isinstance(id_or_link, basestring) or is_dxlink(id_or_link):
handler = get_handler(id_or_link)
return handler.describe(**kwargs)
else:
links = []
for link in id_or_link:
# If this entry is a dxlink, then get the id.
if is_dxlink(link):
# Guaranteed by is_dxlink that one of the following will work
if isinstance(link['$dnanexus_link'], basestring):
link = link['$dnanexus_link']
else:
link = link['$dnanexus_link']['id']
links.append(link)
# Prepare input to system_describe_data_objects, the same fields will be passed
# for all data object classes; if a class doesn't include a field in its describe
# output, it will be ignored
describe_input = \
dict([(field, True) for field in kwargs['fields']]) if kwargs.get('fields', []) else True
describe_links_input = [{'id': link, 'describe': describe_input} for link in links]
bulk_describe_input = {'objects': describe_links_input}
if 'classDescribeOptions' in kwargs:
bulk_describe_input['classDescribeOptions'] = kwargs['classDescribeOptions']
data_object_descriptions = dxpy.api.system_describe_data_objects(bulk_describe_input)
return [desc['describe'] for desc in data_object_descriptions['results']] | :param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/describeDataObjects
for input parameters used with the multiple object describe method. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxdataobject_functions.py#L176-L229 |
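A minimal sketch of both call styles (placeholder IDs):

import dxpy

# Single object: dispatches to the object handler's describe().
desc = dxpy.describe("file-xxxx")

# Bulk describe: one API call, with extra fields requested for every object.
descs = dxpy.describe(["file-xxxx", "record-yyyy"],
                      fields=["name", "properties"])
for d in descs:
    print(d["name"])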
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.describe | def describe(self, **kwargs):
"""
:returns: A hash containing attributes of the project or container.
:rtype: dict
Returns a hash with key-value pairs as specified by the API
specification for the `/project-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe>`_
method. This will usually include keys such as "id", "name",
"class", "billTo", "created", "modified", and "dataUsage".
"""
# TODO: link to /container-xxxx/describe
api_method = dxpy.api.container_describe
if isinstance(self, DXProject):
api_method = dxpy.api.project_describe
self._desc = api_method(self._dxid, **kwargs)
return self._desc | python | def describe(self, **kwargs):
"""
:returns: A hash containing attributes of the project or container.
:rtype: dict
Returns a hash with key-value pairs as specified by the API
specification for the `/project-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe>`_
method. This will usually include keys such as "id", "name",
"class", "billTo", "created", "modified", and "dataUsage".
"""
# TODO: link to /container-xxxx/describe
api_method = dxpy.api.container_describe
if isinstance(self, DXProject):
api_method = dxpy.api.project_describe
self._desc = api_method(self._dxid, **kwargs)
return self._desc | :returns: A hash containing attributes of the project or container.
:rtype: dict
Returns a hash with key-value pairs as specified by the API
specification for the `/project-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe>`_
method. This will usually include keys such as "id", "name",
"class", "billTo", "created", "modified", and "dataUsage". | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L58-L75 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.new_folder | def new_folder(self, folder, parents=False, **kwargs):
"""
:param folder: Full path to the new folder to create
:type folder: string
:param parents: If True, recursively create any parent folders that are missing
:type parents: boolean
Creates a new folder in the project or container.
"""
api_method = dxpy.api.container_new_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_new_folder
api_method(self._dxid, {"folder": folder,
"parents": parents},
**kwargs) | python | def new_folder(self, folder, parents=False, **kwargs):
"""
:param folder: Full path to the new folder to create
:type folder: string
:param parents: If True, recursively create any parent folders that are missing
:type parents: boolean
Creates a new folder in the project or container.
"""
api_method = dxpy.api.container_new_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_new_folder
api_method(self._dxid, {"folder": folder,
"parents": parents},
**kwargs) | :param folder: Full path to the new folder to create
:type folder: string
:param parents: If True, recursively create any parent folders that are missing
:type parents: boolean
Creates a new folder in the project or container. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L77-L93 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.list_folder | def list_folder(self, folder="/", describe=False, only="all", includeHidden=False, **kwargs):
"""
:param folder: Full path to the folder to list
:type folder: string
:param describe: If True, returns the output of ``/describe`` on each object (see below for notes)
:type describe: bool or dict
:param only: Indicate "objects" for only objects, "folders" for only folders, or "all" for both
:type only: string
:param includeHidden: Indicate whether hidden objects should be returned
:type includeHidden: bool
:returns: A hash with key "objects" for the list of object IDs and key "folders" for the list of folder routes
:rtype: dict
Returns a hash containing a list of objects that reside directly
inside the specified folder, and a list of strings representing
the full paths to folders that reside directly inside the
specified folder.
By default, the list of objects is provided as a list containing
one hash ``{"id": "class-XXXX"}`` with the ID of each matching
object. If *describe* is not False, the output of ``/describe``
is also included in an additional field "describe" for each
object. If *describe* is True, ``/describe`` is called with the
default arguments. *describe* may also be a hash, indicating the
input hash to be supplied to each ``/describe`` call.
"""
# TODO: it would be nice if we could supply describe
# fields/defaultFields in a similar way to what we pass to the
# high-level describe method, rather than having to construct
# the literal API input
api_method = dxpy.api.container_list_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_list_folder
return api_method(self._dxid, {"folder": folder,
"describe": describe,
"only": only,
"includeHidden": includeHidden},
**kwargs) | python | def list_folder(self, folder="/", describe=False, only="all", includeHidden=False, **kwargs):
"""
:param folder: Full path to the folder to list
:type folder: string
:param describe: If True, returns the output of ``/describe`` on each object (see below for notes)
:type describe: bool or dict
:param only: Indicate "objects" for only objects, "folders" for only folders, or "all" for both
:type only: string
:param includeHidden: Indicate whether hidden objects should be returned
:type includeHidden: bool
:returns: A hash with key "objects" for the list of object IDs and key "folders" for the list of folder routes
:rtype: dict
Returns a hash containing a list of objects that reside directly
inside the specified folder, and a list of strings representing
the full paths to folders that reside directly inside the
specified folder.
By default, the list of objects is provided as a list containing
one hash ``{"id": "class-XXXX"}`` with the ID of each matching
object. If *describe* is not False, the output of ``/describe``
is also included in an additional field "describe" for each
object. If *describe* is True, ``/describe`` is called with the
default arguments. *describe* may also be a hash, indicating the
input hash to be supplied to each ``/describe`` call.
"""
# TODO: it would be nice if we could supply describe
# fields/defaultFields in a similar way to what we pass to the
# high-level describe method, rather than having to construct
# the literal API input
api_method = dxpy.api.container_list_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_list_folder
return api_method(self._dxid, {"folder": folder,
"describe": describe,
"only": only,
"includeHidden": includeHidden},
**kwargs) | :param folder: Full path to the folder to list
:type folder: string
:param describe: If True, returns the output of ``/describe`` on each object (see below for notes)
:type describe: bool or dict
:param only: Indicate "objects" for only objects, "folders" for only folders, or "all" for both
:type only: string
:param includeHidden: Indicate whether hidden objects should be returned
:type includeHidden: bool
:returns: A hash with key "objects" for the list of object IDs and key "folders" for the list of folder routes
:rtype: dict
Returns a hash containing a list of objects that reside directly
inside the specified folder, and a list of strings representing
the full paths to folders that reside directly inside the
specified folder.
By default, the list of objects is provided as a list containing
one hash ``{"id": "class-XXXX"}`` with the ID of each matching
object. If *describe* is not False, the output of ``/describe``
is also included in an additional field "describe" for each
object. If *describe* is True, ``/describe`` is called with the
default arguments. *describe* may also be a hash, indicating the
input hash to be supplied to each ``/describe`` call. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L95-L135 |
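A minimal sketch of listing a folder with per-object describe output; the project ID, folder, and describe input hash are placeholders under the interface described above:

import dxpy

proj = dxpy.DXProject("project-xxxx")
listing = proj.list_folder("/data", describe={"fields": {"name": True}})
for entry in listing["objects"]:
    print(entry["id"], entry["describe"]["name"])
for subfolder in listing["folders"]:
    print(subfolder)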
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.move | def move(self, destination, objects=[], folders=[], **kwargs):
"""
:param destination: Path of destination folder
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
Moves the specified objects and folders into the folder
represented by *destination*. Moving a folder also moves all
contained folders and objects. If an object or folder is
explicitly specified but also appears inside another specified
folder, it will be removed from its parent folder and placed
directly in *destination*.
"""
api_method = dxpy.api.container_move
if isinstance(self, DXProject):
api_method = dxpy.api.project_move
api_method(self._dxid, {"objects": objects,
"folders": folders,
"destination": destination},
**kwargs) | python | def move(self, destination, objects=[], folders=[], **kwargs):
"""
:param destination: Path of destination folder
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
Moves the specified objects and folders into the folder
represented by *destination*. Moving a folder also moves all
contained folders and objects. If an object or folder is
explicitly specified but also appears inside another specified
folder, it will be removed from its parent folder and placed
directly in *destination*.
"""
api_method = dxpy.api.container_move
if isinstance(self, DXProject):
api_method = dxpy.api.project_move
api_method(self._dxid, {"objects": objects,
"folders": folders,
"destination": destination},
**kwargs) | :param destination: Path of destination folder
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
Moves the specified objects and folders into the folder
represented by *destination*. Moving a folder also moves all
contained folders and objects. If an object or folder is
explicitly specified but also appears inside another specified
folder, it will be removed from its parent folder and placed
directly in *destination*. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L137-L161 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.move_folder | def move_folder(self, folder, destination, **kwargs):
"""
:param folder: Full path to the folder to move
:type folder: string
:param destination: Full path to the destination folder that will contain *folder*
:type destination: string
Moves *folder* to reside in *destination* in the same project or
container. All objects and subfolders inside *folder* are also
moved.
"""
api_method = dxpy.api.container_move
if isinstance(self, DXProject):
api_method = dxpy.api.project_move
api_method(self._dxid, {"folders": [folder],
"destination": destination},
**kwargs) | python | def move_folder(self, folder, destination, **kwargs):
"""
:param folder: Full path to the folder to move
:type folder: string
:param destination: Full path to the destination folder that will contain *folder*
:type destination: string
Moves *folder* to reside in *destination* in the same project or
container. All objects and subfolders inside *folder* are also
moved.
"""
api_method = dxpy.api.container_move
if isinstance(self, DXProject):
api_method = dxpy.api.project_move
api_method(self._dxid, {"folders": [folder],
"destination": destination},
**kwargs) | :param folder: Full path to the folder to move
:type folder: string
:param destination: Full path to the destination folder that will contain *folder*
:type destination: string
Moves *folder* to reside in *destination* in the same project or
container. All objects and subfolders inside *folder* are also
moved. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L163-L181 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.remove_folder | def remove_folder(self, folder, recurse=False, force=False, **kwargs):
"""
:param folder: Full path to the folder to remove
:type folder: string
:param recurse: If True, recursively remove all objects and subfolders in the folder
:type recurse: bool
:param force: If True, will suppress errors for folders that do not exist
:type force: bool
Removes the specified folder from the project or container. It
must be empty to be removed, unless *recurse* is True.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation. (This can only happen if *recurse* is
True.)
"""
api_method = dxpy.api.container_remove_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_remove_folder
completed = False
while not completed:
resp = api_method(self._dxid,
{"folder": folder, "recurse": recurse, "force": force, "partial": True},
always_retry=force, # api call is idempotent under 'force' semantics
**kwargs)
if 'completed' not in resp:
raise DXError('Error removing folder')
completed = resp['completed'] | python | def remove_folder(self, folder, recurse=False, force=False, **kwargs):
"""
:param folder: Full path to the folder to remove
:type folder: string
:param recurse: If True, recursively remove all objects and subfolders in the folder
:type recurse: bool
:param force: If True, will suppress errors for folders that do not exist
:type force: bool
Removes the specified folder from the project or container. It
must be empty to be removed, unless *recurse* is True.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation. (This can only happen if *recurse* is
True.)
"""
api_method = dxpy.api.container_remove_folder
if isinstance(self, DXProject):
api_method = dxpy.api.project_remove_folder
completed = False
while not completed:
resp = api_method(self._dxid,
{"folder": folder, "recurse": recurse, "force": force, "partial": True},
always_retry=force, # api call is idempotent under 'force' semantics
**kwargs)
if 'completed' not in resp:
raise DXError('Error removing folder')
completed = resp['completed'] | :param folder: Full path to the folder to remove
:type folder: string
:param recurse: If True, recursively remove all objects and subfolders in the folder
:type recurse: bool
:param force: If True, will suppress errors for folders that do not exist
:type force: bool
Removes the specified folder from the project or container. It
must be empty to be removed, unless *recurse* is True.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation. (This can only happen if *recurse* is
True.) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L183-L213 |
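A minimal sketch (placeholder IDs):

import dxpy

proj = dxpy.DXProject("project-xxxx")
# Remove a folder and all of its contents; suppress errors if it is absent.
proj.remove_folder("/scratch", recurse=True, force=True)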
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.remove_objects | def remove_objects(self, objects, force=False, **kwargs):
"""
:param objects: List of object IDs to remove from the project or container
:type objects: list of strings
:param force: If True, will suppress errors for objects that do not exist
:type force: bool
Removes the specified objects from the project or container.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation.
"""
api_method = dxpy.api.container_remove_objects
if isinstance(self, DXProject):
api_method = dxpy.api.project_remove_objects
api_method(self._dxid,
{"objects": objects, "force": force},
always_retry=force, # api call is idempotent under 'force' semantics
**kwargs) | python | def remove_objects(self, objects, force=False, **kwargs):
"""
:param objects: List of object IDs to remove from the project or container
:type objects: list of strings
:param force: If True, will suppress errors for objects that do not exist
:type force: bool
Removes the specified objects from the project or container.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation.
"""
api_method = dxpy.api.container_remove_objects
if isinstance(self, DXProject):
api_method = dxpy.api.project_remove_objects
api_method(self._dxid,
{"objects": objects, "force": force},
always_retry=force, # api call is idempotent under 'force' semantics
**kwargs) | :param objects: List of object IDs to remove from the project or container
:type objects: list of strings
:param force: If True, will suppress errors for objects that do not exist
:type force: bool
Removes the specified objects from the project or container.
Removal propagates to any hidden objects that become unreachable
from any visible object in the same project or container as a
result of this operation. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L215-L236 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXContainer.clone | def clone(self, container, destination="/", objects=[], folders=[], parents=False, **kwargs):
"""
:param container: Destination container ID
:type container: string
:param destination: Path of destination folder in the destination container
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
:param parents: Whether the destination folder and/or parent folders should be created if they do not exist
:type parents: boolean
Clones (copies) the specified objects and folders in the
container into the folder *destination* in the container
*container*. Cloning a folder also clones all all folders and
objects it contains. If an object or folder is explicitly
specified but also appears inside another specified folder, it
will be removed from its parent folder and placed directly in
*destination*. No objects or folders are modified in the source
container.
Objects must be in the "closed" state to be cloned.
"""
api_method = dxpy.api.container_clone
if isinstance(self, DXProject):
api_method = dxpy.api.project_clone
return api_method(self._dxid,
{"objects": objects,
"folders": folders,
"project": container,
"destination": destination,
"parents": parents},
**kwargs) | python | def clone(self, container, destination="/", objects=[], folders=[], parents=False, **kwargs):
"""
:param container: Destination container ID
:type container: string
:param destination: Path of destination folder in the destination container
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
:param parents: Whether the destination folder and/or parent folders should be created if they do not exist
:type parents: boolean
Clones (copies) the specified objects and folders in the
container into the folder *destination* in the container
*container*. Cloning a folder also clones all folders and
objects it contains. If an object or folder is explicitly
specified but also appears inside another specified folder, it
will be removed from its parent folder and placed directly in
*destination*. No objects or folders are modified in the source
container.
Objects must be in the "closed" state to be cloned.
"""
api_method = dxpy.api.container_clone
if isinstance(self, DXProject):
api_method = dxpy.api.project_clone
return api_method(self._dxid,
{"objects": objects,
"folders": folders,
"project": container,
"destination": destination,
"parents": parents},
**kwargs) | :param container: Destination container ID
:type container: string
:param destination: Path of destination folder in the destination container
:type destination: string
:param objects: List of object IDs to move
:type objects: list of strings
:param folders: List of full paths to folders to move
:type folders: list of strings
:param parents: Whether the destination folder and/or parent folders should be created if they do not exist
:type parents: boolean
Clones (copies) the specified objects and folders in the
container into the folder *destination* in the container
*container*. Cloning a folder also clones all folders and
objects it contains. If an object or folder is explicitly
specified but also appears inside another specified folder, it
will be removed from its parent folder and placed directly in
*destination*. No objects or folders are modified in the source
container.
Objects must be in the "closed" state to be cloned. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L238-L273 |
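A minimal sketch of cloning a folder plus an extra object into another project (placeholder IDs):

import dxpy

src = dxpy.DXProject("project-aaaa")
src.clone("project-bbbb",
          destination="/backup",
          objects=["file-xxxx"],
          folders=["/results"],
          parents=True)  # create /backup in the destination if it is missing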
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXProject.new | def new(self, name, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, contains_phi=None, tags=None,
properties=None, bill_to=None, **kwargs):
"""
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
"""
input_hash = {}
input_hash["name"] = name
if summary is not None:
input_hash["summary"] = summary
if description is not None:
input_hash["description"] = description
if protected is not None:
input_hash["protected"] = protected
if restricted is not None:
input_hash["restricted"] = restricted
if download_restricted is not None:
input_hash["downloadRestricted"] = download_restricted
if contains_phi is not None:
input_hash["containsPHI"] = contains_phi
if bill_to is not None:
input_hash["billTo"] = bill_to
if tags is not None:
input_hash["tags"] = tags
if properties is not None:
input_hash["properties"] = properties
self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"])
self._desc = {}
return self._dxid | python | def new(self, name, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, contains_phi=None, tags=None,
properties=None, bill_to=None, **kwargs):
"""
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
"""
input_hash = {}
input_hash["name"] = name
if summary is not None:
input_hash["summary"] = summary
if description is not None:
input_hash["description"] = description
if protected is not None:
input_hash["protected"] = protected
if restricted is not None:
input_hash["restricted"] = restricted
if download_restricted is not None:
input_hash["downloadRestricted"] = download_restricted
if contains_phi is not None:
input_hash["containsPHI"] = contains_phi
if bill_to is not None:
input_hash["billTo"] = bill_to
if tags is not None:
input_hash["tags"] = tags
if properties is not None:
input_hash["properties"] = properties
self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"])
self._desc = {}
return self._dxid | :param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L284-L339 |
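A minimal usage sketch for project creation; the name, summary, and tag values below are illustrative, not taken from the source::

    import dxpy

    proj = dxpy.DXProject()
    # new() creates the project, stores its ID on the handler, and returns the ID.
    # The requesting user starts as the only member, with ADMINISTER access.
    project_id = proj.new(name="my-analysis-project",        # illustrative name
                          summary="Scratch space for QC runs",
                          tags=["qc", "rnaseq"])
    print(project_id)  # e.g. "project-xxxx"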
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXProject.update | def update(self, name=None, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, version=None, **kwargs):
"""
:param name: If provided, the new project name
:type name: string
:param summary: If provided, the new project summary
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param version: If provided, the update will only occur if the value matches the current project's version number
:type version: int
Updates the project with the new fields. All fields are
optional. Fields that are not provided are not changed.
See the API documentation for the `/project-xxxx/update
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fupdate>`_
method for more info.
"""
update_hash = {}
if name is not None:
update_hash["name"] = name
if summary is not None:
update_hash["summary"] = summary
if description is not None:
update_hash["description"] = description
if protected is not None:
update_hash["protected"] = protected
if restricted is not None:
update_hash["restricted"] = restricted
if download_restricted is not None:
update_hash["downloadRestricted"] = download_restricted
if version is not None:
update_hash["version"] = version
dxpy.api.project_update(self._dxid, update_hash, **kwargs) | python | def update(self, name=None, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, version=None, **kwargs):
"""
:param name: If provided, the new project name
:type name: string
:param summary: If provided, the new project summary
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param version: If provided, the update will only occur if the value matches the current project's version number
:type version: int
Updates the project with the new fields. All fields are
optional. Fields that are not provided are not changed.
See the API documentation for the `/project-xxxx/update
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fupdate>`_
method for more info.
"""
update_hash = {}
if name is not None:
update_hash["name"] = name
if summary is not None:
update_hash["summary"] = summary
if description is not None:
update_hash["description"] = description
if protected is not None:
update_hash["protected"] = protected
if restricted is not None:
update_hash["restricted"] = restricted
if download_restricted is not None:
update_hash["downloadRestricted"] = download_restricted
if version is not None:
update_hash["version"] = version
dxpy.api.project_update(self._dxid, update_hash, **kwargs) | :param name: If provided, the new project name
:type name: string
:param summary: If provided, the new project summary
:type summary: string
:param description: If provided, the new project description
:type description: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param version: If provided, the update will only occur if the value matches the current project's version number
:type version: int
Updates the project with the new fields. All fields are
optional. Fields that are not provided are not changed.
See the API documentation for the `/project-xxxx/update
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fupdate>`_
method for more info. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L341-L381 |
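A hedged sketch of a partial update; the project ID and field values are illustrative. Passing *version* makes the update conditional, so it fails instead of silently overwriting a concurrent change::

    import dxpy

    proj = dxpy.DXProject("project-xxxx")  # illustrative ID
    # Only the provided fields change; everything else is left untouched.
    proj.update(summary="Final call set", protected=True)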
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXProject.invite | def invite(self, invitee, level, send_email=True, **kwargs):
"""
:param invitee: Username (of the form "user-USERNAME") or email address of person to be invited to the project; use "PUBLIC" to make the project publicly available (in which case level must be set to "VIEW").
:type invitee: string
:param level: Permissions level that the invitee would get ("VIEW", "UPLOAD", "CONTRIBUTE", or "ADMINISTER")
:type level: string
:param send_email: Determines whether user receives email notifications regarding the project invitation
:type send_email: boolean
Invites the specified user to have access to the project.
"""
return dxpy.api.project_invite(self._dxid,
{"invitee": invitee, "level": level,
"suppressEmailNotification": not send_email},
**kwargs) | python | def invite(self, invitee, level, send_email=True, **kwargs):
"""
:param invitee: Username (of the form "user-USERNAME") or email address of person to be invited to the project; use "PUBLIC" to make the project publicly available (in which case level must be set to "VIEW").
:type invitee: string
:param level: Permissions level that the invitee would get ("VIEW", "UPLOAD", "CONTRIBUTE", or "ADMINISTER")
:type level: string
:param send_email: Determines whether user receives email notifications regarding the project invitation
:type send_email: boolean
Invites the specified user to have access to the project.
"""
return dxpy.api.project_invite(self._dxid,
{"invitee": invitee, "level": level,
"suppressEmailNotification": not send_email},
**kwargs) | :param invitee: Username (of the form "user-USERNAME") or email address of person to be invited to the project; use "PUBLIC" to make the project publicly available (in which case level must be set to "VIEW").
:type invitee: string
:param level: Permissions level that the invitee would get ("VIEW", "UPLOAD", "CONTRIBUTE", or "ADMINISTER")
:type level: string
:param send_email: Determines whether user receives email notifications regarding the project invitation
:type send_email: boolean
Invites the specified user to have access to the project. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L383-L399 |
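A short sketch; the project ID and username are illustrative. Note that making a project public requires the VIEW level::

    import dxpy

    proj = dxpy.DXProject("project-xxxx")
    # Grant a collaborator CONTRIBUTE access, suppressing the notification email.
    proj.invite("user-alice", "CONTRIBUTE", send_email=False)
    # Make the project publicly viewable.
    proj.invite("PUBLIC", "VIEW")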
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXProject.decrease_perms | def decrease_perms(self, member, level, **kwargs):
"""
:param member: Username (of the form "user-USERNAME") of the project member whose permissions will be decreased.
:type member: string
:param level: Permissions level that the member will have after this operation (None, "VIEW", "UPLOAD", or "CONTRIBUTE")
:type level: string or None
Decreases the permissions that the specified user has in the project.
"""
input_hash = {}
input_hash[member] = level
return dxpy.api.project_decrease_permissions(self._dxid,
input_hash,
**kwargs) | python | def decrease_perms(self, member, level, **kwargs):
"""
:param member: Username (of the form "user-USERNAME") of the project member whose permissions will be decreased.
:type member: string
:param level: Permissions level that the member will have after this operation (None, "VIEW", "UPLOAD", or "CONTRIBUTE")
:type level: string or None
Decreases the permissions that the specified user has in the project.
"""
input_hash = {}
input_hash[member] = level
return dxpy.api.project_decrease_permissions(self._dxid,
input_hash,
**kwargs) | :param member: Username (of the form "user-USERNAME") of the project member whose permissions will be decreased.
:type member: string
:param level: Permissions level that the member will have after this operation (None, "VIEW", "UPLOAD", or "CONTRIBUTE")
:type level: string or None
Decreases the permissions that the specified user has in the project. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L401-L417 |
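Continuing the sketch above (same illustrative username), permissions can later be reduced or revoked::

    # Drop the collaborator back to read-only access...
    proj.decrease_perms("user-alice", "VIEW")
    # ...or revoke their membership entirely by passing None as the level.
    proj.decrease_perms("user-alice", None)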
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxproject.py | DXProject.set_properties | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the project for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
return dxpy.api.project_set_properties(self._dxid, {"properties": properties}, **kwargs) | python | def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the project for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
return dxpy.api.project_set_properties(self._dxid, {"properties": properties}, **kwargs) | :param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the project for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L426-L441 |
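A brief sketch of the merge-and-delete semantics; the property names and values are illustrative::

    import dxpy

    proj = dxpy.DXProject("project-xxxx")
    # Sets two properties; properties not mentioned here are left unchanged.
    proj.set_properties({"pipeline": "wgs-v2", "owner_team": "genomics"})
    # A value of None deletes that property.
    proj.set_properties({"owner_team": None})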
dnanexus/dx-toolkit | src/python/dxpy/exceptions.py | format_exception | def format_exception(e):
"""Returns a string containing the type and text of the exception.
"""
from .utils.printing import fill
return '\n'.join(fill(line) for line in traceback.format_exception_only(type(e), e)) | python | def format_exception(e):
"""Returns a string containing the type and text of the exception.
"""
from .utils.printing import fill
return '\n'.join(fill(line) for line in traceback.format_exception_only(type(e), e)) | Returns a string containing the type and text of the exception. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/exceptions.py#L185-L190 |
dnanexus/dx-toolkit | src/python/dxpy/exceptions.py | exit_with_exc_info | def exit_with_exc_info(code=1, message='', print_tb=False, exception=None):
'''Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param message: Message to be printed after the exception information.
:type message: string
:param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it.
:type print_tb: boolean
:param exception: an exception to use in place of the last exception raised
'''
exc_type, exc_value = (exception.__class__, exception) \
if exception is not None else sys.exc_info()[:2]
if exc_type is not None:
if print_tb:
traceback.print_exc()
elif isinstance(exc_value, KeyboardInterrupt):
sys.stderr.write('^C\n')
else:
for line in traceback.format_exception_only(exc_type, exc_value):
sys.stderr.write(line)
sys.stderr.write(message)
if message != '' and not message.endswith('\n'):
sys.stderr.write('\n')
sys.exit(code) | python | def exit_with_exc_info(code=1, message='', print_tb=False, exception=None):
'''Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param message: Message to be printed after the exception information.
:type message: string
:param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it.
:type print_tb: boolean
:param exception: an exception to use in place of the last exception raised
'''
exc_type, exc_value = (exception.__class__, exception) \
if exception is not None else sys.exc_info()[:2]
if exc_type is not None:
if print_tb:
traceback.print_exc()
elif isinstance(exc_value, KeyboardInterrupt):
sys.stderr.write('^C\n')
else:
for line in traceback.format_exception_only(exc_type, exc_value):
sys.stderr.write(line)
sys.stderr.write(message)
if message != '' and not message.endswith('\n'):
sys.stderr.write('\n')
sys.exit(code) | Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param message: Message to be printed after the exception information.
:type message: string
:param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it.
:type print_tb: boolean
:param exception: an exception to use in place of the last exception raised | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/exceptions.py#L193-L220 |
dnanexus/dx-toolkit | src/python/dxpy/exceptions.py | err_exit | def err_exit(message='', code=None, expected_exceptions=default_expected_exceptions, arg_parser=None,
ignore_sigpipe=True, exception=None):
'''Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
Uses **expected_exceptions** to set the error code and decide whether to
suppress the error traceback.
:param message: Message to be printed after the exception information.
:type message: string
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param expected_exceptions: Exceptions for which to exit with error code 3 (expected error condition) and suppress the stack trace (unless the _DX_DEBUG environment variable is set).
:type expected_exceptions: iterable
:param arg_parser: argparse.ArgumentParser object used in the program (optional)
:param ignore_sigpipe: Whether to exit silently with code 3 when IOError with code EPIPE is raised. Default true.
:type ignore_sigpipe: boolean
:param exception: an exception to use in place of the last exception raised
'''
if arg_parser is not None:
message = arg_parser.prog + ": " + message
exc = exception if exception is not None else sys.exc_info()[1]
if isinstance(exc, SystemExit):
raise exc
elif isinstance(exc, expected_exceptions):
exit_with_exc_info(EXPECTED_ERR_EXIT_STATUS, message, print_tb=dxpy._DEBUG > 0, exception=exception)
elif ignore_sigpipe and isinstance(exc, IOError) and getattr(exc, 'errno', None) == errno.EPIPE:
if dxpy._DEBUG > 0:
print("Broken pipe", file=sys.stderr)
sys.exit(3)
else:
if code is None:
code = 1
exit_with_exc_info(code, message, print_tb=True, exception=exception) | python | def err_exit(message='', code=None, expected_exceptions=default_expected_exceptions, arg_parser=None,
ignore_sigpipe=True, exception=None):
'''Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
Uses **expected_exceptions** to set the error code and decide whether to
suppress the error traceback.
:param message: Message to be printed after the exception information.
:type message: string
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param expected_exceptions: Exceptions for which to exit with error code 3 (expected error condition) and suppress the stack trace (unless the _DX_DEBUG environment variable is set).
:type expected_exceptions: iterable
:param arg_parser: argparse.ArgumentParser object used in the program (optional)
:param ignore_sigpipe: Whether to exit silently with code 3 when IOError with code EPIPE is raised. Default true.
:type ignore_sigpipe: boolean
:param exception: an exception to use in place of the last exception raised
'''
if arg_parser is not None:
message = arg_parser.prog + ": " + message
exc = exception if exception is not None else sys.exc_info()[1]
if isinstance(exc, SystemExit):
raise exc
elif isinstance(exc, expected_exceptions):
exit_with_exc_info(EXPECTED_ERR_EXIT_STATUS, message, print_tb=dxpy._DEBUG > 0, exception=exception)
elif ignore_sigpipe and isinstance(exc, IOError) and getattr(exc, 'errno', None) == errno.EPIPE:
if dxpy._DEBUG > 0:
print("Broken pipe", file=sys.stderr)
sys.exit(3)
else:
if code is None:
code = 1
exit_with_exc_info(code, message, print_tb=True, exception=exception) | Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
Uses **expected_exceptions** to set the error code and decide whether to
suppress the error traceback.
:param message: Message to be printed after the exception information.
:type message: string
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param expected_exceptions: Exceptions for which to exit with error code 3 (expected error condition) and suppress the stack trace (unless the _DX_DEBUG environment variable is set).
:type expected_exceptions: iterable
:param arg_parser: argparse.ArgumentParser object used in the program (optional)
:param ignore_sigpipe: Whether to exit silently with code 3 when IOError with code EPIPE is raised. Default true.
:type ignore_sigpipe: boolean
:param exception: an exception to use in place of the last exception raised | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/exceptions.py#L235-L269 |
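A hedged sketch of typical CLI usage; the program name and the simulated failure stand in for real application logic::

    import argparse
    from dxpy.exceptions import err_exit

    parser = argparse.ArgumentParser(prog="my-tool")  # illustrative program

    try:
        raise RuntimeError("simulated failure")  # stands in for real work
    except Exception:
        # RuntimeError is not among the default expected exceptions, so this
        # prints a full traceback and exits with status 1; an expected
        # exception (e.g. a DXAPIError) would exit with status 3 and a
        # one-line message prefixed by the program name.
        err_exit("failed to run pipeline", arg_parser=parser)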
dnanexus/dx-toolkit | src/python/dxpy/exceptions.py | DXAPIError.error_message | def error_message(self):
"Returns a one-line description of the error."
output = self.msg + ", code " + str(self.code)
output += ". Request Time={}, Request ID={}".format(self.timestamp, self.req_id)
if self.name != self.__class__.__name__:
output = self.name + ": " + output
return output | python | def error_message(self):
"Returns a one-line description of the error."
output = self.msg + ", code " + str(self.code)
output += ". Request Time={}, Request ID={}".format(self.timestamp, self.req_id)
if self.name != self.__class__.__name__:
output = self.name + ": " + output
return output | Returns a one-line description of the error. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/exceptions.py#L51-L57 |
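A usage sketch; the API call and the nonexistent project ID are illustrative::

    import dxpy
    from dxpy.exceptions import DXAPIError

    try:
        dxpy.api.project_describe("project-doesnotexist")  # illustrative ID
    except DXAPIError as e:
        # One line: error name, message, HTTP code, request time and ID.
        print(e.error_message())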
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow.publish | def publish(self, **kwargs):
"""
Publishes the global workflow, so all users can find it and use it on the platform.
The current user must be a developer of the workflow.
"""
if self._dxid is not None:
return dxpy.api.global_workflow_publish(self._dxid, **kwargs)
else:
return dxpy.api.global_workflow_publish('globalworkflow-' + self._name, alias=self._alias, **kwargs) | python | def publish(self, **kwargs):
"""
Publishes the global workflow, so all users can find it and use it on the platform.
The current user must be a developer of the workflow.
"""
if self._dxid is not None:
return dxpy.api.global_workflow_publish(self._dxid, **kwargs)
else:
return dxpy.api.global_workflow_publish('globalworkflow-' + self._name, alias=self._alias, **kwargs) | Publishes the global workflow, so all users can find it and use it on the platform.
The current user must be a developer of the workflow. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L152-L161 |
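A two-line sketch using the ID form (the ID is illustrative); when the handler has no ID but a name and alias, publish() resolves 'globalworkflow-NAME' with that alias instead::

    import dxpy

    # Publishing makes the (previously private) global workflow visible
    # and runnable by all platform users.
    dxpy.DXGlobalWorkflow("globalworkflow-xxxx").publish()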
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow.describe_underlying_workflow | def describe_underlying_workflow(self, region, describe_output=None):
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of a workflow
:rtype: dict
Returns an object description of an underlying workflow from a given region.
"""
assert(describe_output is None or describe_output.get('class', '') == 'globalworkflow')
if region is None:
raise DXError(
'DXGlobalWorkflow: region must be provided to get an underlying workflow')
# Perhaps we have cached it already
if region in self._workflow_desc_by_region:
return self._workflow_desc_by_region[region]
if not describe_output:
describe_output = self.describe()
if region not in describe_output['regionalOptions'].keys():
raise DXError('DXGlobalWorkflow: the global workflow {} is not enabled in region {}'.format(
self.get_id(), region))
underlying_workflow_id = describe_output['regionalOptions'][region]['workflow']
dxworkflow = dxpy.DXWorkflow(underlying_workflow_id)
dxworkflow_desc = dxworkflow.describe()
self._workflow_desc_by_region[region] = dxworkflow_desc
return dxworkflow_desc | python | def describe_underlying_workflow(self, region, describe_output=None):
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of a workflow
:rtype: dict
Returns an object description of an underlying workflow from a given region.
"""
assert(describe_output is None or describe_output.get('class', '') == 'globalworkflow')
if region is None:
raise DXError(
'DXGlobalWorkflow: region must be provided to get an underlying workflow')
# Perhaps we have cached it already
if region in self._workflow_desc_by_region:
return self._workflow_desc_by_region[region]
if not describe_output:
describe_output = self.describe()
if region not in describe_output['regionalOptions'].keys():
raise DXError('DXGlobalWorkflow: the global workflow {} is not enabled in region {}'.format(
self.get_id(), region))
underlying_workflow_id = describe_output['regionalOptions'][region]['workflow']
dxworkflow = dxpy.DXWorkflow(underlying_workflow_id)
dxworkflow_desc = dxworkflow.describe()
self._workflow_desc_by_region[region] = dxworkflow_desc
return dxworkflow_desc | :param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of a workflow
:rtype: dict
Returns an object description of an underlying workflow from a given region. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L163-L195 |
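A minimal sketch; the global workflow ID and region name are illustrative. get_underlying_workflow follows the same pattern but returns a DXWorkflow handler instead of a describe hash::

    import dxpy

    gwf = dxpy.DXGlobalWorkflow("globalworkflow-xxxx")
    # Returns the describe output of the region-specific underlying workflow;
    # raises DXError if the global workflow is not enabled in that region.
    desc = gwf.describe_underlying_workflow("aws:us-east-1")
    print(desc["id"])  # the underlying workflow-xxxx ID for that region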
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow.get_underlying_workflow | def get_underlying_workflow(self, region, describe_output=None):
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object handler of a workflow
:rtype: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`
Returns an object handler of an underlying workflow from a given region.
"""
assert(describe_output is None or describe_output.get('class') == 'globalworkflow')
if region is None:
raise DXError(
'DXGlobalWorkflow: region must be provided to get an underlying workflow')
# Perhaps we have cached it already
if region in self._workflows_by_region:
return self._workflows_by_region[region]
if not describe_output:
describe_output = self.describe()
if region not in describe_output['regionalOptions'].keys():
raise DXError('DXGlobalWorkflow: the global workflow {} is not enabled in region {}'.format(
self.get_id(), region))
underlying_workflow_id = describe_output['regionalOptions'][region]['workflow']
self._workflows_by_region[region] = dxpy.DXWorkflow(underlying_workflow_id)
return self._workflows_by_region[region]
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object handler of a workflow
:rtype: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`
Returns an object handler of an underlying workflow from a given region.
"""
assert(describe_output is None or describe_output.get('class') == 'globalworkflow')
if region is None:
raise DXError(
'DXGlobalWorkflow: region must be provided to get an underlying workflow')
# Perhaps we have cached it already
if region in self._workflows_by_region:
return self._workflows_by_region[region]
if not describe_output:
describe_output = self.describe()
if region not in describe_output['regionalOptions'].keys():
raise DXError('DXGlobalWorkflow: the global workflow {} is not enabled in region {}'.format(
self.get_id(), region))
underlying_workflow_id = describe_output['regionalOptions'][region]['workflow']
self._workflows_by_region[region] = dxpy.DXWorkflow(underlying_workflow_id)
return self._workflows_by_region[region]
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object handler of a workflow
:rtype: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`
Returns an object handler of an underlying workflow from a given region. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L197-L227 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow.append_underlying_workflow_desc | def append_underlying_workflow_desc(self, describe_output, region):
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of the global workflow
:rtype: dict
Appends stages, inputs, outputs and other workflow-specific metadata to a global workflow describe output.
Note: global workflow description does not contain functional metadata (stages, IO), since this data
is region-specific (due to applets and bound inputs) and so reside only in region-specific underlying
workflows. We add them to global_workflow_desc so that it can be used for a workflow or a global workflow
"""
assert(describe_output is None or describe_output.get('class') == 'globalworkflow')
underlying_workflow_desc = self.describe_underlying_workflow(region,
describe_output=describe_output)
for field in ['inputs', 'outputs', 'inputSpec', 'outputSpec', 'stages']:
describe_output[field] = underlying_workflow_desc[field]
return describe_output | python | def append_underlying_workflow_desc(self, describe_output, region):
"""
:param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of the global workflow
:rtype: : dict
Appends stages, inputs, outputs and other workflow-specific metadata to a global workflow describe output.
Note: global workflow description does not contain functional metadata (stages, IO), since this data
is region-specific (due to applets and bound inputs) and so resides only in region-specific underlying
workflows. We add these fields to the global workflow description so that it can be used for either a workflow or a global workflow.
"""
assert(describe_output is None or describe_output.get('class') == 'globalworkflow')
underlying_workflow_desc = self.describe_underlying_workflow(region,
describe_output=describe_output)
for field in ['inputs', 'outputs', 'inputSpec', 'outputSpec', 'stages']:
describe_output[field] = underlying_workflow_desc[field]
return describe_output | :param region: region name
:type region: string
:param describe_output: description of a global workflow
:type describe_output: dict
:returns: object description of the global workflow
:rtype: dict
Appends stages, inputs, outputs and other workflow-specific metadata to a global workflow describe output.
Note: global workflow description does not contain functional metadata (stages, IO), since this data
is region-specific (due to applets and bound inputs) and so resides only in region-specific underlying
workflows. We add these fields to the global workflow description so that it can be used for either a workflow or a global workflow. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L229-L250 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow._get_run_input | def _get_run_input(self, workflow_input, project=None, **kwargs):
"""
Checks the region in which the global workflow is run
and returns the input associated with the underlying workflow
from that region.
"""
region = dxpy.api.project_describe(project,
input_params={"fields": {"region": True}})["region"]
dxworkflow = self.get_underlying_workflow(region)
return dxworkflow._get_run_input(workflow_input, **kwargs) | python | def _get_run_input(self, workflow_input, project=None, **kwargs):
"""
Checks the region in which the global workflow is run
and returns the input associated with the underlying workflow
from that region.
"""
region = dxpy.api.project_describe(project,
input_params={"fields": {"region": True}})["region"]
dxworkflow = self.get_underlying_workflow(region)
return dxworkflow._get_run_input(workflow_input, **kwargs) | Checks the region in which the global workflow is run
and returns the input associated with the underlying workflow
from that region. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L256-L265 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxglobalworkflow.py | DXGlobalWorkflow.run | def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXGlobalWorkflow, self).run(workflow_input, *args, **kwargs) | python | def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXGlobalWorkflow, self).run(workflow_input, *args, **kwargs) | :param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxglobalworkflow.py#L275-L311 |
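A hedged sketch of the stage-addressed input form; the IDs, the "reads" input, and the workflow-level "genome" input are all illustrative assumptions::

    import dxpy

    gwf = dxpy.DXGlobalWorkflow("globalworkflow-xxxx")
    analysis = gwf.run({
        "0.reads": dxpy.dxlink("file-xxxx"),  # input "reads" of the first stage
        "genome": dxpy.dxlink("file-yyyy"),   # a workflow-level input
    }, project="project-xxxx")
    analysis.wait_on_done()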
dnanexus/dx-toolkit | src/python/dxpy/compat.py | unwrap_stream | def unwrap_stream(stream_name):
"""
Temporarily unwraps a given stream (stdin, stdout, or stderr) to undo the effects of wrap_stdio_in_codecs().
"""
wrapped_stream = None
try:
wrapped_stream = getattr(sys, stream_name)
if hasattr(wrapped_stream, '_original_stream'):
setattr(sys, stream_name, wrapped_stream._original_stream)
yield
finally:
if wrapped_stream:
setattr(sys, stream_name, wrapped_stream) | python | def unwrap_stream(stream_name):
"""
Temporarily unwraps a given stream (stdin, stdout, or stderr) to undo the effects of wrap_stdio_in_codecs().
"""
wrapped_stream = None
try:
wrapped_stream = getattr(sys, stream_name)
if hasattr(wrapped_stream, '_original_stream'):
setattr(sys, stream_name, wrapped_stream._original_stream)
yield
finally:
if wrapped_stream:
setattr(sys, stream_name, wrapped_stream) | Temporarily unwraps a given stream (stdin, stdout, or stderr) to undo the effects of wrap_stdio_in_codecs(). | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/compat.py#L206-L218 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | open_dxfile | def open_dxfile(dxid, project=None, mode=None, read_buffer_size=dxfile.DEFAULT_BUFFER_SIZE):
'''
:param dxid: file ID
:type dxid: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Given the object ID of an uploaded file, returns a remote file
handler that is a Python file-like object.
Example::
with open_dxfile("file-xxxx") as fd:
for line in fd:
...
Note that this is shorthand for::
DXFile(dxid)
'''
return DXFile(dxid, project=project, mode=mode, read_buffer_size=read_buffer_size) | python | def open_dxfile(dxid, project=None, mode=None, read_buffer_size=dxfile.DEFAULT_BUFFER_SIZE):
'''
:param dxid: file ID
:type dxid: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Given the object ID of an uploaded file, returns a remote file
handler that is a Python file-like object.
Example::
with open_dxfile("file-xxxx") as fd:
for line in fd:
...
Note that this is shorthand for::
DXFile(dxid)
'''
return DXFile(dxid, project=project, mode=mode, read_buffer_size=read_buffer_size) | :param dxid: file ID
:type dxid: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Given the object ID of an uploaded file, returns a remote file
handler that is a Python file-like object.
Example::
with open_dxfile("file-xxxx") as fd:
for line in fd:
...
Note that this is shorthand for::
DXFile(dxid) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L43-L63 |
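A short reading sketch; the file ID is illustrative::

    import dxpy

    # Stream the remote file's contents line by line without first
    # downloading it to local disk.
    with dxpy.open_dxfile("file-xxxx", mode="r") as fd:
        for line in fd:
            print(line)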
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | new_dxfile | def new_dxfile(mode=None, write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE, expected_file_size=None, file_is_mmapd=False,
**kwargs):
'''
:param mode: One of "w" or "a" for write and append modes, respectively
:type mode: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Creates a new remote file object that is ready to be written to;
returns a :class:`~dxpy.bindings.dxfile.DXFile` object that is a
writable file-like object.
Example::
with new_dxfile(media_type="application/json") as fd:
fd.write("foo\\n")
Note that this is shorthand for::
dxFile = DXFile()
dxFile.new(**kwargs)
'''
dx_file = DXFile(mode=mode, write_buffer_size=write_buffer_size, expected_file_size=expected_file_size,
file_is_mmapd=file_is_mmapd)
dx_file.new(**kwargs)
return dx_file | python | def new_dxfile(mode=None, write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE, expected_file_size=None, file_is_mmapd=False,
**kwargs):
'''
:param mode: One of "w" or "a" for write and append modes, respectively
:type mode: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Creates a new remote file object that is ready to be written to;
returns a :class:`~dxpy.bindings.dxfile.DXFile` object that is a
writable file-like object.
Example::
with new_dxfile(media_type="application/json") as fd:
fd.write("foo\\n")
Note that this is shorthand for::
dxFile = DXFile()
dxFile.new(**kwargs)
'''
dx_file = DXFile(mode=mode, write_buffer_size=write_buffer_size, expected_file_size=expected_file_size,
file_is_mmapd=file_is_mmapd)
dx_file.new(**kwargs)
return dx_file | :param mode: One of "w" or "a" for write and append modes, respectively
:type mode: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Creates a new remote file object that is ready to be written to;
returns a :class:`~dxpy.bindings.dxfile.DXFile` object that is a
writable file-like object.
Example::
with new_dxfile(media_type="application/json") as fd:
fd.write("foo\\n")
Note that this is shorthand for::
dxFile = DXFile()
dxFile.new(**kwargs) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L66-L94 |
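A short writing sketch; the file name and media type are illustrative. Closing the handler flushes buffered data and initiates file closing on the platform::

    import dxpy

    with dxpy.new_dxfile(mode="w", name="records.json",
                         media_type="application/json") as fd:
        fd.write('{"sample": "NA12878"}\n')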
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | download_dxfile | def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
project=None, describe_output=None, **kwargs):
'''
:param dxid: DNAnexus file ID or DXFile (file handler) object
:type dxid: string or DXFile
:param filename: Local filename
:type filename: string
:param append: If True, appends to the local file (default is to truncate local file if it exists)
:type append: boolean
:param project: project to use as context for this download (may affect
which billing account is billed for this download). If None or
DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server.
:type project: str or None
:param describe_output: (experimental) output of the file-xxxx/describe API call,
if available. It will make it possible to skip another describe API call.
It should contain the default fields of the describe API call output and
the "parts" field, not included in the output by default.
:type describe_output: dict or None
Downloads the remote file referenced by *dxid* and saves it to *filename*.
Example::
download_dxfile("file-xxxx", "localfilename.fastq")
'''
# retry the inner loop while there are retriable errors
part_retry_counter = defaultdict(lambda: 3)
success = False
while not success:
success = _download_dxfile(dxid,
filename,
part_retry_counter,
chunksize=chunksize,
append=append,
show_progress=show_progress,
project=project,
describe_output=describe_output,
**kwargs) | python | def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
project=None, describe_output=None, **kwargs):
'''
:param dxid: DNAnexus file ID or DXFile (file handler) object
:type dxid: string or DXFile
:param filename: Local filename
:type filename: string
:param append: If True, appends to the local file (default is to truncate local file if it exists)
:type append: boolean
:param project: project to use as context for this download (may affect
which billing account is billed for this download). If None or
DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server.
:type project: str or None
:param describe_output: (experimental) output of the file-xxxx/describe API call,
if available. It will make it possible to skip another describe API call.
It should contain the default fields of the describe API call output and
the "parts" field, not included in the output by default.
:type describe_output: dict or None
Downloads the remote file referenced by *dxid* and saves it to *filename*.
Example::
download_dxfile("file-xxxx", "localfilename.fastq")
'''
# retry the inner loop while there are retriable errors
part_retry_counter = defaultdict(lambda: 3)
success = False
while not success:
success = _download_dxfile(dxid,
filename,
part_retry_counter,
chunksize=chunksize,
append=append,
show_progress=show_progress,
project=project,
describe_output=describe_output,
**kwargs) | :param dxid: DNAnexus file ID or DXFile (file handler) object
:type dxid: string or DXFile
:param filename: Local filename
:type filename: string
:param append: If True, appends to the local file (default is to truncate local file if it exists)
:type append: boolean
:param project: project to use as context for this download (may affect
which billing account is billed for this download). If None or
DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server.
:type project: str or None
:param describe_output: (experimental) output of the file-xxxx/describe API call,
if available. It will make it possible to skip another describe API call.
It should contain the default fields of the describe API call output and
the "parts" field, not included in the output by default.
:type describe_output: dict or None
Downloads the remote file referenced by *dxid* and saves it to *filename*.
Example::
download_dxfile("file-xxxx", "localfilename.fastq") | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L97-L135 |
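A minimal sketch; the file ID and local name are illustrative. If the local file already exists, its verified parts are kept and the download resumes from the first mismatch::

    import dxpy

    dxpy.download_dxfile("file-xxxx", "reads.fastq", show_progress=True)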
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | _download_dxfile | def _download_dxfile(dxid, filename, part_retry_counter,
chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
project=None, describe_output=None, **kwargs):
'''
Core of download logic. Download file-id *dxid* and store it in
a local file *filename*.
The return value is as follows:
- True means the download was successfully completed
- False means the download was stopped because of a retryable error
- Exception raised for other errors
'''
def print_progress(bytes_downloaded, file_size, action="Downloaded"):
num_ticks = 60
effective_file_size = file_size or 1
if bytes_downloaded > effective_file_size:
effective_file_size = bytes_downloaded
ticks = int(round((bytes_downloaded / float(effective_file_size)) * num_ticks))
percent = int(math.floor((bytes_downloaded / float(effective_file_size)) * 100))
fmt = "[{done}{pending}] {action} {done_bytes:,}{remaining} bytes ({percent}%) {name}"
# Erase the line and return the cursor to the start of the line.
# The following VT100 escape sequence will erase the current line.
sys.stderr.write("\33[2K")
sys.stderr.write(fmt.format(action=action,
done=("=" * (ticks - 1) + ">") if ticks > 0 else "",
pending=" " * (num_ticks - ticks),
done_bytes=bytes_downloaded,
remaining=" of {size:,}".format(size=file_size) if file_size else "",
percent=percent,
name=filename))
sys.stderr.flush()
sys.stderr.write("\r")
sys.stderr.flush()
_bytes = 0
if isinstance(dxid, DXFile):
dxfile = dxid
else:
dxfile = DXFile(dxid, mode="r", project=(project if project != DXFile.NO_PROJECT_HINT else None))
if describe_output and describe_output.get("parts") is not None:
dxfile_desc = describe_output
else:
dxfile_desc = dxfile.describe(fields={"parts"}, default_fields=True, **kwargs)
if 'drive' in dxfile_desc:
# A symbolic link. Get the MD5 checksum, if we have it
if 'md5' in dxfile_desc:
md5 = dxfile_desc['md5']
else:
md5 = None
_download_symbolic_link(dxid, md5, project, filename)
return True
parts = dxfile_desc["parts"]
parts_to_get = sorted(parts, key=int)
file_size = dxfile_desc.get("size")
offset = 0
for part_id in parts_to_get:
parts[part_id]["start"] = offset
offset += parts[part_id]["size"]
if append:
fh = open(filename, "ab")
else:
try:
fh = open(filename, "rb+")
except IOError:
fh = open(filename, "wb")
if show_progress:
print_progress(0, None)
def get_chunk(part_id_to_get, start, end):
url, headers = dxfile.get_download_url(project=project, **kwargs)
# If we're fetching the whole object in one shot, avoid setting the Range header to take advantage of gzip
# transfer compression
sub_range = False
if len(parts) > 1 or (start > 0) or (end - start + 1 < parts[part_id_to_get]["size"]):
sub_range = True
data = dxpy._dxhttp_read_range(url, headers, start, end, FILE_REQUEST_TIMEOUT, sub_range)
return part_id_to_get, data
def chunk_requests():
for part_id_to_chunk in parts_to_get:
part_info = parts[part_id_to_chunk]
for chunk_start in range(part_info["start"], part_info["start"] + part_info["size"], chunksize):
chunk_end = min(chunk_start + chunksize, part_info["start"] + part_info["size"]) - 1
yield get_chunk, [part_id_to_chunk, chunk_start, chunk_end], {}
def verify_part(_part_id, got_bytes, hasher):
if got_bytes is not None and got_bytes != parts[_part_id]["size"]:
msg = "Unexpected part data size in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["size"], got_bytes)
raise DXPartLengthMismatchError(msg)
if hasher is not None and "md5" not in parts[_part_id]:
warnings.warn("Download of file {} is not being checked for integrity".format(dxfile.get_id()))
elif hasher is not None and hasher.hexdigest() != parts[_part_id]["md5"]:
msg = "Checksum mismatch in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["md5"], hasher.hexdigest())
raise DXChecksumMismatchError(msg)
with fh:
last_verified_pos = 0
if fh.mode == "rb+":
# We already downloaded the beginning of the file, verify that the
# chunk checksums match the metadata.
last_verified_part, max_verify_chunk_size = None, 1024*1024
try:
for part_id in parts_to_get:
part_info = parts[part_id]
if "md5" not in part_info:
raise DXFileError("File {} does not contain part md5 checksums".format(dxfile.get_id()))
bytes_to_read = part_info["size"]
hasher = hashlib.md5()
while bytes_to_read > 0:
chunk = fh.read(min(max_verify_chunk_size, bytes_to_read))
if len(chunk) < min(max_verify_chunk_size, bytes_to_read):
raise DXFileError("Local data for part {} is truncated".format(part_id))
hasher.update(chunk)
bytes_to_read -= max_verify_chunk_size
if hasher.hexdigest() != part_info["md5"]:
raise DXFileError("Checksum mismatch when verifying downloaded part {}".format(part_id))
else:
last_verified_part = part_id
last_verified_pos = fh.tell()
if show_progress:
_bytes += part_info["size"]
print_progress(_bytes, file_size, action="Verified")
except (IOError, DXFileError) as e:
logger.debug(e)
fh.seek(last_verified_pos)
fh.truncate()
if last_verified_part is not None:
del parts_to_get[:parts_to_get.index(last_verified_part)+1]
if show_progress and len(parts_to_get) < len(parts):
print_progress(last_verified_pos, file_size, action="Resuming at")
logger.debug("Verified %s/%d downloaded parts", last_verified_part, len(parts_to_get))
try:
# Main loop. In parallel: download chunks, verify them, and write them to disk.
get_first_chunk_sequentially = (file_size > 128 * 1024 and last_verified_pos == 0 and dxpy.JOB_ID)
cur_part, got_bytes, hasher = None, None, None
for chunk_part, chunk_data in response_iterator(chunk_requests(),
dxfile._http_threadpool,
do_first_task_sequentially=get_first_chunk_sequentially):
if chunk_part != cur_part:
verify_part(cur_part, got_bytes, hasher)
cur_part, got_bytes, hasher = chunk_part, 0, hashlib.md5()
got_bytes += len(chunk_data)
hasher.update(chunk_data)
fh.write(chunk_data)
if show_progress:
_bytes += len(chunk_data)
print_progress(_bytes, file_size)
verify_part(cur_part, got_bytes, hasher)
if show_progress:
print_progress(_bytes, file_size, action="Completed")
except DXFileError:
print(traceback.format_exc(), file=sys.stderr)
part_retry_counter[cur_part] -= 1
if part_retry_counter[cur_part] > 0:
print("Retrying {} ({} tries remain for part {})".format(dxfile.get_id(), part_retry_counter[cur_part], cur_part),
file=sys.stderr)
return False
raise
if show_progress:
sys.stderr.write("\n")
return True | python | def _download_dxfile(dxid, filename, part_retry_counter,
chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
project=None, describe_output=None, **kwargs):
'''
Core of download logic. Download file-id *dxid* and store it in
a local file *filename*.
The return value is as follows:
- True means the download was successfully completed
- False means the download was stopped because of a retryable error
- Exception raised for other errors
'''
def print_progress(bytes_downloaded, file_size, action="Downloaded"):
num_ticks = 60
effective_file_size = file_size or 1
if bytes_downloaded > effective_file_size:
effective_file_size = bytes_downloaded
ticks = int(round((bytes_downloaded / float(effective_file_size)) * num_ticks))
percent = int(math.floor((bytes_downloaded / float(effective_file_size)) * 100))
fmt = "[{done}{pending}] {action} {done_bytes:,}{remaining} bytes ({percent}%) {name}"
# Erase the line and return the cursor to the start of the line.
# The following VT100 escape sequence will erase the current line.
sys.stderr.write("\33[2K")
sys.stderr.write(fmt.format(action=action,
done=("=" * (ticks - 1) + ">") if ticks > 0 else "",
pending=" " * (num_ticks - ticks),
done_bytes=bytes_downloaded,
remaining=" of {size:,}".format(size=file_size) if file_size else "",
percent=percent,
name=filename))
sys.stderr.flush()
sys.stderr.write("\r")
sys.stderr.flush()
_bytes = 0
if isinstance(dxid, DXFile):
dxfile = dxid
else:
dxfile = DXFile(dxid, mode="r", project=(project if project != DXFile.NO_PROJECT_HINT else None))
if describe_output and describe_output.get("parts") is not None:
dxfile_desc = describe_output
else:
dxfile_desc = dxfile.describe(fields={"parts"}, default_fields=True, **kwargs)
if 'drive' in dxfile_desc:
# A symbolic link. Get the MD5 checksum, if we have it
if 'md5' in dxfile_desc:
md5 = dxfile_desc['md5']
else:
md5 = None
_download_symbolic_link(dxid, md5, project, filename)
return True
parts = dxfile_desc["parts"]
parts_to_get = sorted(parts, key=int)
file_size = dxfile_desc.get("size")
offset = 0
for part_id in parts_to_get:
parts[part_id]["start"] = offset
offset += parts[part_id]["size"]
if append:
fh = open(filename, "ab")
else:
try:
fh = open(filename, "rb+")
except IOError:
fh = open(filename, "wb")
if show_progress:
print_progress(0, None)
def get_chunk(part_id_to_get, start, end):
url, headers = dxfile.get_download_url(project=project, **kwargs)
# If we're fetching the whole object in one shot, avoid setting the Range header to take advantage of gzip
# transfer compression
sub_range = False
if len(parts) > 1 or (start > 0) or (end - start + 1 < parts[part_id_to_get]["size"]):
sub_range = True
data = dxpy._dxhttp_read_range(url, headers, start, end, FILE_REQUEST_TIMEOUT, sub_range)
return part_id_to_get, data
def chunk_requests():
for part_id_to_chunk in parts_to_get:
part_info = parts[part_id_to_chunk]
for chunk_start in range(part_info["start"], part_info["start"] + part_info["size"], chunksize):
chunk_end = min(chunk_start + chunksize, part_info["start"] + part_info["size"]) - 1
yield get_chunk, [part_id_to_chunk, chunk_start, chunk_end], {}
def verify_part(_part_id, got_bytes, hasher):
if got_bytes is not None and got_bytes != parts[_part_id]["size"]:
msg = "Unexpected part data size in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["size"], got_bytes)
raise DXPartLengthMismatchError(msg)
if hasher is not None and "md5" not in parts[_part_id]:
warnings.warn("Download of file {} is not being checked for integrity".format(dxfile.get_id()))
elif hasher is not None and hasher.hexdigest() != parts[_part_id]["md5"]:
msg = "Checksum mismatch in {} part {} (expected {}, got {})"
msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["md5"], hasher.hexdigest())
raise DXChecksumMismatchError(msg)
with fh:
last_verified_pos = 0
if fh.mode == "rb+":
# We already downloaded the beginning of the file; verify that the
# chunk checksums match the metadata.
last_verified_part, max_verify_chunk_size = None, 1024*1024
try:
for part_id in parts_to_get:
part_info = parts[part_id]
if "md5" not in part_info:
raise DXFileError("File {} does not contain part md5 checksums".format(dxfile.get_id()))
bytes_to_read = part_info["size"]
hasher = hashlib.md5()
while bytes_to_read > 0:
chunk = fh.read(min(max_verify_chunk_size, bytes_to_read))
if len(chunk) < min(max_verify_chunk_size, bytes_to_read):
raise DXFileError("Local data for part {} is truncated".format(part_id))
hasher.update(chunk)
bytes_to_read -= max_verify_chunk_size
if hasher.hexdigest() != part_info["md5"]:
raise DXFileError("Checksum mismatch when verifying downloaded part {}".format(part_id))
else:
last_verified_part = part_id
last_verified_pos = fh.tell()
if show_progress:
_bytes += part_info["size"]
print_progress(_bytes, file_size, action="Verified")
except (IOError, DXFileError) as e:
logger.debug(e)
fh.seek(last_verified_pos)
fh.truncate()
if last_verified_part is not None:
del parts_to_get[:parts_to_get.index(last_verified_part)+1]
if show_progress and len(parts_to_get) < len(parts):
print_progress(last_verified_pos, file_size, action="Resuming at")
logger.debug("Verified %s/%d downloaded parts", last_verified_part, len(parts_to_get))
try:
# Main loop. In parallel: download chunks, verify them, and write them to disk.
get_first_chunk_sequentially = (file_size > 128 * 1024 and last_verified_pos == 0 and dxpy.JOB_ID)
cur_part, got_bytes, hasher = None, None, None
for chunk_part, chunk_data in response_iterator(chunk_requests(),
dxfile._http_threadpool,
do_first_task_sequentially=get_first_chunk_sequentially):
if chunk_part != cur_part:
verify_part(cur_part, got_bytes, hasher)
cur_part, got_bytes, hasher = chunk_part, 0, hashlib.md5()
got_bytes += len(chunk_data)
hasher.update(chunk_data)
fh.write(chunk_data)
if show_progress:
_bytes += len(chunk_data)
print_progress(_bytes, file_size)
verify_part(cur_part, got_bytes, hasher)
if show_progress:
print_progress(_bytes, file_size, action="Completed")
except DXFileError:
print(traceback.format_exc(), file=sys.stderr)
part_retry_counter[cur_part] -= 1
if part_retry_counter[cur_part] > 0:
print("Retrying {} ({} tries remain for part {})".format(dxfile.get_id(), part_retry_counter[cur_part], cur_part),
file=sys.stderr)
return False
raise
if show_progress:
sys.stderr.write("\n")
return True | Core of download logic. Download file-id *dxid* and store it in
a local file *filename*.
The return value is as follows:
- True means the download was successfully completed
- False means the download was stopped because of a retryable error
- An exception is raised for other errors | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L225-L401 |
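The False return value lets a caller drive a per-part retry loop. A minimal sketch of such a driver, assuming the private helper is importable from dxpy.bindings.dxfile_functions and using a hypothetical file ID:

import collections

from dxpy.bindings import dxfile_functions

def download_with_retries(file_id, local_path, retries=3):
    # Each part may be retried up to `retries` times; the helper
    # decrements the counter and returns False while tries remain.
    part_retry_counter = collections.defaultdict(lambda: retries)
    while not dxfile_functions._download_dxfile(file_id, local_path,
                                                part_retry_counter):
        pass  # retryable error; loop and try again

download_with_retries("file-xxxx", "/tmp/reads.fastq.gz")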
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | upload_local_file | def upload_local_file(filename=None, file=None, media_type=None, keep_open=False,
wait_on_close=False, use_existing_dxfile=None, show_progress=False,
write_buffer_size=None, multithread=True, **kwargs):
'''
:param filename: Local filename
:type filename: string
:param file: File-like object
:type file: File-like object
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param write_buffer_size: Buffer size to use for upload
:type write_buffer_size: int
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:param use_existing_dxfile: Instead of creating a new file object, upload to the specified file
:type use_existing_dxfile: :class:`~dxpy.bindings.dxfile.DXFile`
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Exactly one of *filename* or *file* is required.
Uploads *filename* or reads from *file* into a new file object (with
media type *media_type* if given) and returns the associated remote
file handler. The "name" property of the newly created remote file
is set to the basename of *filename* or to *file.name* (if it
exists).
Examples::
# Upload from a path
dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")
# Upload from a file-like object
with open("reads.fastq") as fh:
dxpy.upload_local_file(file=fh)
'''
fd = file if filename is None else open(filename, 'rb')
try:
file_size = os.fstat(fd.fileno()).st_size
except:
file_size = 0
file_is_mmapd = hasattr(fd, "fileno")
if write_buffer_size is None:
write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE
if use_existing_dxfile:
handler = use_existing_dxfile
else:
# Set a reasonable name for the file if none has been set
# already
creation_kwargs = kwargs.copy()
if 'name' not in kwargs:
if filename is not None:
creation_kwargs['name'] = os.path.basename(filename)
else:
# Try to get filename from file-like object
try:
local_file_name = file.name
except AttributeError:
pass
else:
creation_kwargs['name'] = os.path.basename(local_file_name)
# Use 'a' mode because we will be responsible for closing the file
# ourselves later (if requested).
handler = new_dxfile(mode='a', media_type=media_type, write_buffer_size=write_buffer_size,
expected_file_size=file_size, file_is_mmapd=file_is_mmapd, **creation_kwargs)
# For subsequent API calls, don't supply the dataobject metadata
# parameters that are only needed at creation time.
_, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)
num_ticks = 60
offset = 0
handler._ensure_write_bufsize(**remaining_kwargs)
def can_be_mmapd(fd):
if not hasattr(fd, "fileno"):
return False
mode = os.fstat(fd.fileno()).st_mode
return not (stat.S_ISCHR(mode) or stat.S_ISFIFO(mode))
def read(num_bytes):
"""
Returns a string or mmap'd data containing the next num_bytes of
the file, or up to the end if there are fewer than num_bytes
left.
"""
# If file cannot be mmap'd (e.g. is stdin, or a fifo), fall back
# to doing an actual read from the file.
if not can_be_mmapd(fd):
return fd.read(handler._write_bufsize)
bytes_available = max(file_size - offset, 0)
if bytes_available == 0:
return b""
return mmap.mmap(fd.fileno(), min(handler._write_bufsize, bytes_available), offset=offset, access=mmap.ACCESS_READ)
handler._num_bytes_transmitted = 0
def report_progress(handler, num_bytes):
handler._num_bytes_transmitted += num_bytes
if file_size > 0:
ticks = int(round((handler._num_bytes_transmitted / float(file_size)) * num_ticks))
percent = int(round((handler._num_bytes_transmitted / float(file_size)) * 100))
fmt = "[{done}{pending}] Uploaded {done_bytes:,} of {total:,} bytes ({percent}%) {name}"
sys.stderr.write(fmt.format(done='=' * (ticks - 1) + '>' if ticks > 0 else '',
pending=' ' * (num_ticks - ticks),
done_bytes=handler._num_bytes_transmitted,
total=file_size,
percent=percent,
name=filename if filename is not None else ''))
sys.stderr.flush()
sys.stderr.write("\r")
sys.stderr.flush()
if show_progress:
report_progress(handler, 0)
while True:
buf = read(handler._write_bufsize)
offset += len(buf)
if len(buf) == 0:
break
handler.write(buf,
report_progress_fn=report_progress if show_progress else None,
multithread=multithread,
**remaining_kwargs)
if filename is not None:
fd.close()
handler.flush(report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)
if show_progress:
sys.stderr.write("\n")
sys.stderr.flush()
if not keep_open:
handler.close(block=wait_on_close, report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)
return handler | python | def upload_local_file(filename=None, file=None, media_type=None, keep_open=False,
wait_on_close=False, use_existing_dxfile=None, show_progress=False,
write_buffer_size=None, multithread=True, **kwargs):
'''
:param filename: Local filename
:type filename: string
:param file: File-like object
:type file: File-like object
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param write_buffer_size: Buffer size to use for upload
:type write_buffer_size: int
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:param use_existing_dxfile: Instead of creating a new file object, upload to the specified file
:type use_existing_dxfile: :class:`~dxpy.bindings.dxfile.DXFile`
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Exactly one of *filename* or *file* is required.
Uploads *filename* or reads from *file* into a new file object (with
media type *media_type* if given) and returns the associated remote
file handler. The "name" property of the newly created remote file
is set to the basename of *filename* or to *file.name* (if it
exists).
Examples::
# Upload from a path
dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")
# Upload from a file-like object
with open("reads.fastq") as fh:
dxpy.upload_local_file(file=fh)
'''
fd = file if filename is None else open(filename, 'rb')
try:
file_size = os.fstat(fd.fileno()).st_size
except:
file_size = 0
file_is_mmapd = hasattr(fd, "fileno")
if write_buffer_size is None:
write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE
if use_existing_dxfile:
handler = use_existing_dxfile
else:
# Set a reasonable name for the file if none has been set
# already
creation_kwargs = kwargs.copy()
if 'name' not in kwargs:
if filename is not None:
creation_kwargs['name'] = os.path.basename(filename)
else:
# Try to get filename from file-like object
try:
local_file_name = file.name
except AttributeError:
pass
else:
creation_kwargs['name'] = os.path.basename(local_file_name)
# Use 'a' mode because we will be responsible for closing the file
# ourselves later (if requested).
handler = new_dxfile(mode='a', media_type=media_type, write_buffer_size=write_buffer_size,
expected_file_size=file_size, file_is_mmapd=file_is_mmapd, **creation_kwargs)
# For subsequent API calls, don't supply the dataobject metadata
# parameters that are only needed at creation time.
_, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)
num_ticks = 60
offset = 0
handler._ensure_write_bufsize(**remaining_kwargs)
def can_be_mmapd(fd):
if not hasattr(fd, "fileno"):
return False
mode = os.fstat(fd.fileno()).st_mode
return not (stat.S_ISCHR(mode) or stat.S_ISFIFO(mode))
def read(num_bytes):
"""
Returns a string or mmap'd data containing the next num_bytes of
the file, or up to the end if there are fewer than num_bytes
left.
"""
# If file cannot be mmap'd (e.g. is stdin, or a fifo), fall back
# to doing an actual read from the file.
if not can_be_mmapd(fd):
return fd.read(handler._write_bufsize)
bytes_available = max(file_size - offset, 0)
if bytes_available == 0:
return b""
return mmap.mmap(fd.fileno(), min(handler._write_bufsize, bytes_available), offset=offset, access=mmap.ACCESS_READ)
handler._num_bytes_transmitted = 0
def report_progress(handler, num_bytes):
handler._num_bytes_transmitted += num_bytes
if file_size > 0:
ticks = int(round((handler._num_bytes_transmitted / float(file_size)) * num_ticks))
percent = int(round((handler._num_bytes_transmitted / float(file_size)) * 100))
fmt = "[{done}{pending}] Uploaded {done_bytes:,} of {total:,} bytes ({percent}%) {name}"
sys.stderr.write(fmt.format(done='=' * (ticks - 1) + '>' if ticks > 0 else '',
pending=' ' * (num_ticks - ticks),
done_bytes=handler._num_bytes_transmitted,
total=file_size,
percent=percent,
name=filename if filename is not None else ''))
sys.stderr.flush()
sys.stderr.write("\r")
sys.stderr.flush()
if show_progress:
report_progress(handler, 0)
while True:
buf = read(handler._write_bufsize)
offset += len(buf)
if len(buf) == 0:
break
handler.write(buf,
report_progress_fn=report_progress if show_progress else None,
multithread=multithread,
**remaining_kwargs)
if filename is not None:
fd.close()
handler.flush(report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)
if show_progress:
sys.stderr.write("\n")
sys.stderr.flush()
if not keep_open:
handler.close(block=wait_on_close, report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)
return handler | :param filename: Local filename
:type filename: string
:param file: File-like object
:type file: File-like object
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param write_buffer_size: Buffer size to use for upload
:type write_buffer_size: int
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:param use_existing_dxfile: Instead of creating a new file object, upload to the specified file
:type use_existing_dxfile: :class:`~dxpy.bindings.dxfile.DXFile`
:param multithread: If True, sends multiple write requests asynchronously
:type multithread: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Exactly one of *filename* or *file* is required.
Uploads *filename* or reads from *file* into a new file object (with
media type *media_type* if given) and returns the associated remote
file handler. The "name" property of the newly created remote file
is set to the basename of *filename* or to *file.name* (if it
exists).
Examples::
# Upload from a path
dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")
# Upload from a file-like object
with open("reads.fastq") as fh:
dxpy.upload_local_file(file=fh) | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L403-L559 |
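A short usage sketch for upload_local_file; the project ID and paths are hypothetical, and the extra keyword arguments (project, folder) are among the creation parameters passed through to DXDataObject.new:

import dxpy

# Upload with a progress bar and block until the platform closes the file.
dxf = dxpy.upload_local_file("/tmp/reads.fastq.gz",
                             project="project-xxxx",
                             folder="/input",
                             wait_on_close=True,
                             show_progress=True)
print(dxf.get_id(), dxf.describe()["name"])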
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | upload_string | def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs):
"""
:param to_upload: String to upload into a file
:type to_upload: string
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Uploads the data in the string *to_upload* into a new file object
(with media type *media_type* if given) and returns the associated
remote file handler.
"""
# Use 'a' mode because we will be responsible for closing the file
# ourselves later (if requested).
handler = new_dxfile(media_type=media_type, mode='a', **kwargs)
# For subsequent API calls, don't supply the dataobject metadata
# parameters that are only needed at creation time.
_, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)
handler.write(to_upload, **remaining_kwargs)
if not keep_open:
handler.close(block=wait_on_close, **remaining_kwargs)
return handler | python | def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs):
"""
:param to_upload: String to upload into a file
:type to_upload: string
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Uploads the data in the string *to_upload* into a new file object
(with media type *media_type* if given) and returns the associated
remote file handler.
"""
# Use 'a' mode because we will be responsible for closing the file
# ourselves later (if requested).
handler = new_dxfile(media_type=media_type, mode='a', **kwargs)
# For subsequent API calls, don't supply the dataobject metadata
# parameters that are only needed at creation time.
_, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)
handler.write(to_upload, **remaining_kwargs)
if not keep_open:
handler.close(block=wait_on_close, **remaining_kwargs)
return handler | :param to_upload: String to upload into a file
:type to_upload: string
:param media_type: Internet Media Type
:type media_type: string
:param keep_open: If False, closes the file after uploading
:type keep_open: boolean
:param wait_on_close: If True, waits for the file to close
:type wait_on_close: boolean
:returns: Remote file handler
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Uploads the data in the string *to_upload* into a new file object
(with media type *media_type* if given) and returns the associated
remote file handler. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L561-L595 |
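A minimal sketch for upload_string; the name and project keywords below are hypothetical pass-through creation parameters:

import dxpy

# Upload an in-memory string as a small remote file.
dxf = dxpy.upload_string("sample\tcount\nA\t42\n",
                         media_type="text/tab-separated-values",
                         wait_on_close=True,
                         name="counts.tsv",
                         project="project-xxxx")
print(dxf.describe()["state"])  # "closed" once wait_on_close returns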
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | list_subfolders | def list_subfolders(project, path, recurse=True):
'''
:param project: Project ID to use as context for the listing
:type project: string
:param path: Subtree root path
:type path: string
:param recurse: Return a complete subfolders tree
:type recurse: boolean
Returns a list of subfolders for the remote *path* (included in the result) of the *project*.
Example::
list_subfolders("project-xxxx", "/input")
'''
project_folders = dxpy.get_handler(project).describe(input_params={'folders': True})['folders']
# TODO: support shell-style path globbing (e.g. /a*/c matches /ab/c but not /a/b/c)
# return pathmatch.filter(project_folders, os.path.join(path, '*'))
if recurse:
return (f for f in project_folders if f.startswith(path))
else:
return (f for f in project_folders if f.startswith(path) and '/' not in f[len(path)+1:]) | python | def list_subfolders(project, path, recurse=True):
'''
:param project: Project ID to use as context for the listing
:type project: string
:param path: Subtree root path
:type path: string
:param recurse: Return a complete subfolders tree
:type recurse: boolean
Returns a list of subfolders for the remote *path* (included in the result) of the *project*.
Example::
list_subfolders("project-xxxx", "/input")
'''
project_folders = dxpy.get_handler(project).describe(input_params={'folders': True})['folders']
# TODO: support shell-style path globbing (e.g. /a*/c matches /ab/c but not /a/b/c)
# return pathmatch.filter(project_folders, os.path.join(path, '*'))
if recurse:
return (f for f in project_folders if f.startswith(path))
else:
return (f for f in project_folders if f.startswith(path) and '/' not in f[len(path)+1:]) | :param project: Project ID to use as context for the listing
:type project: string
:param path: Subtree root path
:type path: string
:param recurse: Return a complete subfolders tree
:type recurse: boolean
Returns a list of subfolders for the remote *path* (included in the result) of the *project*.
Example::
list_subfolders("project-xxxx", "/input") | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L597-L619 |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxfile_functions.py | download_folder | def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE,
show_progress=False, **kwargs):
'''
:param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
'''
def ensure_local_dir(d):
if not os.path.isdir(d):
if os.path.exists(d):
raise DXFileError("Destination location '{}' already exists and is not a directory".format(d))
logger.debug("Creating destination directory: '%s'", d)
os.makedirs(d)
def compose_local_dir(d, remote_folder, remote_subfolder):
suffix = remote_subfolder[1:] if remote_folder == "/" else remote_subfolder[len(remote_folder) + 1:]
if os.sep != '/':
suffix = suffix.replace('/', os.sep)
return os.path.join(d, suffix) if suffix != "" else d
normalized_folder = folder.strip()
if normalized_folder != "/" and normalized_folder.endswith("/"):
normalized_folder = normalized_folder[:-1]
if normalized_folder == "":
raise DXFileError("Invalid remote folder name: '{}'".format(folder))
normalized_dest_dir = os.path.normpath(destdir).strip()
if normalized_dest_dir == "":
raise DXFileError("Invalid destination directory name: '{}'".format(destdir))
# Creating target directory tree
remote_folders = list(list_subfolders(project, normalized_folder, recurse=True))
if len(remote_folders) <= 0:
raise DXFileError("Remote folder '{}' not found".format(normalized_folder))
remote_folders.sort()
for remote_subfolder in remote_folders:
ensure_local_dir(compose_local_dir(normalized_dest_dir, normalized_folder, remote_subfolder))
# Downloading files
describe_input = dict(fields=dict(folder=True,
name=True,
id=True,
parts=True,
size=True,
drive=True,
md5=True))
# A generator that returns the files one by one. We don't want to materialize it, because
# there could be many files here.
files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project,
folder=normalized_folder, recurse=True, describe=describe_input)
if files_gen is None:
# In python 3, the generator can be None, and iterating on it
# will cause an error.
return
# Now it is safe, in both python 2 and 3, to iterate on the generator
for remote_file in files_gen:
local_filename = os.path.join(compose_local_dir(normalized_dest_dir,
normalized_folder,
remote_file['describe']['folder']),
remote_file['describe']['name'])
if os.path.exists(local_filename) and not overwrite:
raise DXFileError(
"Destination file '{}' already exists but no overwrite option is provided".format(local_filename)
)
logger.debug("Downloading '%s/%s' remote file to '%s' location",
("" if remote_file['describe']['folder'] == "/" else remote_file['describe']['folder']),
remote_file['describe']['name'],
local_filename)
download_dxfile(remote_file['describe']['id'],
local_filename,
chunksize=chunksize,
project=project,
show_progress=show_progress,
describe_output=remote_file['describe'],
**kwargs) | python | def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE,
show_progress=False, **kwargs):
'''
:param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
'''
def ensure_local_dir(d):
if not os.path.isdir(d):
if os.path.exists(d):
raise DXFileError("Destination location '{}' already exists and is not a directory".format(d))
logger.debug("Creating destination directory: '%s'", d)
os.makedirs(d)
def compose_local_dir(d, remote_folder, remote_subfolder):
suffix = remote_subfolder[1:] if remote_folder == "/" else remote_subfolder[len(remote_folder) + 1:]
if os.sep != '/':
suffix = suffix.replace('/', os.sep)
return os.path.join(d, suffix) if suffix != "" else d
normalized_folder = folder.strip()
if normalized_folder != "/" and normalized_folder.endswith("/"):
normalized_folder = normalized_folder[:-1]
if normalized_folder == "":
raise DXFileError("Invalid remote folder name: '{}'".format(folder))
normalized_dest_dir = os.path.normpath(destdir).strip()
if normalized_dest_dir == "":
raise DXFileError("Invalid destination directory name: '{}'".format(destdir))
# Creating target directory tree
remote_folders = list(list_subfolders(project, normalized_folder, recurse=True))
if len(remote_folders) <= 0:
raise DXFileError("Remote folder '{}' not found".format(normalized_folder))
remote_folders.sort()
for remote_subfolder in remote_folders:
ensure_local_dir(compose_local_dir(normalized_dest_dir, normalized_folder, remote_subfolder))
# Downloading files
describe_input = dict(fields=dict(folder=True,
name=True,
id=True,
parts=True,
size=True,
drive=True,
md5=True))
# A generator that returns the files one by one. We don't want to materialize it, because
# there could be many files here.
files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project,
folder=normalized_folder, recurse=True, describe=describe_input)
if files_gen is None:
# In python 3, the generator can be None, and iterating on it
# will cause an error.
return
# Now it is safe, in both python 2 and 3, to iterate on the generator
for remote_file in files_gen:
local_filename = os.path.join(compose_local_dir(normalized_dest_dir,
normalized_folder,
remote_file['describe']['folder']),
remote_file['describe']['name'])
if os.path.exists(local_filename) and not overwrite:
raise DXFileError(
"Destination file '{}' already exists but no overwrite option is provided".format(local_filename)
)
logger.debug("Downloading '%s/%s' remote file to '%s' location",
("" if remote_file['describe']['folder'] == "/" else remote_file['describe']['folder']),
remote_file['describe']['name'],
local_filename)
download_dxfile(remote_file['describe']['id'],
local_filename,
chunksize=chunksize,
project=project,
show_progress=show_progress,
describe_output=remote_file['describe'],
**kwargs) | :param project: Project ID to use as context for this download.
:type project: string
:param destdir: Local destination location
:type destdir: string
:param folder: Path to the remote folder to download
:type folder: string
:param overwrite: Overwrite existing files
:type overwrite: boolean
Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*.
Example::
download_folder("project-xxxx", "/home/jsmith/input", folder="/input") | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L621-L708 |
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | build | def build(src_dir, parallel_build=True):
"""
Runs any build scripts that are found in the specified directory.
In particular, runs ``./configure`` if it exists, followed by ``make -jN``
if it exists (building with as many parallel tasks as there are CPUs on the
system).
"""
# TODO: use Gentoo or deb buildsystem
config_script = os.path.join(src_dir, "configure")
if os.path.isfile(config_script) and os.access(config_script, os.X_OK):
logger.debug("Running ./configure in {cwd}".format(cwd=os.path.abspath(src_dir)))
try:
subprocess.check_call([config_script])
except subprocess.CalledProcessError as e:
raise AppBuilderException("./configure in target directory failed with exit code %d" % (e.returncode,))
if os.path.isfile(os.path.join(src_dir, "Makefile")) \
or os.path.isfile(os.path.join(src_dir, "makefile")) \
or os.path.isfile(os.path.join(src_dir, "GNUmakefile")):
if parallel_build:
make_shortcmd = "make -j%d" % (NUM_CORES,)
else:
make_shortcmd = "make"
logger.debug("Building with {make} in {cwd}".format(make=make_shortcmd, cwd=os.path.abspath(src_dir)))
try:
make_cmd = ["make", "-C", src_dir]
if parallel_build:
make_cmd.append("-j" + str(NUM_CORES))
subprocess.check_call(make_cmd)
except subprocess.CalledProcessError as e:
raise AppBuilderException("%s in target directory failed with exit code %d" % (make_shortcmd, e.returncode)) | python | def build(src_dir, parallel_build=True):
"""
Runs any build scripts that are found in the specified directory.
In particular, runs ``./configure`` if it exists, followed by ``make -jN``
if it exists (building with as many parallel tasks as there are CPUs on the
system).
"""
# TODO: use Gentoo or deb buildsystem
config_script = os.path.join(src_dir, "configure")
if os.path.isfile(config_script) and os.access(config_script, os.X_OK):
logger.debug("Running ./configure in {cwd}".format(cwd=os.path.abspath(src_dir)))
try:
subprocess.check_call([config_script])
except subprocess.CalledProcessError as e:
raise AppBuilderException("./configure in target directory failed with exit code %d" % (e.returncode,))
if os.path.isfile(os.path.join(src_dir, "Makefile")) \
or os.path.isfile(os.path.join(src_dir, "makefile")) \
or os.path.isfile(os.path.join(src_dir, "GNUmakefile")):
if parallel_build:
make_shortcmd = "make -j%d" % (NUM_CORES,)
else:
make_shortcmd = "make"
logger.debug("Building with {make} in {cwd}".format(make=make_shortcmd, cwd=os.path.abspath(src_dir)))
try:
make_cmd = ["make", "-C", src_dir]
if parallel_build:
make_cmd.append("-j" + str(NUM_CORES))
subprocess.check_call(make_cmd)
except subprocess.CalledProcessError as e:
raise AppBuilderException("%s in target directory failed with exit code %d" % (make_shortcmd, e.returncode)) | Runs any build scripts that are found in the specified directory.
In particular, runs ``./configure`` if it exists, followed by ``make -jN``
if it exists (building with as many parallel tasks as there are CPUs on the
system). | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L93-L123 |
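A hedged usage sketch; the source path is hypothetical, and AppBuilderException (raised above on a non-zero exit code) is assumed to live in the same dxpy.app_builder module:

from dxpy.app_builder import AppBuilderException, build

try:
    # Runs ./configure if present and executable, then `make -jN`.
    build("./my-applet/src", parallel_build=True)
except AppBuilderException as e:
    print("build failed:", e)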
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | is_link_local | def is_link_local(link_target):
"""
:param link_target: The target of a symbolic link, as given by os.readlink()
:type link_target: string
:returns: A boolean indicating whether the link is local to the current directory.
This is defined to mean that os.path.isabs(link_target) == False
and the link NEVER references the parent directory, so
"./foo/../../curdir/foo" would return False.
:rtype: boolean
"""
is_local=(not os.path.isabs(link_target))
if is_local:
# make sure that the path NEVER extends outside the resources directory!
d,l = os.path.split(link_target)
link_parts = []
while l:
link_parts.append(l)
d,l = os.path.split(d)
curr_path = os.sep
for p in reversed(link_parts):
is_local = (is_local and not (curr_path == os.sep and p == os.pardir) )
curr_path = os.path.abspath(os.path.join(curr_path, p))
return is_local | python | def is_link_local(link_target):
"""
:param link_target: The target of a symbolic link, as given by os.readlink()
:type link_target: string
:returns: A boolean indicating whether the link is local to the current directory.
This is defined to mean that os.path.isabs(link_target) == False
and the link NEVER references the parent directory, so
"./foo/../../curdir/foo" would return False.
:rtype: boolean
"""
is_local=(not os.path.isabs(link_target))
if is_local:
# make sure that the path NEVER extends outside the resources directory!
d,l = os.path.split(link_target)
link_parts = []
while l:
link_parts.append(l)
d,l = os.path.split(d)
curr_path = os.sep
for p in reversed(link_parts):
is_local = (is_local and not (curr_path == os.sep and p == os.pardir) )
curr_path = os.path.abspath(os.path.join(curr_path, p))
return is_local | :param link_target: The target of a symbolic link, as given by os.readlink()
:type link_target: string
:returns: A boolean indicating whether the link is local to the current directory.
This is defined to mean that os.path.isabs(link_target) == False
and the link NEVER references the parent directory, so
"./foo/../../curdir/foo" would return False.
:rtype: boolean | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L140-L165 |
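A few concrete cases, following directly from the definition above (evaluated on a POSIX system, since the walk uses os.sep):

from dxpy.app_builder import is_link_local

assert is_link_local("foo/bar")                     # relative, stays inside
assert not is_link_local("/etc/passwd")             # absolute target
assert not is_link_local("./foo/../../curdir/foo")  # escapes via ".."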
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | _fix_perms | def _fix_perms(perm_obj):
"""
:param perm_obj: A permissions object, as given by os.stat()
:type perm_obj: integer
:returns: A permissions object that is the result of "chmod a+rX" on the
given permission object. This is defined to be the permission object
bitwise or-ed with all stat.S_IR*, and if the stat.S_IXUSR bit is
set, then the permission object should also be returned bitwise or-ed
with stat.S_IX* (stat.S_IXUSR not included because it would be redundant).
:rtype: integer
"""
ret_perm = perm_obj | stat.S_IROTH | stat.S_IRGRP | stat.S_IRUSR
if ret_perm & stat.S_IXUSR:
ret_perm = ret_perm | stat.S_IXGRP | stat.S_IXOTH
return ret_perm | python | def _fix_perms(perm_obj):
"""
:param perm_obj: A permissions object, as given by os.stat()
:type perm_obj: integer
:returns: A permissions object that is the result of "chmod a+rX" on the
given permission object. This is defined to be the permission object
bitwise or-ed with all stat.S_IR*, and if the stat.S_IXUSR bit is
set, then the permission object should also be returned bitwise or-ed
with stat.S_IX* (stat.S_IXUSR not included because it would be redundant).
:rtype: integer
"""
ret_perm = perm_obj | stat.S_IROTH | stat.S_IRGRP | stat.S_IRUSR
if ret_perm & stat.S_IXUSR:
ret_perm = ret_perm | stat.S_IXGRP | stat.S_IXOTH
return ret_perm | :param perm_obj: A permissions object, as given by os.stat()
:type perm_obj: integer
:returns: A permissions object that is the result of "chmod a+rX" on the
given permission object. This is defined to be the permission object
bitwise or-ed with all stat.S_IR*, and if the stat.S_IXUSR bit is
set, then the permission object should also be returned bitwise or-ed
with stat.S_IX* (stat.S_IXUSR not included because it would be redundant).
:rtype: integer | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L167-L182 |
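Two worked examples of the "chmod a+rX" rule, using the private helper directly:

from dxpy.app_builder import _fix_perms

# Read for all; execute for group/other only when the owner-execute
# bit was already set.
assert _fix_perms(0o600) == 0o644   # rw------- -> rw-r--r--
assert _fix_perms(0o700) == 0o755   # rwx------ -> rwxr-xr-x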
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | upload_resources | def upload_resources(src_dir, project=None, folder='/', ensure_upload=False, force_symlinks=False):
"""
:param ensure_upload: If True, will bypass checksum of resources directory
and upload resources bundle unconditionally;
will NOT be able to reuse this bundle in future builds.
Else if False, will compute checksum and upload bundle
if checksum is different from a previously uploaded
bundle's checksum.
:type ensure_upload: boolean
:param force_symlinks: If True, will bypass the attempt to dereference any
non-local symlinks and will unconditionally include
the link as-is. Note that this will almost certainly
result in a broken link within the resource directory
unless you really know what you're doing.
:type force_symlinks: boolean
:returns: A list (possibly empty) of references to the generated archive(s)
:rtype: list
If it exists, archives and uploads the contents of the
``resources/`` subdirectory of *src_dir* to a new remote file
object, and returns a list describing a single bundled dependency in
the form expected by the ``bundledDepends`` field of a run
specification. Returns an empty list if no archive was created.
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
resources_dir = os.path.join(src_dir, "resources")
if os.path.exists(resources_dir) and len(os.listdir(resources_dir)) > 0:
target_folder = applet_spec['folder'] if 'folder' in applet_spec else folder
# While creating the resource bundle, optimistically look for a
# resource bundle with the same contents, and reuse it if possible.
# The resource bundle carries a property 'resource_bundle_checksum'
# that indicates the checksum; the way in which the checksum is
# computed is given below. If the checksum matches (and
# ensure_upload is False), then we will use the existing file,
# otherwise, we will compress and upload the tarball.
# The input to the SHA1 contains entries of the form (whitespace
# only included here for readability):
#
# / \0 MODE \0 MTIME \0
# /foo \0 MODE \0 MTIME \0
# ...
#
# where there is one entry for each directory or file (order is
# specified below), followed by a numeric representation of the
# mode, and the mtime in milliseconds since the epoch.
#
# Note when looking at a link, if the link is to be dereferenced,
# the mtime and mode used are those of the target (using os.stat())
# If the link is to be kept as a link, the mtime and mode are those
# of the link itself (using os.lstat())
with tempfile.NamedTemporaryFile(suffix=".tar") as tar_tmp_fh:
output_sha1 = hashlib.sha1()
tar_fh = tarfile.open(fileobj=tar_tmp_fh, mode='w')
for dirname, subdirs, files in os.walk(resources_dir):
if not dirname.startswith(resources_dir):
raise AssertionError('Expected %r to start with root directory %r' % (dirname, resources_dir))
# Add an entry for the directory itself
relative_dirname = dirname[len(resources_dir):]
dir_stat = os.lstat(dirname)
if not relative_dirname.startswith('/'):
relative_dirname = '/' + relative_dirname
fields = [relative_dirname, str(_fix_perms(dir_stat.st_mode)), str(int(dir_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# add an entry in the tar file for the current directory, but
# do not recurse!
tar_fh.add(dirname, arcname='.' + relative_dirname, recursive=False, filter=_fix_perm_filter)
# Canonicalize the order of subdirectories; this is the order in
# which they will be visited by os.walk
subdirs.sort()
# check the subdirectories for symlinks. We should throw an error
# if there are any links that point outside of the directory (unless
# --force-symlinks is given). If a link is pointing internal to
# the directory (or --force-symlinks is given), we should add it
# as a file.
for subdir_name in subdirs:
dir_path = os.path.join(dirname, subdir_name)
# If we do have a symlink,
if os.path.islink(dir_path):
# Let's get the pointed-to path to ensure that it is
# still in the directory
link_target = os.readlink(dir_path)
# If this is a local link, add it to the list of files (case 1)
# else raise an error
if force_symlinks or is_link_local(link_target):
files.append(subdir_name)
else:
raise AppBuilderException("Cannot include symlinks to directories outside of the resource directory. '%s' points to directory '%s'" % (dir_path, os.path.realpath(dir_path)))
# Canonicalize the order of files so that we compute the
# checksum in a consistent order
for filename in sorted(files):
deref_link = False
relative_filename = os.path.join(relative_dirname, filename)
true_filename = os.path.join(dirname, filename)
file_stat = os.lstat(true_filename)
# check for a link here, please!
if os.path.islink(true_filename):
# Get the pointed-to path
link_target = os.readlink(true_filename)
if not (force_symlinks or is_link_local(link_target)):
# if we are pointing outside of the directory, then:
# try to get the true stat of the file and make sure
# to dereference the link!
try:
file_stat = os.stat(os.path.join(dirname, link_target))
deref_link = True
except OSError:
# uh-oh! looks like we have a broken link!
# since this is guaranteed to cause problems (and
# we know we're not forcing symlinks here), we
# should throw an error
raise AppBuilderException("Broken symlink: Link '%s' points to '%s', which does not exist" % (true_filename, os.path.realpath(true_filename)) )
fields = [relative_filename, str(_fix_perms(file_stat.st_mode)), str(int(file_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# If we are to dereference, use the target fn
if deref_link:
true_filename = os.path.realpath(true_filename)
tar_fh.add(true_filename, arcname='.' + relative_filename, filter=_fix_perm_filter)
# end for filename in sorted(files)
# end for dirname, subdirs, files in os.walk(resources_dir):
# at this point, the tar is complete, so close the tar_fh
tar_fh.close()
# Optimistically look for a resource bundle with the same
# contents, and reuse it if possible. The resource bundle
# carries a property 'resource_bundle_checksum' that indicates
# the checksum; the way in which the checksum is computed is
# given in the documentation of _directory_checksum.
if ensure_upload:
properties_dict = {}
existing_resources = False
else:
directory_checksum = output_sha1.hexdigest()
properties_dict = dict(resource_bundle_checksum=directory_checksum)
existing_resources = dxpy.find_one_data_object(
project=dest_project,
folder=target_folder,
properties=dict(resource_bundle_checksum=directory_checksum),
visibility='either',
zero_ok=True,
state='closed',
return_handler=True
)
if existing_resources:
logger.info("Found existing resource bundle that matches local resources directory: " +
existing_resources.get_id())
dx_resource_archive = existing_resources
else:
logger.debug("Uploading in " + src_dir)
# We need to compress the tar that we've created
targz_fh = tempfile.NamedTemporaryFile(suffix=".tar.gz", delete=False)
# compress the file by reading the tar file and passing
# it through a GzipFile object, writing the given
# block size (by default 8192 bytes) at a time
targz_gzf = gzip.GzipFile(fileobj=targz_fh, mode='wb')
tar_tmp_fh.seek(0)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
while dat:
targz_gzf.write(dat)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
targz_gzf.flush()
targz_gzf.close()
targz_fh.close()
if 'folder' in applet_spec:
try:
dxpy.get_handler(dest_project).new_folder(applet_spec['folder'], parents=True)
except dxpy.exceptions.DXAPIError:
pass # TODO: make this better
dx_resource_archive = dxpy.upload_local_file(
targz_fh.name,
wait_on_close=True,
project=dest_project,
folder=target_folder,
hidden=True,
properties=properties_dict
)
os.unlink(targz_fh.name)
# end compressed file creation and upload
archive_link = dxpy.dxlink(dx_resource_archive.get_id())
# end tempfile.NamedTemporaryFile(suffix=".tar") as tar_fh
return [{'name': 'resources.tar.gz', 'id': archive_link}]
else:
return [] | python | def upload_resources(src_dir, project=None, folder='/', ensure_upload=False, force_symlinks=False):
"""
:param ensure_upload: If True, will bypass checksum of resources directory
and upload resources bundle unconditionally;
will NOT be able to reuse this bundle in future builds.
Else if False, will compute checksum and upload bundle
if checksum is different from a previously uploaded
bundle's checksum.
:type ensure_upload: boolean
:param force_symlinks: If True, will bypass the attempt to dereference any
non-local symlinks and will unconditionally include
the link as-is. Note that this will almost certainly
result in a broken link within the resource directory
unless you really know what you're doing.
:type force_symlinks: boolean
:returns: A list (possibly empty) of references to the generated archive(s)
:rtype: list
If it exists, archives and uploads the contents of the
``resources/`` subdirectory of *src_dir* to a new remote file
object, and returns a list describing a single bundled dependency in
the form expected by the ``bundledDepends`` field of a run
specification. Returns an empty list if no archive was created.
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
resources_dir = os.path.join(src_dir, "resources")
if os.path.exists(resources_dir) and len(os.listdir(resources_dir)) > 0:
target_folder = applet_spec['folder'] if 'folder' in applet_spec else folder
# While creating the resource bundle, optimistically look for a
# resource bundle with the same contents, and reuse it if possible.
# The resource bundle carries a property 'resource_bundle_checksum'
# that indicates the checksum; the way in which the checksum is
# computed is given below. If the checksum matches (and
# ensure_upload is False), then we will use the existing file,
# otherwise, we will compress and upload the tarball.
# The input to the SHA1 contains entries of the form (whitespace
# only included here for readability):
#
# / \0 MODE \0 MTIME \0
# /foo \0 MODE \0 MTIME \0
# ...
#
# where there is one entry for each directory or file (order is
# specified below), followed by a numeric representation of the
# mode, and the mtime in milliseconds since the epoch.
#
# Note when looking at a link, if the link is to be dereferenced,
# the mtime and mode used are those of the target (using os.stat())
# If the link is to be kept as a link, the mtime and mode are those
# of the link itself (using os.lstat())
with tempfile.NamedTemporaryFile(suffix=".tar") as tar_tmp_fh:
output_sha1 = hashlib.sha1()
tar_fh = tarfile.open(fileobj=tar_tmp_fh, mode='w')
for dirname, subdirs, files in os.walk(resources_dir):
if not dirname.startswith(resources_dir):
raise AssertionError('Expected %r to start with root directory %r' % (dirname, resources_dir))
# Add an entry for the directory itself
relative_dirname = dirname[len(resources_dir):]
dir_stat = os.lstat(dirname)
if not relative_dirname.startswith('/'):
relative_dirname = '/' + relative_dirname
fields = [relative_dirname, str(_fix_perms(dir_stat.st_mode)), str(int(dir_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# add an entry in the tar file for the current directory, but
# do not recurse!
tar_fh.add(dirname, arcname='.' + relative_dirname, recursive=False, filter=_fix_perm_filter)
# Canonicalize the order of subdirectories; this is the order in
# which they will be visited by os.walk
subdirs.sort()
# check the subdirectories for symlinks. We should throw an error
# if there are any links that point outside of the directory (unless
# --force-symlinks is given). If a link is pointing internal to
# the directory (or --force-symlinks is given), we should add it
# as a file.
for subdir_name in subdirs:
dir_path = os.path.join(dirname, subdir_name)
# If we do have a symlink,
if os.path.islink(dir_path):
# Let's get the pointed-to path to ensure that it is
# still in the directory
link_target = os.readlink(dir_path)
# If this is a local link, add it to the list of files (case 1)
# else raise an error
if force_symlinks or is_link_local(link_target):
files.append(subdir_name)
else:
raise AppBuilderException("Cannot include symlinks to directories outside of the resource directory. '%s' points to directory '%s'" % (dir_path, os.path.realpath(dir_path)))
# Canonicalize the order of files so that we compute the
# checksum in a consistent order
for filename in sorted(files):
deref_link = False
relative_filename = os.path.join(relative_dirname, filename)
true_filename = os.path.join(dirname, filename)
file_stat = os.lstat(true_filename)
# check for a link here, please!
if os.path.islink(true_filename):
# Get the pointed-to path
link_target = os.readlink(true_filename)
if not (force_symlinks or is_link_local(link_target)):
# if we are pointing outside of the directory, then:
# try to get the true stat of the file and make sure
# to dereference the link!
try:
file_stat = os.stat(os.path.join(dirname, link_target))
deref_link = True
except OSError:
# uh-oh! looks like we have a broken link!
# since this is guaranteed to cause problems (and
# we know we're not forcing symlinks here), we
# should throw an error
raise AppBuilderException("Broken symlink: Link '%s' points to '%s', which does not exist" % (true_filename, os.path.realpath(true_filename)) )
fields = [relative_filename, str(_fix_perms(file_stat.st_mode)), str(int(file_stat.st_mtime * 1000))]
output_sha1.update(b''.join(s.encode('utf-8') + b'\0' for s in fields))
# If we are to dereference, use the target fn
if deref_link:
true_filename = os.path.realpath(true_filename)
tar_fh.add(true_filename, arcname='.' + relative_filename, filter=_fix_perm_filter)
# end for filename in sorted(files)
# end for dirname, subdirs, files in os.walk(resources_dir):
# at this point, the tar is complete, so close the tar_fh
tar_fh.close()
# Optimistically look for a resource bundle with the same
# contents, and reuse it if possible. The resource bundle
# carries a property 'resource_bundle_checksum' that indicates
# the checksum; the way in which the checksum is computed is
# given in the documentation of _directory_checksum.
if ensure_upload:
properties_dict = {}
existing_resources = False
else:
directory_checksum = output_sha1.hexdigest()
properties_dict = dict(resource_bundle_checksum=directory_checksum)
existing_resources = dxpy.find_one_data_object(
project=dest_project,
folder=target_folder,
properties=dict(resource_bundle_checksum=directory_checksum),
visibility='either',
zero_ok=True,
state='closed',
return_handler=True
)
if existing_resources:
logger.info("Found existing resource bundle that matches local resources directory: " +
existing_resources.get_id())
dx_resource_archive = existing_resources
else:
logger.debug("Uploading in " + src_dir)
# We need to compress the tar that we've created
targz_fh = tempfile.NamedTemporaryFile(suffix=".tar.gz", delete=False)
# compress the file by reading the tar file and passing
# it through a GzipFile object, writing the given
# block size (by default 8192 bytes) at a time
targz_gzf = gzip.GzipFile(fileobj=targz_fh, mode='wb')
tar_tmp_fh.seek(0)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
while dat:
targz_gzf.write(dat)
dat = tar_tmp_fh.read(io.DEFAULT_BUFFER_SIZE)
targz_gzf.flush()
targz_gzf.close()
targz_fh.close()
if 'folder' in applet_spec:
try:
dxpy.get_handler(dest_project).new_folder(applet_spec['folder'], parents=True)
except dxpy.exceptions.DXAPIError:
pass # TODO: make this better
dx_resource_archive = dxpy.upload_local_file(
targz_fh.name,
wait_on_close=True,
project=dest_project,
folder=target_folder,
hidden=True,
properties=properties_dict
)
os.unlink(targz_fh.name)
# end compressed file creation and upload
archive_link = dxpy.dxlink(dx_resource_archive.get_id())
# end tempfile.NamedTemporaryFile(suffix=".tar") as tar_fh
return [{'name': 'resources.tar.gz', 'id': archive_link}]
else:
return [] | :param ensure_upload: If True, will bypass checksum of resources directory
and upload resources bundle unconditionally;
will NOT be able to reuse this bundle in future builds.
Else if False, will compute checksum and upload bundle
if checksum is different from a previously uploaded
bundle's checksum.
:type ensure_upload: boolean
:param force_symlinks: If True, will bypass the attempt to dereference any
non-local symlinks and will unconditionally include
the link as-is. Note that this will almost certainly
result in a broken link within the resource directory
unless you really know what you're doing.
:type force_symlinks: boolean
:returns: A list (possibly empty) of references to the generated archive(s)
:rtype: list
If it exists, archives and uploads the contents of the
``resources/`` subdirectory of *src_dir* to a new remote file
object, and returns a list describing a single bundled dependency in
the form expected by the ``bundledDepends`` field of a run
specification. Returns an empty list if no archive was created. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L195-L424 |
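A usage sketch; the project ID and folder are hypothetical:

from dxpy.app_builder import upload_resources

# Archive <src_dir>/resources and upload it, reusing an earlier bundle
# whose 'resource_bundle_checksum' property matches (ensure_upload=False).
deps = upload_resources("./my-applet", project="project-xxxx",
                        folder="/applet-resources")
# deps is [] or [{"name": "resources.tar.gz", "id": <dnanexus link>}],
# ready to extend runSpec.bundledDepends.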
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | upload_applet | def upload_applet(src_dir, uploaded_resources, check_name_collisions=True, overwrite=False, archive=False, project=None, override_folder=None, override_name=None, dx_toolkit_autodep="stable", dry_run=False, **kwargs):
"""
Creates a new applet object.
:param project: ID of container in which to create the applet.
:type project: str, or None to use whatever is specified in dxapp.json
:param override_folder: folder name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_folder: str
:param override_name: name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_name: str
:param dx_toolkit_autodep: What type of dx-toolkit dependency to
inject if none is present. "stable" for the APT package; "git"
for HEAD of dx-toolkit master branch; or False for no
dependency.
:type dx_toolkit_autodep: boolean or string
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
if 'name' not in applet_spec:
try:
applet_spec['name'] = os.path.basename(os.path.abspath(src_dir))
except:
raise AppBuilderException("Could not determine applet name from the specification (dxapp.json) or from the name of the working directory (%r)" % (src_dir,))
if override_folder:
applet_spec['folder'] = override_folder
if 'folder' not in applet_spec:
applet_spec['folder'] = '/'
if override_name:
applet_spec['name'] = override_name
if 'dxapi' not in applet_spec:
applet_spec['dxapi'] = dxpy.API_VERSION
applets_to_overwrite = []
archived_applet = None
if check_name_collisions and not dry_run:
destination_path = applet_spec['folder'] + ('/' if not applet_spec['folder'].endswith('/') else '') + applet_spec['name']
logger.debug("Checking for existing applet at " + destination_path)
for result in dxpy.find_data_objects(classname="applet", name=applet_spec["name"], folder=applet_spec['folder'], project=dest_project, recurse=False):
if overwrite:
# Don't remove the old applet until after the new one
# has been created. This avoids a race condition where
# we remove the old applet, but that causes garbage
# collection of the bundled resources that will be
# shared with the new applet
applets_to_overwrite.append(result['id'])
elif archive:
logger.debug("Archiving applet %s" % (result['id']))
proj = dxpy.DXProject(dest_project)
archive_folder = '/.Applet_archive'
try:
proj.list_folder(archive_folder)
except dxpy.DXAPIError:
proj.new_folder(archive_folder)
proj.move(objects=[result['id']], destination=archive_folder)
archived_applet = dxpy.DXApplet(result['id'], project=dest_project)
now = datetime.datetime.fromtimestamp(archived_applet.created/1000).ctime()
new_name = archived_applet.name + " ({d})".format(d=now)
archived_applet.rename(new_name)
logger.info("Archived applet %s to %s:\"%s/%s\"" % (result['id'], dest_project, archive_folder, new_name))
else:
raise AppBuilderException("An applet already exists at %s (id %s) and the --overwrite (-f) or --archive (-a) options were not given" % (destination_path, result['id']))
# -----
# Override various fields from the pristine dxapp.json
# Carry region-specific values from regionalOptions into the main
# runSpec
applet_spec["runSpec"].setdefault("bundledDepends", [])
applet_spec["runSpec"].setdefault("assetDepends", [])
if not dry_run:
region = dxpy.api.project_describe(dest_project, input_params={"fields": {"region": True}})["region"]
# if regionalOptions contain at least one region, they must include
# the region of the target project
if len(applet_spec.get('regionalOptions', {})) != 0 and region not in applet_spec.get('regionalOptions', {}):
err_mesg = "destination project is in region {} but \"regionalOptions\" do not contain this region. ".format(region)
err_mesg += "Please, update your \"regionalOptions\" specification"
raise AppBuilderException(err_mesg)
regional_options = applet_spec.get('regionalOptions', {}).get(region, {})
# We checked earlier that if region-specific values for the
# fields below are given, the same fields are not also specified
# in the top-level runSpec. So the operations below should not
# result in any user-supplied settings being clobbered.
if 'systemRequirements' in regional_options:
applet_spec["runSpec"]["systemRequirements"] = regional_options['systemRequirements']
if 'bundledDepends' in regional_options:
applet_spec["runSpec"]["bundledDepends"].extend(regional_options["bundledDepends"])
if 'assetDepends' in regional_options:
applet_spec["runSpec"]["assetDepends"].extend(regional_options["assetDepends"])
# Inline Readme.md and Readme.developer.md
dxpy.executable_builder.inline_documentation_files(applet_spec, src_dir)
# Inline the code of the program
if "file" in applet_spec["runSpec"]:
# Put it into runSpec.code instead
with open(os.path.join(src_dir, applet_spec["runSpec"]["file"])) as code_fh:
applet_spec["runSpec"]["code"] = code_fh.read()
del applet_spec["runSpec"]["file"]
    # If this applet requires a cluster, inline any bootstrapScript code that may be provided.
# bootstrapScript is an *optional* clusterSpec parameter.
# NOTE: assumes bootstrapScript is always provided as a filename
if "systemRequirements" in applet_spec["runSpec"]:
sys_reqs = applet_spec["runSpec"]["systemRequirements"]
for entry_point in sys_reqs:
try:
bootstrap_script = os.path.join(src_dir, sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"])
with open(bootstrap_script) as code_fh:
sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"] = code_fh.read()
except KeyError:
# either no "clusterSpec" or no "bootstrapScript" within "clusterSpec"
continue
except IOError:
raise AppBuilderException("The clusterSpec \"bootstrapScript\" could not be read.")
    # Attach bundled resources to the applet
if uploaded_resources is not None:
applet_spec["runSpec"]["bundledDepends"].extend(uploaded_resources)
# Validate and process assetDepends
asset_depends = applet_spec["runSpec"]["assetDepends"]
if type(asset_depends) is not list or any(type(dep) is not dict for dep in asset_depends):
raise AppBuilderException("Expected runSpec.assetDepends to be an array of objects")
for asset in asset_depends:
asset_project = asset.get("project", None)
asset_folder = asset.get("folder", '/')
asset_stages = asset.get("stages", None)
if "id" in asset:
asset_record = dxpy.DXRecord(asset["id"]).describe(fields={'details'}, default_fields=True)
elif "name" in asset and asset_project is not None and "version" in asset:
try:
asset_record = dxpy.find_one_data_object(zero_ok=True, classname="record", typename="AssetBundle",
name=asset["name"], properties=dict(version=asset["version"]),
project=asset_project, folder=asset_folder, recurse=False,
describe={"defaultFields": True, "fields": {"details": True}},
state="closed", more_ok=False)
except dxpy.exceptions.DXSearchError:
                msg = "Found more than one asset record matching name={0} in folder={1} of project={2}."
                raise AppBuilderException(msg.format(asset["name"], asset_folder, asset_project))
else:
            raise AppBuilderException("Each runSpec.assetDepends element must specify either the 'id' field or "
                                      "the 'name', 'project', and 'version' fields.")
if asset_record:
if "id" in asset:
asset_details = asset_record["details"]
else:
asset_details = asset_record["describe"]["details"]
if "archiveFileId" in asset_details:
archive_file_id = asset_details["archiveFileId"]
else:
raise AppBuilderException("The required field 'archiveFileId' was not found in "
"the details of the asset bundle %s " % asset_record["id"])
archive_file_name = dxpy.DXFile(archive_file_id).describe()["name"]
bundle_depends = {
"name": archive_file_name,
"id": archive_file_id
}
if asset_stages:
bundle_depends["stages"] = asset_stages
applet_spec["runSpec"]["bundledDepends"].append(bundle_depends)
            # If the asset record is not yet in the applet's destination project, clone it there from the asset project
if (not dry_run and
dxpy.DXRecord(dxid=asset_record["id"], project=dest_project).describe()["project"] != dest_project):
dxpy.DXRecord(asset_record["id"], project=asset_record["project"]).clone(dest_project)
else:
raise AppBuilderException("No asset bundle was found that matched the specification %s"
% (json.dumps(asset)))
# Include the DNAnexus client libraries as an execution dependency, if they are not already
# there
if dx_toolkit_autodep == "git":
dx_toolkit_dep = {"name": "dx-toolkit",
"package_manager": "git",
"url": "git://github.com/dnanexus/dx-toolkit.git",
"tag": "master",
"build_commands": "make install DESTDIR=/ PREFIX=/opt/dnanexus"}
elif dx_toolkit_autodep == "stable":
dx_toolkit_dep = {"name": "dx-toolkit", "package_manager": "apt"}
elif dx_toolkit_autodep:
raise AppBuilderException("dx_toolkit_autodep must be one of 'stable', 'git', or False; got %r instead" % (dx_toolkit_autodep,))
if dx_toolkit_autodep:
applet_spec["runSpec"].setdefault("execDepends", [])
exec_depends = applet_spec["runSpec"]["execDepends"]
if type(exec_depends) is not list or any(type(dep) is not dict for dep in exec_depends):
raise AppBuilderException("Expected runSpec.execDepends to be an array of objects")
dx_toolkit_dep_found = any(dep.get('name') in DX_TOOLKIT_PKGS or dep.get('url') in DX_TOOLKIT_GIT_URLS for dep in exec_depends)
if not dx_toolkit_dep_found:
exec_depends.append(dx_toolkit_dep)
if dx_toolkit_autodep == "git":
applet_spec.setdefault("access", {})
applet_spec["access"].setdefault("network", [])
# Note: this can be set to "github.com" instead of "*" if the build doesn't download any deps
if "*" not in applet_spec["access"]["network"]:
applet_spec["access"]["network"].append("*")
merge(applet_spec, kwargs)
# -----
# Now actually create the applet
if dry_run:
print("Would create the following applet:")
print(json.dumps(applet_spec, indent=2))
        print("*** DRY RUN -- no applet was created ***")
return None, None
if applet_spec.get("categories", []):
if "tags" not in applet_spec:
applet_spec["tags"] = []
applet_spec["tags"] = list(set(applet_spec["tags"]) | set(applet_spec["categories"]))
applet_id = dxpy.api.applet_new(applet_spec)["id"]
if archived_applet:
archived_applet.set_properties({'replacedWith': applet_id})
# Now it is permissible to delete the old applet(s), if any
if applets_to_overwrite:
logger.info("Deleting applet(s) %s" % (','.join(applets_to_overwrite)))
dxpy.DXProject(dest_project).remove_objects(applets_to_overwrite)
return applet_id, applet_spec | python | def upload_applet(src_dir, uploaded_resources, check_name_collisions=True, overwrite=False, archive=False, project=None, override_folder=None, override_name=None, dx_toolkit_autodep="stable", dry_run=False, **kwargs):
"""
Creates a new applet object.
:param project: ID of container in which to create the applet.
:type project: str, or None to use whatever is specified in dxapp.json
:param override_folder: folder name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_folder: str
:param override_name: name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_name: str
:param dx_toolkit_autodep: What type of dx-toolkit dependency to
inject if none is present. "stable" for the APT package; "git"
for HEAD of dx-toolkit master branch; or False for no
dependency.
:type dx_toolkit_autodep: boolean or string
"""
applet_spec = _get_applet_spec(src_dir)
if project is None:
dest_project = applet_spec['project']
else:
dest_project = project
applet_spec['project'] = project
if 'name' not in applet_spec:
try:
applet_spec['name'] = os.path.basename(os.path.abspath(src_dir))
        except Exception:
            raise AppBuilderException("Could not determine applet name from the specification (dxapp.json) or from the name of the working directory (%r)" % (src_dir,))
if override_folder:
applet_spec['folder'] = override_folder
if 'folder' not in applet_spec:
applet_spec['folder'] = '/'
if override_name:
applet_spec['name'] = override_name
if 'dxapi' not in applet_spec:
applet_spec['dxapi'] = dxpy.API_VERSION
applets_to_overwrite = []
archived_applet = None
if check_name_collisions and not dry_run:
destination_path = applet_spec['folder'] + ('/' if not applet_spec['folder'].endswith('/') else '') + applet_spec['name']
logger.debug("Checking for existing applet at " + destination_path)
for result in dxpy.find_data_objects(classname="applet", name=applet_spec["name"], folder=applet_spec['folder'], project=dest_project, recurse=False):
if overwrite:
# Don't remove the old applet until after the new one
# has been created. This avoids a race condition where
# we remove the old applet, but that causes garbage
# collection of the bundled resources that will be
# shared with the new applet
applets_to_overwrite.append(result['id'])
elif archive:
logger.debug("Archiving applet %s" % (result['id']))
proj = dxpy.DXProject(dest_project)
archive_folder = '/.Applet_archive'
try:
proj.list_folder(archive_folder)
except dxpy.DXAPIError:
proj.new_folder(archive_folder)
proj.move(objects=[result['id']], destination=archive_folder)
archived_applet = dxpy.DXApplet(result['id'], project=dest_project)
now = datetime.datetime.fromtimestamp(archived_applet.created/1000).ctime()
new_name = archived_applet.name + " ({d})".format(d=now)
archived_applet.rename(new_name)
logger.info("Archived applet %s to %s:\"%s/%s\"" % (result['id'], dest_project, archive_folder, new_name))
else:
raise AppBuilderException("An applet already exists at %s (id %s) and the --overwrite (-f) or --archive (-a) options were not given" % (destination_path, result['id']))
# -----
# Override various fields from the pristine dxapp.json
# Carry region-specific values from regionalOptions into the main
# runSpec
applet_spec["runSpec"].setdefault("bundledDepends", [])
applet_spec["runSpec"].setdefault("assetDepends", [])
if not dry_run:
region = dxpy.api.project_describe(dest_project, input_params={"fields": {"region": True}})["region"]
# if regionalOptions contain at least one region, they must include
# the region of the target project
if len(applet_spec.get('regionalOptions', {})) != 0 and region not in applet_spec.get('regionalOptions', {}):
            err_mesg = "Destination project is in region {}, but \"regionalOptions\" does not include this region. ".format(region)
            err_mesg += "Please update your \"regionalOptions\" specification"
raise AppBuilderException(err_mesg)
regional_options = applet_spec.get('regionalOptions', {}).get(region, {})
# We checked earlier that if region-specific values for the
# fields below are given, the same fields are not also specified
# in the top-level runSpec. So the operations below should not
# result in any user-supplied settings being clobbered.
if 'systemRequirements' in regional_options:
applet_spec["runSpec"]["systemRequirements"] = regional_options['systemRequirements']
if 'bundledDepends' in regional_options:
applet_spec["runSpec"]["bundledDepends"].extend(regional_options["bundledDepends"])
if 'assetDepends' in regional_options:
applet_spec["runSpec"]["assetDepends"].extend(regional_options["assetDepends"])
# Inline Readme.md and Readme.developer.md
dxpy.executable_builder.inline_documentation_files(applet_spec, src_dir)
# Inline the code of the program
if "file" in applet_spec["runSpec"]:
# Put it into runSpec.code instead
with open(os.path.join(src_dir, applet_spec["runSpec"]["file"])) as code_fh:
applet_spec["runSpec"]["code"] = code_fh.read()
del applet_spec["runSpec"]["file"]
    # If this applet requires a cluster, inline any bootstrapScript code that may be provided.
# bootstrapScript is an *optional* clusterSpec parameter.
# NOTE: assumes bootstrapScript is always provided as a filename
if "systemRequirements" in applet_spec["runSpec"]:
sys_reqs = applet_spec["runSpec"]["systemRequirements"]
for entry_point in sys_reqs:
try:
bootstrap_script = os.path.join(src_dir, sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"])
with open(bootstrap_script) as code_fh:
sys_reqs[entry_point]["clusterSpec"]["bootstrapScript"] = code_fh.read()
except KeyError:
# either no "clusterSpec" or no "bootstrapScript" within "clusterSpec"
continue
except IOError:
raise AppBuilderException("The clusterSpec \"bootstrapScript\" could not be read.")
    # Attach bundled resources to the applet
if uploaded_resources is not None:
applet_spec["runSpec"]["bundledDepends"].extend(uploaded_resources)
# Validate and process assetDepends
asset_depends = applet_spec["runSpec"]["assetDepends"]
if type(asset_depends) is not list or any(type(dep) is not dict for dep in asset_depends):
raise AppBuilderException("Expected runSpec.assetDepends to be an array of objects")
for asset in asset_depends:
asset_project = asset.get("project", None)
asset_folder = asset.get("folder", '/')
asset_stages = asset.get("stages", None)
if "id" in asset:
asset_record = dxpy.DXRecord(asset["id"]).describe(fields={'details'}, default_fields=True)
elif "name" in asset and asset_project is not None and "version" in asset:
try:
asset_record = dxpy.find_one_data_object(zero_ok=True, classname="record", typename="AssetBundle",
name=asset["name"], properties=dict(version=asset["version"]),
project=asset_project, folder=asset_folder, recurse=False,
describe={"defaultFields": True, "fields": {"details": True}},
state="closed", more_ok=False)
except dxpy.exceptions.DXSearchError:
                msg = "Found more than one asset record matching name={0} in folder={1} of project={2}."
                raise AppBuilderException(msg.format(asset["name"], asset_folder, asset_project))
else:
            raise AppBuilderException("Each runSpec.assetDepends element must specify either the 'id' field or "
                                      "the 'name', 'project', and 'version' fields.")
if asset_record:
if "id" in asset:
asset_details = asset_record["details"]
else:
asset_details = asset_record["describe"]["details"]
if "archiveFileId" in asset_details:
archive_file_id = asset_details["archiveFileId"]
else:
raise AppBuilderException("The required field 'archiveFileId' was not found in "
"the details of the asset bundle %s " % asset_record["id"])
archive_file_name = dxpy.DXFile(archive_file_id).describe()["name"]
bundle_depends = {
"name": archive_file_name,
"id": archive_file_id
}
if asset_stages:
bundle_depends["stages"] = asset_stages
applet_spec["runSpec"]["bundledDepends"].append(bundle_depends)
            # If the asset record is not yet in the applet's destination project, clone it there from the asset project
if (not dry_run and
dxpy.DXRecord(dxid=asset_record["id"], project=dest_project).describe()["project"] != dest_project):
dxpy.DXRecord(asset_record["id"], project=asset_record["project"]).clone(dest_project)
else:
raise AppBuilderException("No asset bundle was found that matched the specification %s"
% (json.dumps(asset)))
# Include the DNAnexus client libraries as an execution dependency, if they are not already
# there
if dx_toolkit_autodep == "git":
dx_toolkit_dep = {"name": "dx-toolkit",
"package_manager": "git",
"url": "git://github.com/dnanexus/dx-toolkit.git",
"tag": "master",
"build_commands": "make install DESTDIR=/ PREFIX=/opt/dnanexus"}
elif dx_toolkit_autodep == "stable":
dx_toolkit_dep = {"name": "dx-toolkit", "package_manager": "apt"}
elif dx_toolkit_autodep:
raise AppBuilderException("dx_toolkit_autodep must be one of 'stable', 'git', or False; got %r instead" % (dx_toolkit_autodep,))
if dx_toolkit_autodep:
applet_spec["runSpec"].setdefault("execDepends", [])
exec_depends = applet_spec["runSpec"]["execDepends"]
if type(exec_depends) is not list or any(type(dep) is not dict for dep in exec_depends):
raise AppBuilderException("Expected runSpec.execDepends to be an array of objects")
dx_toolkit_dep_found = any(dep.get('name') in DX_TOOLKIT_PKGS or dep.get('url') in DX_TOOLKIT_GIT_URLS for dep in exec_depends)
if not dx_toolkit_dep_found:
exec_depends.append(dx_toolkit_dep)
if dx_toolkit_autodep == "git":
applet_spec.setdefault("access", {})
applet_spec["access"].setdefault("network", [])
# Note: this can be set to "github.com" instead of "*" if the build doesn't download any deps
if "*" not in applet_spec["access"]["network"]:
applet_spec["access"]["network"].append("*")
merge(applet_spec, kwargs)
# -----
# Now actually create the applet
if dry_run:
print("Would create the following applet:")
print(json.dumps(applet_spec, indent=2))
        print("*** DRY RUN -- no applet was created ***")
return None, None
if applet_spec.get("categories", []):
if "tags" not in applet_spec:
applet_spec["tags"] = []
applet_spec["tags"] = list(set(applet_spec["tags"]) | set(applet_spec["categories"]))
applet_id = dxpy.api.applet_new(applet_spec)["id"]
if archived_applet:
archived_applet.set_properties({'replacedWith': applet_id})
# Now it is permissible to delete the old applet(s), if any
if applets_to_overwrite:
logger.info("Deleting applet(s) %s" % (','.join(applets_to_overwrite)))
dxpy.DXProject(dest_project).remove_objects(applets_to_overwrite)
return applet_id, applet_spec | Creates a new applet object.
:param project: ID of container in which to create the applet.
:type project: str, or None to use whatever is specified in dxapp.json
:param override_folder: folder name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_folder: str
:param override_name: name for the resulting applet which, if specified, overrides that given in dxapp.json
:type override_name: str
:param dx_toolkit_autodep: What type of dx-toolkit dependency to
inject if none is present. "stable" for the APT package; "git"
for HEAD of dx-toolkit master branch; or False for no
dependency.
:type dx_toolkit_autodep: boolean or string | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L427-L666 |
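For orientation, a minimal build sketch using this function. The project ID, source directory, and applet name below are hypothetical, and upload_resources is assumed to be the companion helper in this module that uploads the applet's resources/ directory and returns the corresponding bundledDepends entries:

import dxpy.app_builder as app_builder

src_dir = "./my_applet"   # directory containing dxapp.json (hypothetical)
project = "project-xxxx"  # hypothetical destination project ID

# Upload the resources/ directory first, then create the applet,
# overwriting any existing applet at the same folder/name.
resources = app_builder.upload_resources(src_dir, project=project)
applet_id, applet_spec = app_builder.upload_applet(
    src_dir,
    resources,
    overwrite=True,
    project=project,
    override_name="my-applet-dev",
)
print("Created", applet_id)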
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | _create_or_update_version | def _create_or_update_version(app_name, version, app_spec, try_update=True):
"""
Creates a new version of the app. Returns an app_id, or None if the app has
already been created and published.
"""
# This has a race condition since the app could have been created or
# published since we last looked.
try:
app_id = dxpy.api.app_new(app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
# TODO: detect this error more reliably
if e.name == 'InvalidInput' and e.msg == 'Specified name and version conflict with an existing alias':
print('App %s/%s already exists' % (app_spec["name"], version), file=sys.stderr)
# The version number was already taken, so app/new doesn't work.
# However, maybe it hasn't been published yet, so we might be able
# to app-xxxx/update it.
app_describe = dxpy.api.app_describe("app-" + app_name, alias=version)
if app_describe.get("published", 0) > 0:
return None
return _update_version(app_name, version, app_spec, try_update=try_update)
raise e | python | def _create_or_update_version(app_name, version, app_spec, try_update=True):
"""
Creates a new version of the app. Returns an app_id, or None if the app has
already been created and published.
"""
# This has a race condition since the app could have been created or
# published since we last looked.
try:
app_id = dxpy.api.app_new(app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
# TODO: detect this error more reliably
if e.name == 'InvalidInput' and e.msg == 'Specified name and version conflict with an existing alias':
print('App %s/%s already exists' % (app_spec["name"], version), file=sys.stderr)
# The version number was already taken, so app/new doesn't work.
# However, maybe it hasn't been published yet, so we might be able
# to app-xxxx/update it.
app_describe = dxpy.api.app_describe("app-" + app_name, alias=version)
if app_describe.get("published", 0) > 0:
return None
return _update_version(app_name, version, app_spec, try_update=try_update)
raise e | Creates a new version of the app. Returns an app_id, or None if the app has
already been created and published. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L669-L690 |
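A hedged sketch of how this internal helper might be exercised; the spec below is a minimal, hypothetical single-region app spec:

app_spec = {
    "name": "my_app",         # hypothetical app name
    "version": "0.0.1",
    "applet": "applet-xxxx",  # hypothetical applet to wrap
}

# Tries app/new first; if that name/version already exists but is
# unpublished, falls back to app-xxxx/update. Returns None only when
# the existing version has already been published.
app_id = _create_or_update_version("my_app", "0.0.1", app_spec, try_update=True)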
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | _update_version | def _update_version(app_name, version, app_spec, try_update=True):
"""
Updates a version of the app in place. Returns an app_id, or None if the
app has already been published.
"""
if not try_update:
return None
try:
app_id = dxpy.api.app_update("app-" + app_name, version, app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
if e.name == 'InvalidState':
print('App %s/%s has already been published' % (app_spec["name"], version), file=sys.stderr)
return None
raise e | python | def _update_version(app_name, version, app_spec, try_update=True):
"""
Updates a version of the app in place. Returns an app_id, or None if the
app has already been published.
"""
if not try_update:
return None
try:
app_id = dxpy.api.app_update("app-" + app_name, version, app_spec)["id"]
return app_id
except dxpy.exceptions.DXAPIError as e:
if e.name == 'InvalidState':
print('App %s/%s has already been published' % (app_spec["name"], version), file=sys.stderr)
return None
raise e | Updates a version of the app in place. Returns an app_id, or None if the
app has already been published. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L692-L706 |
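Sketch of the fallback path in isolation (names and spec hypothetical); a None return means either try_update was False or that version is already published and therefore immutable:

app_id = _update_version("my_app", "0.0.1", app_spec, try_update=True)
if app_id is None:
    # app-my_app/0.0.1 is already published (or updates were disabled);
    # a new version number would be needed instead.
    pass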
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | create_app_multi_region | def create_app_multi_region(regional_options, app_name, src_dir, publish=False, set_default=False, billTo=None,
try_versions=None, try_update=True, confirm=True):
"""
Creates a new app object from the specified applet(s).
:param regional_options: Region-specific options for the app. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app/new
for details; this should contain keys for each region the app is
to be enabled in, and for the values, a dict containing (at
minimum) a key "applet" whose value is an applet ID for that
region.
:type regional_options: dict
"""
return _create_app(dict(regionalOptions=regional_options), app_name, src_dir, publish=publish,
set_default=set_default, billTo=billTo, try_versions=try_versions, try_update=try_update,
confirm=confirm) | python | def create_app_multi_region(regional_options, app_name, src_dir, publish=False, set_default=False, billTo=None,
try_versions=None, try_update=True, confirm=True):
"""
Creates a new app object from the specified applet(s).
:param regional_options: Region-specific options for the app. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app/new
for details; this should contain keys for each region the app is
to be enabled in, and for the values, a dict containing (at
minimum) a key "applet" whose value is an applet ID for that
region.
:type regional_options: dict
"""
return _create_app(dict(regionalOptions=regional_options), app_name, src_dir, publish=publish,
set_default=set_default, billTo=billTo, try_versions=try_versions, try_update=try_update,
confirm=confirm) | Creates a new app object from the specified applet(s).
:param regional_options: Region-specific options for the app. See
https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app/new
for details; this should contain keys for each region the app is
to be enabled in, and for the values, a dict containing (at
minimum) a key "applet" whose value is an applet ID for that
region.
:type regional_options: dict | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L709-L724 |
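A hedged example of the regionalOptions mapping this function expects, with one applet previously built in each enabled region. The region names, applet IDs, and version are placeholders:

regional_options = {
    "aws:us-east-1": {"applet": "applet-xxxx"},
    "azure:westus":  {"applet": "applet-yyyy"},
}
app_id = create_app_multi_region(
    regional_options,
    "my_app",
    "./my_app_src",
    publish=False,
    try_versions=["0.0.1"],
)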
dnanexus/dx-toolkit | src/python/dxpy/app_builder.py | create_app | def create_app(applet_id, applet_name, src_dir, publish=False, set_default=False, billTo=None, try_versions=None,
try_update=True, confirm=True, regional_options=None):
"""
Creates a new app object from the specified applet.
.. deprecated:: 0.204.0
Use :func:`create_app_multi_region()` instead.
"""
# In this case we don't know the region of the applet, so we use the
# legacy API {"applet": applet_id} without specifying a region
# specifically.
return _create_app(dict(applet=applet_id), applet_name, src_dir, publish=publish, set_default=set_default,
billTo=billTo, try_versions=try_versions, try_update=try_update, confirm=confirm) | python | def create_app(applet_id, applet_name, src_dir, publish=False, set_default=False, billTo=None, try_versions=None,
try_update=True, confirm=True, regional_options=None):
"""
Creates a new app object from the specified applet.
.. deprecated:: 0.204.0
Use :func:`create_app_multi_region()` instead.
"""
# In this case we don't know the region of the applet, so we use the
# legacy API {"applet": applet_id} without specifying a region
# specifically.
return _create_app(dict(applet=applet_id), applet_name, src_dir, publish=publish, set_default=set_default,
billTo=billTo, try_versions=try_versions, try_update=try_update, confirm=confirm) | Creates a new app object from the specified applet.
.. deprecated:: 0.204.0
Use :func:`create_app_multi_region()` instead. | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/app_builder.py#L727-L740 |
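Legacy single-region call for comparison (deprecated per the docstring above; the applet ID, app name, and version are hypothetical):

app_id = create_app("applet-xxxx", "my_app", "./my_app_src", publish=True,
                    try_versions=["0.0.1"])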