Code
stringlengths
103
85.9k
Summary
sequencelengths
0
94
Please provide a description of the function:def _with_env(self, env): res = self._browse(env, self._ids) return res
[ "As the `with_env` class method but for recordset." ]
Please provide a description of the function:def _init_values(self, context=None): if context is None: context = self.env.context # Get basic fields (no relational ones) basic_fields = [] for field_name in self._columns: field = self._columns[field_name] if not getattr(field, 'relation', False): basic_fields.append(field_name) # Fetch values from the server if self.ids: rows = self.__class__.read( self.ids, basic_fields, context=context, load='_classic_write') ids_fetched = set() for row in rows: ids_fetched.add(row['id']) for field_name in row: if field_name == 'id': continue self._values[field_name][row['id']] = row[field_name] ids_in_error = set(self.ids) - ids_fetched if ids_in_error: raise ValueError( "There is no '{model}' record with IDs {ids}.".format( model=self._name, ids=list(ids_in_error))) # No ID: fields filled with default values else: default_get = self.__class__.default_get( list(self._columns), context=context) for field_name in self._columns: self._values[field_name][None] = default_get.get( field_name, False)
[ "Retrieve field values from the server.\n May be used to restore the original values in the purpose to cancel\n all changes made.\n " ]
Please provide a description of the function:def from_wei(number: int, unit: str) -> Union[int, decimal.Decimal]: if unit.lower() not in units: raise ValueError( "Unknown unit. Must be one of {0}".format("/".join(units.keys())) ) if number == 0: return 0 if number < MIN_WEI or number > MAX_WEI: raise ValueError("value must be between 1 and 2**256 - 1") unit_value = units[unit.lower()] with localcontext() as ctx: ctx.prec = 999 d_number = decimal.Decimal(value=number, context=ctx) result_value = d_number / unit_value return result_value
[ "\n Takes a number of wei and converts it to any other ether unit.\n " ]
Please provide a description of the function:def to_wei(number: int, unit: str) -> int: if unit.lower() not in units: raise ValueError( "Unknown unit. Must be one of {0}".format("/".join(units.keys())) ) if is_integer(number) or is_string(number): d_number = decimal.Decimal(value=number) elif isinstance(number, float): d_number = decimal.Decimal(value=str(number)) elif isinstance(number, decimal.Decimal): d_number = number else: raise TypeError("Unsupported type. Must be one of integer, float, or string") s_number = str(number) unit_value = units[unit.lower()] if d_number == 0: return 0 if d_number < 1 and "." in s_number: with localcontext() as ctx: multiplier = len(s_number) - s_number.index(".") - 1 ctx.prec = multiplier d_number = decimal.Decimal(value=number, context=ctx) * 10 ** multiplier unit_value /= 10 ** multiplier with localcontext() as ctx: ctx.prec = 999 result_value = decimal.Decimal(value=d_number, context=ctx) * unit_value if result_value < MIN_WEI or result_value > MAX_WEI: raise ValueError("Resulting wei value must be between 1 and 2**256 - 1") return int(result_value)
[ "\n Takes a number of a unit and converts it to wei.\n " ]
Please provide a description of the function:def to_hex( primitive: Primitives = None, hexstr: HexStr = None, text: str = None ) -> HexStr: if hexstr is not None: return HexStr(add_0x_prefix(hexstr.lower())) if text is not None: return HexStr(encode_hex(text.encode("utf-8"))) if is_boolean(primitive): return HexStr("0x1") if primitive else HexStr("0x0") if isinstance(primitive, (bytes, bytearray)): return HexStr(encode_hex(primitive)) elif is_string(primitive): raise TypeError( "Unsupported type: The primitive argument must be one of: bytes," "bytearray, int or bool and not str" ) if is_integer(primitive): return HexStr(hex(cast(int, primitive))) raise TypeError( "Unsupported type: '{0}'. Must be one of: bool, str, bytes, bytearray" "or int.".format(repr(type(primitive))) )
[ "\n Auto converts any supported value into its hex representation.\n Trims leading zeros, as defined in:\n https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding\n " ]
Please provide a description of the function:def to_int( primitive: Primitives = None, hexstr: HexStr = None, text: str = None ) -> int: if hexstr is not None: return int(hexstr, 16) elif text is not None: return int(text) elif isinstance(primitive, (bytes, bytearray)): return big_endian_to_int(primitive) elif isinstance(primitive, str): raise TypeError("Pass in strings with keyword hexstr or text") elif isinstance(primitive, (int, bool)): return int(primitive) else: raise TypeError( "Invalid type. Expected one of int/bool/str/bytes/bytearray. Got " "{0}".format(type(primitive)) )
[ "\n Converts value to its integer representation.\n Values are converted this way:\n\n * primitive:\n\n * bytes, bytearrays: big-endian integer\n * bool: True => 1, False => 0\n * hexstr: interpret hex as integer\n * text: interpret as string of digits, like '12' => 12\n " ]
Please provide a description of the function:def text_if_str( to_type: Callable[..., T], text_or_primitive: Union[bytes, int, str] ) -> T: if isinstance(text_or_primitive, str): return to_type(text=text_or_primitive) else: return to_type(text_or_primitive)
[ "\n Convert to a type, assuming that strings can be only unicode text (not a hexstr)\n\n :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n :param text_or_primitive bytes, str, int: value to convert\n " ]
Please provide a description of the function:def hexstr_if_str( to_type: Callable[..., T], hexstr_or_primitive: Union[bytes, int, str] ) -> T: if isinstance(hexstr_or_primitive, str): if remove_0x_prefix(hexstr_or_primitive) and not is_hex(hexstr_or_primitive): raise ValueError( "when sending a str, it must be a hex string. Got: {0!r}".format( hexstr_or_primitive ) ) return to_type(hexstr=hexstr_or_primitive) else: return to_type(hexstr_or_primitive)
[ "\n Convert to a type, assuming that strings can be only hexstr (not unicode text)\n\n :param to_type function: takes the arguments (primitive, hexstr=hexstr, text=text),\n eg~ to_bytes, to_text, to_hex, to_int, etc\n :param hexstr_or_primitive bytes, str, int: value to convert\n " ]
Please provide a description of the function:def validate_conversion_arguments(to_wrap): @functools.wraps(to_wrap) def wrapper(*args, **kwargs): _assert_one_val(*args, **kwargs) if kwargs: _validate_supported_kwarg(kwargs) if len(args) == 0 and "primitive" not in kwargs: _assert_hexstr_or_text_kwarg_is_text_type(**kwargs) return to_wrap(*args, **kwargs) return wrapper
[ "\n Validates arguments for conversion functions.\n - Only a single argument is present\n - Kwarg must be 'primitive' 'hexstr' or 'text'\n - If it is 'hexstr' or 'text' that it is a text type\n " ]
Please provide a description of the function:def return_arg_type(at_position): def decorator(to_wrap): @functools.wraps(to_wrap) def wrapper(*args, **kwargs): result = to_wrap(*args, **kwargs) ReturnType = type(args[at_position]) return ReturnType(result) return wrapper return decorator
[ "\n Wrap the return value with the result of `type(args[at_position])`\n " ]
Please provide a description of the function:def replace_exceptions( old_to_new_exceptions: Dict[Type[BaseException], Type[BaseException]] ) -> Callable[..., Any]: old_exceptions = tuple(old_to_new_exceptions.keys()) def decorator(to_wrap: Callable[..., Any]) -> Callable[..., Any]: @functools.wraps(to_wrap) # String type b/c pypy3 throws SegmentationFault with Iterable as arg on nested fn # Ignore so we don't have to import `Iterable` def wrapper( *args: Iterable[Any], **kwargs: Dict[str, Any] ) -> Callable[..., Any]: try: return to_wrap(*args, **kwargs) except old_exceptions as err: try: raise old_to_new_exceptions[type(err)] from err except KeyError: raise TypeError( "could not look up new exception to use for %r" % err ) from err return wrapper return decorator
[ "\n Replaces old exceptions with new exceptions to be raised in their place.\n " ]
Please provide a description of the function:def collapse_if_tuple(abi): typ = abi["type"] if not typ.startswith("tuple"): return typ delimited = ",".join(collapse_if_tuple(c) for c in abi["components"]) # Whatever comes after "tuple" is the array dims. The ABI spec states that # this will have the form "", "[]", or "[k]". array_dim = typ[5:] collapsed = "({}){}".format(delimited, array_dim) return collapsed
[ "Converts a tuple from a dict to a parenthesized list of its types.\n\n >>> from eth_utils.abi import collapse_if_tuple\n >>> collapse_if_tuple(\n ... {\n ... 'components': [\n ... {'name': 'anAddress', 'type': 'address'},\n ... {'name': 'anInt', 'type': 'uint256'},\n ... {'name': 'someBytes', 'type': 'bytes'},\n ... ],\n ... 'type': 'tuple',\n ... }\n ... )\n '(address,uint256,bytes)'\n " ]
Please provide a description of the function:def is_hex_address(value: Any) -> bool: if not is_text(value): return False elif not is_hex(value): return False else: unprefixed = remove_0x_prefix(value) return len(unprefixed) == 40
[ "\n Checks if the given string of text type is an address in hexadecimal encoded form.\n " ]
Please provide a description of the function:def is_binary_address(value: Any) -> bool: if not is_bytes(value): return False elif len(value) != 20: return False else: return True
[ "\n Checks if the given string is an address in raw bytes form.\n " ]
Please provide a description of the function:def is_address(value: Any) -> bool: if is_checksum_formatted_address(value): return is_checksum_address(value) elif is_hex_address(value): return True elif is_binary_address(value): return True else: return False
[ "\n Checks if the given string in a supported value\n is an address in any of the known formats.\n " ]
Please provide a description of the function:def to_normalized_address(value: AnyStr) -> HexAddress: try: hex_address = hexstr_if_str(to_hex, value).lower() except AttributeError: raise TypeError( "Value must be any string, instead got type {}".format(type(value)) ) if is_address(hex_address): return HexAddress(hex_address) else: raise ValueError( "Unknown format {}, attempted to normalize to {}".format(value, hex_address) )
[ "\n Converts an address to its normalized hexadecimal representation.\n " ]
Please provide a description of the function:def is_normalized_address(value: Any) -> bool: if not is_address(value): return False else: return value == to_normalized_address(value)
[ "\n Returns whether the provided value is an address in its normalized form.\n " ]
Please provide a description of the function:def is_canonical_address(address: Any) -> bool: if not is_bytes(address) or len(address) != 20: return False return address == to_canonical_address(address)
[ "\n Returns `True` if the `value` is an address in its canonical form.\n " ]
Please provide a description of the function:def is_same_address(left: AnyAddress, right: AnyAddress) -> bool: if not is_address(left) or not is_address(right): raise ValueError("Both values must be valid addresses") else: return to_normalized_address(left) == to_normalized_address(right)
[ "\n Checks if both addresses are same or not.\n " ]
Please provide a description of the function:def to_checksum_address(value: AnyStr) -> ChecksumAddress: norm_address = to_normalized_address(value) address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address))) checksum_address = add_0x_prefix( "".join( ( norm_address[i].upper() if int(address_hash[i], 16) > 7 else norm_address[i] ) for i in range(2, 42) ) ) return ChecksumAddress(HexAddress(checksum_address))
[ "\n Makes a checksum address given a supported format.\n " ]
Please provide a description of the function:def get_msi_token(resource, port=50342, msi_conf=None): request_uri = os.environ.get("MSI_ENDPOINT", 'http://localhost:{}/oauth2/token'.format(port)) payload = { 'resource': resource } if msi_conf: if len(msi_conf) > 1: raise ValueError("{} are mutually exclusive".format(list(msi_conf.keys()))) payload.update(msi_conf) try: result = requests.post(request_uri, data=payload, headers={'Metadata': 'true'}) _LOGGER.debug("MSI: Retrieving a token from %s, with payload %s", request_uri, payload) result.raise_for_status() except Exception as ex: # pylint: disable=broad-except _LOGGER.warning("MSI: Failed to retrieve a token from '%s' with an error of '%s'. This could be caused " "by the MSI extension not yet fully provisioned.", request_uri, ex) raise token_entry = result.json() return token_entry['token_type'], token_entry['access_token'], token_entry
[ "Get MSI token if MSI_ENDPOINT is set.\n\n IF MSI_ENDPOINT is not set, will try legacy access through 'http://localhost:{}/oauth2/token'.format(port).\n\n If msi_conf is used, must be a dict of one key in [\"client_id\", \"object_id\", \"msi_res_id\"]\n\n :param str resource: The resource where the token would be use.\n :param int port: The port if not the default 50342 is used. Ignored if MSI_ENDPOINT is set.\n :param dict[str,str] msi_conf: msi_conf if to request a token through a User Assigned Identity (if not specified, assume System Assigned)\n " ]
Please provide a description of the function:def get_msi_token_webapp(resource): try: msi_endpoint = os.environ['MSI_ENDPOINT'] msi_secret = os.environ['MSI_SECRET'] except KeyError as err: err_msg = "{} required env variable was not found. You might need to restart your app/function.".format(err) _LOGGER.critical(err_msg) raise RuntimeError(err_msg) request_uri = '{}/?resource={}&api-version=2017-09-01'.format(msi_endpoint, resource) headers = { 'secret': msi_secret } err = None try: result = requests.get(request_uri, headers=headers) _LOGGER.debug("MSI: Retrieving a token from %s", request_uri) if result.status_code != 200: err = result.text # Workaround since not all failures are != 200 if 'ExceptionMessage' in result.text: err = result.text except Exception as ex: # pylint: disable=broad-except err = str(ex) if err: err_msg = "MSI: Failed to retrieve a token from '{}' with an error of '{}'.".format( request_uri, err ) _LOGGER.critical(err_msg) raise RuntimeError(err_msg) _LOGGER.debug('MSI: token retrieved') token_entry = result.json() return token_entry['token_type'], token_entry['access_token'], token_entry
[ "Get a MSI token from inside a webapp or functions.\n\n Env variable will look like:\n\n - MSI_ENDPOINT = http://127.0.0.1:41741/MSI/token/\n - MSI_SECRET = 69418689F1E342DD946CB82994CDA3CB\n " ]
Please provide a description of the function:def _configure(self, **kwargs): if kwargs.get('china'): err_msg = ("china parameter is deprecated, " "please use " "cloud_environment=msrestazure.azure_cloud.AZURE_CHINA_CLOUD") warnings.warn(err_msg, DeprecationWarning) self._cloud_environment = AZURE_CHINA_CLOUD else: self._cloud_environment = AZURE_PUBLIC_CLOUD self._cloud_environment = kwargs.get('cloud_environment', self._cloud_environment) auth_endpoint = self._cloud_environment.endpoints.active_directory resource = self._cloud_environment.endpoints.active_directory_resource_id self._tenant = kwargs.get('tenant', "common") self._verify = kwargs.get('verify') # 'None' will honor ADAL_PYTHON_SSL_NO_VERIFY self.resource = kwargs.get('resource', resource) self._proxies = kwargs.get('proxies') self._timeout = kwargs.get('timeout') self._cache = kwargs.get('cache') self.store_key = "{}_{}".format( auth_endpoint.strip('/'), self.store_key) self.secret = None self._context = None
[ "Configure authentication endpoint.\n\n Optional kwargs may include:\n\n - cloud_environment (msrestazure.azure_cloud.Cloud): A targeted cloud environment\n - china (bool): Configure auth for China-based service,\n default is 'False'.\n - tenant (str): Alternative tenant, default is 'common'.\n - resource (str): Alternative authentication resource, default\n is 'https://management.core.windows.net/'.\n - verify (bool): Verify secure connection, default is 'True'.\n - timeout (int): Timeout of the request in seconds.\n - proxies (dict): Dictionary mapping protocol or protocol and\n hostname to the URL of the proxy.\n - cache (adal.TokenCache): A adal.TokenCache, see ADAL configuration\n for details. This parameter is not used here and directly passed to ADAL.\n " ]
Please provide a description of the function:def _convert_token(self, token): # Beware that ADAL returns a pointer to its own dict, do # NOT change it in place token = token.copy() # If it's from ADAL, expiresOn will be in ISO form. # Bring it back to float, using expiresIn if "expiresOn" in token and "expiresIn" in token: token["expiresOn"] = token['expiresIn'] + time.time() return {self._case.sub(r'\1_\2', k).lower(): v for k, v in token.items()}
[ "Convert token fields from camel case.\n\n :param dict token: An authentication token.\n :rtype: dict\n " ]
Please provide a description of the function:def signed_session(self, session=None): self.set_token() # Adal does the caching. self._parse_token() return super(AADMixin, self).signed_session(session)
[ "Create token-friendly Requests session, using auto-refresh.\n Used internally when a request is made.\n\n If a session object is provided, configure it directly. Otherwise,\n create a new session and return it.\n\n :param session: The session to configure for authentication\n :type session: requests.Session\n " ]
Please provide a description of the function:def refresh_session(self, session=None): if 'refresh_token' in self.token: try: token = self._context.acquire_token_with_refresh_token( self.token['refresh_token'], self.id, self.resource, self.secret # This is needed when using Confidential Client ) self.token = self._convert_token(token) except adal.AdalError as err: raise_with_traceback(AuthenticationError, "", err) return self.signed_session(session)
[ "Return updated session if token has expired, attempts to\n refresh using newly acquired token.\n\n If a session object is provided, configure it directly. Otherwise,\n create a new session and return it.\n\n :param session: The session to configure for authentication\n :type session: requests.Session\n :rtype: requests.Session.\n " ]
Please provide a description of the function:def set_token(self): super(UserPassCredentials, self).set_token() try: token = self._context.acquire_token_with_username_password( self.resource, self.username, self.password, self.id ) self.token = self._convert_token(token) except adal.AdalError as err: raise_with_traceback(AuthenticationError, "", err)
[ "Get token using Username/Password credentials.\n\n :raises: AuthenticationError if credentials invalid, or call fails.\n " ]
Please provide a description of the function:def set_token(self): super(ServicePrincipalCredentials, self).set_token() try: token = self._context.acquire_token_with_client_credentials( self.resource, self.id, self.secret ) self.token = self._convert_token(token) except adal.AdalError as err: raise_with_traceback(AuthenticationError, "", err)
[ "Get token using Client ID/Secret credentials.\n\n :raises: AuthenticationError if credentials invalid, or call fails.\n " ]
Please provide a description of the function:def signed_session(self, session=None): session = super(AdalAuthentication, self).signed_session(session) try: raw_token = self._adal_method(*self._args, **self._kwargs) except adal.AdalError as err: # pylint: disable=no-member if 'AADSTS70008:' in ((getattr(err, 'error_response', None) or {}).get('error_description') or ''): raise Expired("Credentials have expired due to inactivity.") else: raise AuthenticationError(err) except ConnectionError as err: raise AuthenticationError('Please ensure you have network connection. Error detail: ' + str(err)) scheme, token = raw_token['tokenType'], raw_token['accessToken'] header = "{} {}".format(scheme, token) session.headers['Authorization'] = header return session
[ "Create requests session with any required auth headers applied.\n\n If a session object is provided, configure it directly. Otherwise,\n create a new session and return it.\n\n :param session: The session to configure for authentication\n :type session: requests.Session\n :rtype: requests.Session\n " ]
Please provide a description of the function:def signed_session(self, session=None): # Token cache is handled by the VM extension, call each time to avoid expiration self.set_token() return super(MSIAuthentication, self).signed_session(session)
[ "Create requests session with any required auth headers applied.\n\n If a session object is provided, configure it directly. Otherwise,\n create a new session and return it.\n\n :param session: The session to configure for authentication\n :type session: requests.Session\n :rtype: requests.Session\n " ]
Please provide a description of the function:def _validate(url): if url is None: return parsed = urlparse(url) if not parsed.scheme or not parsed.netloc: raise ValueError("Invalid URL header")
[ "Validate a url.\n\n :param str url: Polling URL extracted from response header.\n :raises: ValueError if URL has no scheme or host.\n " ]
Please provide a description of the function:def _get_header_url(response, header_name): url = response.headers.get(header_name) try: _validate(url) except ValueError: return None else: return url
[ "Get a URL from a header requests.\n\n :param requests.Response response: REST call response.\n :param str header_name: Header name.\n :returns: URL if not None AND valid, None otherwise\n " ]
Please provide a description of the function:def _raise_if_bad_http_status_and_method(self, response): code = response.status_code if code in {200, 202} or \ (code == 201 and self.method in {'PUT', 'PATCH'}) or \ (code == 204 and self.method in {'DELETE', 'POST'}): return raise BadStatus( "Invalid return status for {!r} operation".format(self.method))
[ "Check response status code is valid for a Put or Patch\n request. Must be 200, 201, 202, or 204.\n\n :raises: BadStatus if invalid status.\n " ]
Please provide a description of the function:def _is_empty(self, response): if not response.content: return True try: body = response.json() return not body except ValueError: raise DeserializationError( "Error occurred in deserializing the response body.")
[ "Check if response body contains meaningful content.\n\n :rtype: bool\n :raises: DeserializationError if response body contains invalid\n json data.\n " ]
Please provide a description of the function:def _deserialize(self, response): # Hacking response with initial status_code previous_status = response.status_code response.status_code = self.initial_status_code resource = self.get_outputs(response) response.status_code = previous_status # Hack for Storage or SQL, to workaround the bug in the Python generator if resource is None: previous_status = response.status_code for status_code_to_test in [200, 201]: try: response.status_code = status_code_to_test resource = self.get_outputs(response) except ClientException: pass else: return resource finally: response.status_code = previous_status return resource
[ "Attempt to deserialize resource from response.\n\n :param requests.Response response: latest REST call response.\n " ]
Please provide a description of the function:def _get_async_status(self, response): if self._is_empty(response): return None body = response.json() return body.get('status')
[ "Attempt to find status info in response body.\n\n :param requests.Response response: latest REST call response.\n :rtype: str\n :returns: Status if found, else 'None'.\n " ]
Please provide a description of the function:def _get_provisioning_state(self, response): if self._is_empty(response): return None body = response.json() return body.get("properties", {}).get("provisioningState")
[ "\n Attempt to get provisioning state from resource.\n :param requests.Response response: latest REST call response.\n :returns: Status if found, else 'None'.\n " ]
Please provide a description of the function:def get_status_from_location(self, response): self._raise_if_bad_http_status_and_method(response) code = response.status_code if code == 202: self.status = "InProgress" else: self.status = 'Succeeded' if self._is_empty(response): self.resource = None else: self.resource = self._deserialize(response)
[ "Process the latest status update retrieved from a 'location'\n header.\n\n :param requests.Response response: latest REST call response.\n :raises: BadResponse if response has no body and not status 202.\n " ]
Please provide a description of the function:def get_status_from_resource(self, response): self._raise_if_bad_http_status_and_method(response) if self._is_empty(response): raise BadResponse('The response from long running operation ' 'does not contain a body.') status = self._get_provisioning_state(response) self.status = status or 'Succeeded' self.resource = self._deserialize(response)
[ "Process the latest status update retrieved from the same URL as\n the previous request.\n\n :param requests.Response response: latest REST call response.\n :raises: BadResponse if status not 200 or 204.\n " ]
Please provide a description of the function:def _start(self, update_cmd): try: self._poll(update_cmd) except BadStatus: self._operation.status = 'Failed' self._exception = CloudError(self._response) except BadResponse as err: self._operation.status = 'Failed' self._exception = CloudError(self._response, str(err)) except OperationFailed: self._exception = CloudError(self._response) except Exception as err: self._exception = err finally: self._done.set() callbacks, self._callbacks = self._callbacks, [] while callbacks: for call in callbacks: call(self._operation) callbacks, self._callbacks = self._callbacks, []
[ "Start the long running operation.\n On completion, runs any callbacks.\n\n :param callable update_cmd: The API reuqest to check the status of\n the operation.\n " ]
Please provide a description of the function:def _polling_cookie(self): parsed_url = urlparse(self._response.request.url) host = parsed_url.hostname.strip('.') if host == 'localhost': return {'cookie': self._response.headers.get('set-cookie', '')} return {}
[ "Collect retry cookie - we only want to do this for the test server\n at this point, unless we implement a proper cookie policy.\n\n :returns: Dictionary containing a cookie header if required,\n otherwise an empty dictionary.\n " ]
Please provide a description of the function:def _poll(self, update_cmd): initial_url = self._response.request.url while not finished(self.status()): self._delay() headers = self._polling_cookie() if self._operation.async_url: self._response = update_cmd( self._operation.async_url, headers) self._operation.set_async_url_if_present(self._response) self._operation.get_status_from_async( self._response) elif self._operation.location_url: self._response = update_cmd( self._operation.location_url, headers) self._operation.set_async_url_if_present(self._response) self._operation.get_status_from_location( self._response) elif self._operation.method == "PUT": self._response = update_cmd(initial_url, headers) self._operation.set_async_url_if_present(self._response) self._operation.get_status_from_resource( self._response) else: raise BadResponse( 'Location header is missing from long running operation.') if failed(self._operation.status): raise OperationFailed("Operation failed or cancelled") elif self._operation.should_do_final_get(): self._response = update_cmd(initial_url) self._operation.get_status_from_resource( self._response)
[ "Poll status of operation so long as operation is incomplete and\n we have an endpoint to query.\n\n :param callable update_cmd: The function to call to retrieve the\n latest status of the long running operation.\n :raises: OperationFailed if operation status 'Failed' or 'Cancelled'.\n :raises: BadStatus if response status invalid.\n :raises: BadResponse if response invalid.\n " ]
Please provide a description of the function:def add_done_callback(self, func): if self._done is None or self._done.is_set(): raise ValueError("Process is complete.") self._callbacks.append(func)
[ "Add callback function to be run once the long running operation\n has completed - regardless of the status of the operation.\n\n :param callable func: Callback function that takes at least one\n argument, a completed LongRunningOperation.\n :raises: ValueError if the long running operation has already\n completed.\n " ]
Please provide a description of the function:def remove_done_callback(self, func): if self._done is None or self._done.is_set(): raise ValueError("Process is complete.") self._callbacks = [c for c in self._callbacks if c != func]
[ "Remove a callback from the long running operation.\n\n :param callable func: The function to be removed from the callbacks.\n :raises: ValueError if the long running operation has already\n completed.\n " ]
Please provide a description of the function:def register_rp_hook(r, *args, **kwargs): if r.status_code == 409 and 'msrest' in kwargs: rp_name = _check_rp_not_registered_err(r) if rp_name: session = kwargs['msrest']['session'] url_prefix = _extract_subscription_url(r.request.url) if not _register_rp(session, url_prefix, rp_name): return req = r.request # Change the 'x-ms-client-request-id' otherwise the Azure endpoint # just returns the same 409 payload without looking at the actual query if 'x-ms-client-request-id' in req.headers: req.headers['x-ms-client-request-id'] = str(uuid.uuid1()) return session.send(req)
[ "This is a requests hook to register RP automatically.\n\n You should not use this command manually, this is added automatically\n by the SDK.\n\n See requests documentation for details of the signature of this function.\n http://docs.python-requests.org/en/master/user/advanced/#event-hooks\n " ]
Please provide a description of the function:def _extract_subscription_url(url): match = re.match(r".*/subscriptions/[a-f0-9-]+/", url, re.IGNORECASE) if not match: raise ValueError("Unable to extract subscription ID from URL") return match.group(0)
[ "Extract the first part of the URL, just after subscription:\n https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/\n " ]
Please provide a description of the function:def _register_rp(session, url_prefix, rp_name): post_url = "{}providers/{}/register?api-version=2016-02-01".format(url_prefix, rp_name) get_url = "{}providers/{}?api-version=2016-02-01".format(url_prefix, rp_name) _LOGGER.warning("Resource provider '%s' used by this operation is not " "registered. We are registering for you.", rp_name) post_response = session.post(post_url) if post_response.status_code != 200: _LOGGER.warning("Registration failed. Please register manually.") return False while True: time.sleep(10) rp_info = session.get(get_url).json() if rp_info['registrationState'] == 'Registered': _LOGGER.warning("Registration succeeded.") return True
[ "Synchronously register the RP is paremeter.\n \n Return False if we have a reason to believe this didn't work\n " ]
Please provide a description of the function:def parse_resource_id(rid): if not rid: return {} match = _ARMID_RE.match(rid) if match: result = match.groupdict() children = _CHILDREN_RE.finditer(result['children'] or '') count = None for count, child in enumerate(children): result.update({ key + '_%d' % (count + 1): group for key, group in child.groupdict().items()}) result['last_child_num'] = count + 1 if isinstance(count, int) else None result = _populate_alternate_kwargs(result) else: result = dict(name=rid) return {key: value for key, value in result.items() if value is not None}
[ "Parses a resource_id into its various parts.\n\n Returns a dictionary with a single key-value pair, 'name': rid, if invalid resource id.\n\n :param rid: The resource id being parsed\n :type rid: str\n :returns: A dictionary with with following key/value pairs (if found):\n\n - subscription: Subscription id\n - resource_group: Name of resource group\n - namespace: Namespace for the resource provider (i.e. Microsoft.Compute)\n - type: Type of the root resource (i.e. virtualMachines)\n - name: Name of the root resource\n - child_namespace_{level}: Namespace for the child resoure of that level\n - child_type_{level}: Type of the child resource of that level\n - child_name_{level}: Name of the child resource of that level\n - last_child_num: Level of the last child\n - resource_parent: Computed parent in the following pattern: providers/{namespace}\\\n /{parent}/{type}/{name}\n - resource_namespace: Same as namespace. Note that this may be different than the \\\n target resource's namespace.\n - resource_type: Type of the target resource (not the parent)\n - resource_name: Name of the target resource (not the parent)\n\n :rtype: dict[str,str]\n " ]
def _populate_alternate_kwargs(kwargs):
    """Add the generic ``resource_*`` aliases used by ARM commands
    (resource/lock): namespace, plus the type/name of the deepest child
    (falling back to the root resource when there are no children)."""
    deepest = kwargs['last_child_num']
    alias_namespace = kwargs['namespace']
    alias_type = kwargs.get('child_type_{}'.format(deepest)) or kwargs['type']
    alias_name = kwargs.get('child_name_{}'.format(deepest)) or kwargs['name']

    _get_parents_from_parts(kwargs)
    kwargs['resource_namespace'] = alias_namespace
    kwargs['resource_type'] = alias_type
    kwargs['resource_name'] = alias_name
    return kwargs
[ " Translates the parsed arguments into a format used by generic ARM commands\n such as the resource and lock commands.\n " ]
Please provide a description of the function:def _get_parents_from_parts(kwargs): parent_builder = [] if kwargs['last_child_num'] is not None: parent_builder.append('{type}/{name}/'.format(**kwargs)) for index in range(1, kwargs['last_child_num']): child_namespace = kwargs.get('child_namespace_{}'.format(index)) if child_namespace is not None: parent_builder.append('providers/{}/'.format(child_namespace)) kwargs['child_parent_{}'.format(index)] = ''.join(parent_builder) parent_builder.append( '{{child_type_{0}}}/{{child_name_{0}}}/' .format(index).format(**kwargs)) child_namespace = kwargs.get('child_namespace_{}'.format(kwargs['last_child_num'])) if child_namespace is not None: parent_builder.append('providers/{}/'.format(child_namespace)) kwargs['child_parent_{}'.format(kwargs['last_child_num'])] = ''.join(parent_builder) kwargs['resource_parent'] = ''.join(parent_builder) if kwargs['name'] else None return kwargs
[ " Get the parents given all the children parameters.\n " ]
def resource_id(**kwargs):
    """Assemble an ARM resource id string from named parts.

    Builds left to right and stops as soon as a required part is missing,
    returning whatever prefix was assembled so far. ``subscription`` is
    mandatory; None values are ignored.

    :param dict kwargs: subscription, resource_group, namespace, type, name,
     child_namespace_{level}, child_type_{level}, child_name_{level}.
    :returns: the (possibly partial) resource id.
    :rtype: str
    :raises KeyError: if ``subscription`` is missing.
    """
    parts = {key: value for key, value in kwargs.items() if value is not None}
    built = ['/subscriptions/{}'.format(parts['subscription'])]
    if 'resource_group' in parts:
        built.append('resourceGroups/{}'.format(parts['resource_group']))
    if 'namespace' in parts:
        built.append('providers/{}'.format(parts['namespace']))
        if 'type' in parts and 'name' in parts:
            built.append('{}/{}'.format(parts['type'], parts['name']))
            level = 1
            while True:
                # An orphan child namespace is still appended (mirrors the
                # historical exception-driven behavior).
                child_namespace = parts.get('child_namespace_%d' % level)
                if child_namespace is not None:
                    built.append('providers/{}'.format(child_namespace))
                child_type = parts.get('child_type_%d' % level)
                child_name = parts.get('child_name_%d' % level)
                if child_type is None or child_name is None:
                    break
                built.append('{}/{}'.format(child_type, child_name))
                level += 1
    return '/'.join(built)
[ "Create a valid resource id string from the given parts.\n\n This method builds the resource id from the left until the next required id parameter\n to be appended is not found. It then returns the built up id.\n\n :param dict kwargs: The keyword arguments that will make up the id.\n\n The method accepts the following keyword arguments:\n - subscription (required): Subscription id\n - resource_group: Name of resource group\n - namespace: Namespace for the resource provider (i.e. Microsoft.Compute)\n - type: Type of the resource (i.e. virtualMachines)\n - name: Name of the resource (or parent if child_name is also \\\n specified)\n - child_namespace_{level}: Namespace for the child resoure of that level (optional)\n - child_type_{level}: Type of the child resource of that level\n - child_name_{level}: Name of the child resource of that level\n\n :returns: A resource id built from the given arguments.\n :rtype: str\n " ]
def is_valid_resource_id(rid, exception_type=None):
    """Validate a resource id by round-tripping it through
    parse_resource_id/resource_id and comparing case-insensitively.

    :param str rid: the resource id to validate.
    :param exception_type: optional exception class raised when invalid.
    :returns: truthy when valid, falsy otherwise.
    """
    outcome = False
    try:
        outcome = rid and resource_id(**parse_resource_id(rid)).lower() == rid.lower()
    except KeyError:
        # Round-trip failed on a missing required part: id is invalid.
        pass
    if exception_type and not outcome:
        raise exception_type()
    return outcome
[ "Validates the given resource id.\n\n :param rid: The resource id being validated.\n :type rid: str\n :param exception_type: Raises this Exception if invalid.\n :type exception_type: :class:`Exception`\n :returns: A boolean describing whether the id is valid.\n :rtype: bool\n " ]
def is_valid_resource_name(rname, exception_type=None):
    """Validate a resource name against the generic ARM naming rules
    (individual services may be stricter).

    :param str rname: the resource name to validate.
    :param exception_type: optional exception class raised when invalid.
    :rtype: bool
    """
    if _ARMNAME_RE.match(rname):
        return True
    if exception_type:
        raise exception_type()
    return False
[ "Validates the given resource name to ARM guidelines, individual services may be more restrictive.\n\n :param rname: The resource name being validated.\n :type rname: str\n :param exception_type: Raises this Exception if invalid.\n :type exception_type: :class:`Exception`\n :returns: A boolean describing whether the name is valid.\n :rtype: bool\n " ]
def save(self, filepath):
    """Save current configuration to file.

    Writes the Azure-specific 'long_running_operation_timeout' setting in
    an "Azure" section, then delegates the actual file write to the base
    msrest Configuration.save.

    :param str filepath: Path to save file to.
    :raises: ValueError if supplied filepath cannot be written to.
    """
    self._config.add_section("Azure")
    self._config.set("Azure", "long_running_operation_timeout",
                     self.long_running_operation_timeout)
    return super(AzureConfiguration, self).save(filepath)
[ "Save current configuration to file.\n\n :param str filepath: Path to save file to.\n :raises: ValueError if supplied filepath cannot be written to.\n " ]
def load(self, filepath):
    """Load configuration from existing file.

    Reads the Azure-specific 'long_running_operation_timeout' setting,
    clears the scratch parser state, then delegates to the base
    msrest Configuration.load.

    :param str filepath: Path to existing config file.
    :raises: ValueError if supplied config file is invalid.
    """
    try:
        self._config.read(filepath)
        self.long_running_operation_timeout = self._config.getint(
            "Azure", "long_running_operation_timeout")
    except (ValueError, EnvironmentError, NoOptionError):
        msg = "Supplied config file incompatible"
        raise_with_traceback(ValueError, msg)
    finally:
        # Reset the parser state even on failure, before the base load re-reads.
        self._clear_config()
    return super(AzureConfiguration, self).load(filepath)
[ "Load configuration from existing file.\n\n :param str filepath: Path to existing config file.\n :raises: ValueError if supplied config file is invalid.\n " ]
async def _poll(self):
    """Poll status of operation so long as operation is incomplete and
    we have an endpoint to query.

    On completion, optionally does a final GET to fetch the resource
    (per should_do_final_get), preferring the Location URL for POSTs.

    :raises: OperationFailed if operation status 'Failed' or 'Cancelled'.
    :raises: BadStatus if response status invalid.
    :raises: BadResponse if response invalid.
    """
    while not self.finished():
        await self._delay()
        await self.update_status()
    if failed(self._operation.status):
        raise OperationFailed("Operation failed or cancelled")
    elif self._operation.should_do_final_get():
        # POST operations expose the created resource via Location.
        if self._operation.method == 'POST' and self._operation.location_url:
            final_get_url = self._operation.location_url
        else:
            final_get_url = self._operation.initial_response.request.url
        self._response = await self.request_status(final_get_url)
        self._operation.get_status_from_resource(self._response)
[ "Poll status of operation so long as operation is incomplete and\n we have an endpoint to query.\n\n :param callable update_cmd: The function to call to retrieve the\n latest status of the long running operation.\n :raises: OperationFailed if operation status 'Failed' or 'Cancelled'.\n :raises: BadStatus if response status invalid.\n :raises: BadResponse if response invalid.\n " ]
Please provide a description of the function:async def _delay(self): if self._response is None: await asyncio.sleep(0) if self._response.headers.get('retry-after'): await asyncio.sleep(int(self._response.headers['retry-after'])) else: await asyncio.sleep(self._timeout)
[ "Check for a 'retry-after' header to set timeout,\n otherwise use configured timeout.\n " ]
async def update_status(self):
    """Update the current status of the LRO.

    Prefers the 'azure-asyncoperation' URL, then the 'Location' URL, then
    (for PUT only) re-GETs the original resource URL. Each poll also
    refreshes the async URL in case the server moved it.

    :raises: BadResponse when no polling endpoint is available.
    """
    if self._operation.async_url:
        self._response = await self.request_status(self._operation.async_url)
        self._operation.set_async_url_if_present(self._response)
        self._operation.get_status_from_async(self._response)
    elif self._operation.location_url:
        self._response = await self.request_status(self._operation.location_url)
        self._operation.set_async_url_if_present(self._response)
        self._operation.get_status_from_location(self._response)
    elif self._operation.method == "PUT":
        # PUT creates/updates at a known URL, so the resource itself can be polled.
        initial_url = self._operation.initial_response.request.url
        self._response = await self.request_status(initial_url)
        self._operation.set_async_url_if_present(self._response)
        self._operation.get_status_from_resource(self._response)
    else:
        raise BadResponse("Unable to find status link for polling.")
[ "Update the current status of the LRO.\n " ]
async def request_status(self, status_link):
    """Do a simple GET to this status link.

    This method re-injects 'x-ms-client-request-id' from the initial
    request, as required by ARM while polling.

    :param str status_link: URL to poll.
    :rtype: requests.Response
    """
    # ARM requires to re-inject 'x-ms-client-request-id' while polling
    header_parameters = {
        'x-ms-client-request-id': self._operation.initial_response.request.headers['x-ms-client-request-id']
    }
    request = self._client.get(status_link, headers=header_parameters)
    return await self._client.async_send(request, stream=False, **self._operation_config)
[ "Do a simple GET to this status link.\n\n This method re-inject 'x-ms-client-request-id'.\n\n :rtype: requests.Response\n " ]
def message(self, value):
    """Attempt to deconstruct the error message to retrieve further
    error data (request id and error time), best-effort.

    :param value: raw error message; may be a repr of a dict with a
     'value' key, a plain string, or any other object.
    """
    try:
        # The message is sometimes the repr of a structure: decode it safely.
        import ast
        value = ast.literal_eval(value)
    except (SyntaxError, TypeError, ValueError):
        pass
    try:
        value = value.get('value', value)
        msg_data = value.split('\n')
        self._message = msg_data[0]
    except AttributeError:
        # Not a dict/str shaped payload: store it as-is and stop parsing.
        self._message = value
        return
    try:
        # Lines 2 and 3 conventionally carry "...: <request id>" and "...: <time>".
        self.request_id = msg_data[1].partition(':')[2]
        time_str = msg_data[2].partition(':')
        self.error_time = Deserializer.deserialize_iso(
            "".join(time_str[2:]))
    except (IndexError, DeserializationError):
        # Optional metadata missing or malformed: ignore.
        pass
[ "Attempt to deconstruct error message to retrieve further\n error data.\n " ]
def get_cloud_from_metadata_endpoint(arm_endpoint, name=None, session=None):
    """Get a Cloud object from an ARM endpoint.

    .. versionadded:: 0.4.11

    :Example:

    .. code:: python

        get_cloud_from_metadata_endpoint(https://management.azure.com/, "Public Azure")

    :param str arm_endpoint: The ARM management endpoint
    :param str name: An optional name for the Cloud object. Otherwise it's the ARM endpoint
    :params requests.Session session: A requests session object if you need to configure proxy, cert, etc.
    :rtype Cloud:
    :returns: a Cloud object
    :raises: MetadataEndpointError if unable to build the Cloud object
    """
    cloud = Cloud(name or arm_endpoint)
    cloud.endpoints.management = arm_endpoint
    cloud.endpoints.resource_manager = arm_endpoint
    # Remaining endpoints/suffixes are filled from the endpoint's metadata API.
    _populate_from_metadata_endpoint(cloud, arm_endpoint, session)
    return cloud
[ "Get a Cloud object from an ARM endpoint.\n\n .. versionadded:: 0.4.11\n\n :Example:\n\n .. code:: python\n\n get_cloud_from_metadata_endpoint(https://management.azure.com/, \"Public Azure\")\n\n :param str arm_endpoint: The ARM management endpoint\n :param str name: An optional name for the Cloud object. Otherwise it's the ARM endpoint\n :params requests.Session session: A requests session object if you need to configure proxy, cert, etc.\n :rtype Cloud:\n :returns: a Cloud object\n :raises: MetadataEndpointError if unable to build the Cloud object\n " ]
def get_header_url(response, header_name):
    """Extract a URL from a response header.

    :param requests.Response response: REST call response.
    :param str header_name: header to read.
    :returns: the URL when present AND valid, None otherwise.
    """
    candidate = response.headers.get(header_name)
    try:
        _validate(candidate)
    except ValueError:
        return None
    return candidate
[ "Get a URL from a header requests.\n\n :param requests.Response response: REST call response.\n :param str header_name: Header name.\n :returns: URL if not None AND valid, None otherwise\n " ]
Please provide a description of the function:def _is_empty(self, response): # Assume ClientResponse has "body", and otherwise it's a requests.Response content = response.text() if hasattr(response, "body") else response.text if not content: return True try: return not json.loads(content) except ValueError: raise DeserializationError( "Error occurred in deserializing the response body.")
[ "Check if response body contains meaningful content.\n\n :rtype: bool\n :raises: DeserializationError if response body contains invalid json data.\n " ]
Please provide a description of the function:def _as_json(self, response): # Assume ClientResponse has "body", and otherwise it's a requests.Response content = response.text() if hasattr(response, "body") else response.text try: return json.loads(content) except ValueError: raise DeserializationError( "Error occurred in deserializing the response body.")
[ "Assuming this is not empty, return the content as JSON.\n\n Result/exceptions is not determined if you call this method without testing _is_empty.\n\n :raises: DeserializationError if response body contains invalid json data.\n " ]
Please provide a description of the function:def _get_async_status(self, response): if self._is_empty(response): return None body = self._as_json(response) return body.get('status')
[ "Attempt to find status info in response body.\n\n :param requests.Response response: latest REST call response.\n :rtype: str\n :returns: Status if found, else 'None'.\n " ]
Please provide a description of the function:def _get_provisioning_state(self, response): if self._is_empty(response): return None body = self._as_json(response) return body.get("properties", {}).get("provisioningState")
[ "\n Attempt to get provisioning state from resource.\n :param requests.Response response: latest REST call response.\n :returns: Status if found, else 'None'.\n " ]
def should_do_final_get(self):
    """Decide whether polling must end with a final GET.

    True for PUT/PATCH when the resource was never captured or an async
    URL was used, and for POST when the LRO options request the final
    state via the Location header and both URLs are known.

    :rtype: bool
    """
    put_needs_refresh = (self.async_url or not self.resource) \
        and self.method in {'PUT', 'PATCH'}
    if put_needs_refresh:
        return True
    return (self.lro_options['final-state-via'] == _LOCATION_FINAL_STATE
            and self.location_url and self.async_url and self.method == 'POST')
[ "Check whether the polling should end doing a final GET.\n\n :param requests.Response response: latest REST call response.\n :rtype: bool\n " ]
def set_initial_status(self, response):
    """Process the first response after initiating the long running
    operation and set the self.status attribute.

    Status ladder: any polling URL or a 202 means 'InProgress'; otherwise
    200/201 use the body's provisioningState (with per-code fallbacks)
    and 204 is an immediate 'Succeeded' with no resource.

    :param requests.Response response: initial REST call response.
    :raises: OperationFailed on an unexpected status code.
    """
    self._raise_if_bad_http_status_and_method(response)
    if self._is_empty(response):
        self.resource = None
    else:
        try:
            self.resource = self._deserialize(response)
        except DeserializationError:
            # Body not deserializable yet; a final GET may fill it later.
            self.resource = None
    self.set_async_url_if_present(response)
    if response.status_code in {200, 201, 202, 204}:
        if self.async_url or self.location_url or response.status_code == 202:
            self.status = 'InProgress'
        elif response.status_code == 201:
            status = self._get_provisioning_state(response)
            self.status = status or 'InProgress'
        elif response.status_code == 200:
            status = self._get_provisioning_state(response)
            self.status = status or 'Succeeded'
        elif response.status_code == 204:
            self.status = 'Succeeded'
            self.resource = None
        else:
            # Defensive: unreachable given the outer membership test.
            raise OperationFailed("Invalid status found")
        return
    raise OperationFailed("Operation failed or cancelled")
[ "Process first response after initiating long running\n operation and set self.status attribute.\n\n :param requests.Response response: initial REST call response.\n " ]
def get_status_from_resource(self, response):
    """Refresh status from a GET on the same URL as the previous request.

    Uses the body's provisioningState when present, defaulting to
    'Succeeded', then re-parses the resource.

    :param requests.Response response: latest REST call response.
    :raises: BadResponse when the body is empty.
    """
    self._raise_if_bad_http_status_and_method(response)
    if self._is_empty(response):
        raise BadResponse('The response from long running operation '
                          'does not contain a body.')
    provisioning = self._get_provisioning_state(response)
    self.status = provisioning if provisioning else 'Succeeded'
    self.parse_resource(response)
[ "Process the latest status update retrieved from the same URL as\n the previous request.\n\n :param requests.Response response: latest REST call response.\n :raises: BadResponse if status not 200 or 204.\n " ]
def parse_resource(self, response):
    """Deserialize the response body into self.resource.

    An empty body is taken to mean there is no resource to return
    (self.resource becomes None).
    """
    self._raise_if_bad_http_status_and_method(response)
    self.resource = None if self._is_empty(response) else self._deserialize(response)
[ "Assuming this response is a resource, use the deserialization callback to parse it.\n If body is empty, assuming no resource to return.\n " ]
def get_status_from_async(self, response):
    """Refresh status from an 'azure-asyncoperation' status document.

    :param requests.Response response: latest REST call response.
    :raises: BadResponse when the body is empty or carries no status.
    """
    self._raise_if_bad_http_status_and_method(response)
    if self._is_empty(response):
        raise BadResponse('The response from long running operation '
                          'does not contain a body.')
    self.status = self._get_async_status(response)
    if not self.status:
        raise BadResponse("No status found in body")
    # Per the ARM RPC spec (Addendum, "Operation Resource Format"), the
    # status document may embed resource "properties" on success, so parse
    # opportunistically and swallow any failure.
    try:
        self.resource = self._deserialize(response)
    except Exception:
        self.resource = None
[ "Process the latest status update retrieved from a\n 'azure-asyncoperation' header.\n\n :param requests.Response response: latest REST call response.\n :raises: BadResponse if response has no body, or body does not\n contain status.\n " ]
def initialize(self, client, initial_response, deserialization_callback):
    """Set the initial status of this LRO.

    Any failure while interpreting the initial response marks the
    operation 'Failed' and is surfaced to the caller as a CloudError.

    :param client: service client used for subsequent polling requests.
    :param initial_response: the initial response of the poller.
    :param deserialization_callback: callable turning a response into the resource.
    :raises: CloudError if initial status is an incorrect LRO state.
    """
    self._client = client
    self._response = initial_response
    self._operation = LongRunningOperation(initial_response, deserialization_callback,
                                           self._lro_options)
    try:
        self._operation.set_initial_status(initial_response)
    except BadStatus:
        self._operation.status = 'Failed'
        raise CloudError(initial_response)
    except BadResponse as err:
        self._operation.status = 'Failed'
        raise CloudError(initial_response, str(err))
    except OperationFailed:
        raise CloudError(initial_response)
[ "Set the initial status of this LRO.\n\n :param initial_response: The initial response of the poller\n :raises: CloudError if initial status is incorrect LRO state\n " ]
def worker():
    """Initialize the distributed environment and benchmark point-to-point
    tensor transfers between rank 0 (sender) and rank 1 (receiver),
    logging per-iteration throughput and summary statistics.
    """
    import torch
    import torch.distributed as dist
    from torch.multiprocessing import Process
    import numpy as np

    print("Initializing distributed pytorch")
    os.environ['MASTER_ADDR'] = str(args.master_addr)
    os.environ['MASTER_PORT'] = str(args.master_port)
    # Use TCP backend. Gloo needs nightly, where it currently fails with
    #     dist.init_process_group('gloo', rank=args.rank,
    #     AttributeError: module 'torch.distributed' has no attribute 'init_process_group'
    dist.init_process_group('tcp', rank=args.rank, world_size=args.size)

    # 250k float32 elements per MB (4 bytes each); value encodes the rank.
    tensor = torch.ones(args.size_mb*250*1000)*(args.rank+1)
    time_list = []
    # Only rank 0's log is kept; other ranks write to /dev/null.
    outfile = 'out' if args.rank == 0 else '/dev/null'
    log = util.FileLogger(outfile)
    for i in range(args.iters):
        # print('before: rank ', args.rank, ' has data ', tensor[0])
        start_time = time.perf_counter()
        if args.rank == 0:
            dist.send(tensor=tensor, dst=1)
        else:
            dist.recv(tensor=tensor, src=0)
        elapsed_time_ms = (time.perf_counter() - start_time)*1000
        time_list.append(elapsed_time_ms)
        # print('after: rank ', args.rank, ' has data ', tensor[0])
        rate = args.size_mb/(elapsed_time_ms/1000)
        log('%03d/%d added %d MBs in %.1f ms: %.2f MB/second' % (i, args.iters, args.size_mb, elapsed_time_ms, rate))

    # NOTE: `min` shadows the builtin for the rest of this function.
    min = np.min(time_list)
    median = np.median(time_list)
    log(f"min: {min:8.2f}, median: {median:8.2f}, mean: {np.mean(time_list):8.2f}")
[ " Initialize the distributed environment. " ]
def set_backend(backend_name: str):
    """Select the execution backend ('local' or 'aws') for subsequent
    make_task/make_job calls, and propagate its log root.

    Must be called before any task is launched.

    :param backend_name: 'aws' or 'local'.
    """
    global _backend, _backend_name
    _backend_name = backend_name
    assert not ncluster_globals.task_launched, "Not allowed to change backend after launching a task (this pattern is error-prone)"
    if backend_name == 'aws':
        _backend = aws_backend
    elif backend_name == 'local':
        _backend = local_backend
    else:
        assert False, f"Unknown backend {backend_name}"
    # Each backend defines where run logdirs are rooted.
    ncluster_globals.LOGDIR_ROOT = _backend.LOGDIR_ROOT
[ "Sets backend (local or aws)" ]
def make_job(name: str = '',
             run_name: str = '',
             num_tasks: int = 0,
             install_script: str = '',
             **kwargs
             ) -> backend.Job:
    """Create a job using the current backend. Blocks until all tasks are
    up and initialized.

    Thin delegation to the backend selected via set_backend.

    :param name: name of the job.
    :param run_name: name of the run (auto-assigned if empty).
    :param num_tasks: number of tasks.
    :param install_script: bash-runnable script.
    :returns: backend.Job
    """
    return _backend.make_job(name=name, run_name=run_name, num_tasks=num_tasks,
                             install_script=install_script, **kwargs)
[ "\n Create a job using current backend. Blocks until all tasks are up and initialized.\n\n Args:\n name: name of the job\n run_name: name of the run (auto-assigned if empty)\n num_tasks: number of tasks\n install_script: bash-runnable script\n **kwargs:\n\n Returns:\n backend.Job\n " ]
def make_task(name='', run_name='', **kwargs) -> Task:
    """Create a local task backed by a fresh tmux session; also creates a
    dummy run if none was specified.

    :param name: task name (auto-assigned if empty).
    :param run_name: run to attach the task to.
    :returns: the registered Task.
    """
    ncluster_globals.task_launched = True
    name = ncluster_globals.auto_assign_task_name_if_needed(name)

    # tmux can't use . for session names
    tmux_session = name.replace('.', '=')
    tmux_window_id = 0
    util.log(f'killing session {tmux_session}')
    # NCLUSTER_NOKILL_TMUX preserves an existing session (debugging aid).
    if not util.is_set("NCLUSTER_NOKILL_TMUX"):
        os.system(f'tmux kill-session -t {tmux_session}')
    os.system(f'tmux new-session -s {tmux_session} -n {tmux_window_id} -d')

    task = Task(name,
                tmux_session=tmux_session,  # propagate optional args
                run_name=run_name,
                **kwargs)
    ncluster_globals.register_task(task, run_name)
    return task
[ "Create task, also create dummy run if not specified." ]
def switch_window(self, window_id: int):
    """Switch the currently active tmux window for this task. 0 is the
    default window.

    :param window_id: integer id of the tmux window to use.
    """
    # windows are numbered sequentially 0, 1, 2, ...
    # create any missing windows and make them point to the same directory
    if window_id not in self.tmux_available_window_ids:
        for i in range(max(self.tmux_available_window_ids)+1, window_id+1):
            self._run_raw(f'tmux new-window -t {self.tmux_session} -d')
            tmux_window = self.tmux_session + ':' + str(i)
            # cd the new window into the task directory.
            cmd = shlex.quote(f'cd {self.taskdir}')
            tmux_cmd = f'tmux send-keys -t {tmux_window} {cmd} Enter'
            self._run_raw(tmux_cmd)
            self.tmux_available_window_ids.append(i)

    self.tmux_window_id = window_id
[ "\n Switches currently active tmux window for given task. 0 is the default window\n Args:\n window_id: integer id of tmux window to use\n " ]
Please provide a description of the function:def _run_raw(self, cmd, ignore_errors=False): # TODO: capture stdout/stderr for feature parity with aws_backend result = os.system(cmd) if result != 0: if ignore_errors: self.log(f"command ({cmd}) failed.") assert False, "_run_raw failed"
[ "Runs command directly, skipping tmux interface" ]
def upload(self, local_fn, remote_fn=None, dont_overwrite=False):
    """Upload a file to the (local-backend) task directory via cp. If the
    target location is not specified, dumps it into the task directory
    under the same basename.

    :param local_fn: local path; may contain a '*' wildcard.
    :param remote_fn: target path; relative paths land in self.taskdir,
     '~' expands to self.homedir.
    :param dont_overwrite: skip the copy when the target already exists.
    """
    # support wildcard through glob
    if '*' in local_fn:
        for local_subfn in glob.glob(local_fn):
            self.upload(local_subfn)
        return

    if remote_fn is None:
        remote_fn = os.path.basename(local_fn)

    if dont_overwrite and self.exists(remote_fn):
        self.log("Remote file %s exists, skipping" % (remote_fn,))
        return

    if not remote_fn.startswith('/'):
        remote_fn = self.taskdir + '/' + remote_fn

    remote_fn = remote_fn.replace('~', self.homedir)
    self.log('uploading ' + local_fn + ' to ' + remote_fn)
    local_fn = os.path.abspath(local_fn)
    # -R so directories upload too.
    self._run_raw("cp -R %s %s" % (local_fn, remote_fn))
[ "Uploads file to remote instance. If location not specified, dumps it\n into default directory. Creates missing directories in path name." ]
def logdir(self):
    """Return the logging directory for this task's run, creating it on
    first access. Only the chief task of the run creates the logdir; other
    tasks delegate to the chief. See "Logdir" section of the design doc
    for the naming convention.
    """
    run_name = ncluster_globals.get_run_for_task(self)
    logdir = ncluster_globals.get_logdir(run_name)
    if logdir:
        return logdir

    # create logdir. Only single task in a group creates the logdir
    if ncluster_globals.is_chief(self, run_name):
        chief = self
    else:
        chief = ncluster_globals.get_chief(run_name)

    chief.setup_logdir()
    return ncluster_globals.get_logdir(run_name)
[ "Returns logging directory, creating one if necessary. See \"Logdir\" section of design doc on naming convention." ]
def setup_logdir(self):
    """Create the logdir for this task's run and register it globally.

    Dedupes against existing directories by appending '.01', '.02', ...
    No-op safety for non-chief tasks is handled by the caller (logdir).

    :returns: the created logdir path.
    """
    # todo: locking on logdir creation

    run_name = ncluster_globals.get_run_for_task(self)
    self.log("Creating logdir for run "+run_name)
    logdir_root = ncluster_globals.LOGDIR_ROOT
    assert logdir_root

    self.run(f'mkdir -p {logdir_root}')
    # List existing run directories to detect name collisions.
    find_command = f'find {logdir_root} -maxdepth 1 -type d'

    stdout, stderr = self.run_with_output(find_command)
    logdir = f"{logdir_root}/{run_name}"

    counter = 0
    while logdir in stdout:
        counter += 1
        new_logdir = f'{logdir_root}/{run_name}.{counter:02d}'
        self.log(f'Warning, logdir {logdir} exists, deduping to {new_logdir}')
        logdir = new_logdir
    self.run(f'mkdir -p {logdir}')

    ncluster_globals.set_logdir(run_name, logdir)
    return logdir
[ "Create logdir for task/job/run. No-op if the task is not chief (0'th task of 0'th job of run)\n " ]
def run(self, *args, **kwargs):
    """Execute the given command on every job belonging to this run."""
    for current_job in self.jobs:
        current_job.run(*args, **kwargs)
[ "Runs command on every job in the run." ]
def run_with_output(self, *args, **kwargs):
    """Run the command on every job in the run and return their outputs.

    Bug fix: the docstring promised "returns stdout" but the previous
    implementation discarded every job's output and returned None.

    :returns: list with one entry per job, each being the return value of
     that job's run_with_output (previously always None, so returning a
     list is backward compatible).
    """
    return [job.run_with_output(*args, **kwargs) for job in self.jobs]
[ "Runs command on every first job in the run, returns stdout." ]
Please provide a description of the function:def _run_raw(self, *args, **kwargs): for job in self.jobs: job._run_raw(*args, **kwargs)
[ "_run_raw on every job in the run." ]
def upload(self, *args, **kwargs):
    """Upload the given file(s) on every job belonging to this run."""
    for member in self.jobs:
        member.upload(*args, **kwargs)
[ "Runs command on every job in the run." ]
def network_setup():
    """Create (or reuse) the ncluster VPC with public internet access —
    internet gateway, route table, one public subnet per zone — then a
    security group on the region's default VPC.

    :returns: (vpc, security_group) where vpc is the DEFAULT VPC (not the
     named one created above) and security_group allows SSH/ICMP plus the
     configured public TCP/UDP ranges and all intra-group traffic.
    """
    # from https://gist.github.com/nguyendv/8cfd92fc8ed32ebb78e366f44c2daea6
    ec2 = u.get_ec2_resource()
    client = u.get_ec2_client()
    existing_vpcs = u.get_vpc_dict()
    zones = u.get_zones()

    # create VPC from scratch. Remove this if default VPC works well enough.
    vpc_name = u.get_vpc_name()
    if u.get_vpc_name() in existing_vpcs:
        print("Reusing VPC " + vpc_name)
        vpc = existing_vpcs[vpc_name]
        subnets = list(vpc.subnets.all())
        assert len(subnets) == len(
            zones), "Has %s subnets, but %s zones, something went wrong during resource creation, try delete_resources.py/create_resources.py" % (
            len(subnets), len(zones))
    else:
        print("Creating VPC " + vpc_name)
        vpc = ec2.create_vpc(CidrBlock='192.168.0.0/16')

        # enable DNS on the VPC
        response = vpc.modify_attribute(EnableDnsHostnames={"Value": True})
        assert u.is_good_response(response)
        response = vpc.modify_attribute(EnableDnsSupport={"Value": True})
        assert u.is_good_response(response)

        vpc.create_tags(Tags=u.create_name_tags(vpc_name))
        vpc.wait_until_available()

    gateways = u.get_gateway_dict(vpc)
    gateway_name = u.get_gateway_name()
    if gateway_name in gateways:
        print("Reusing gateways " + gateway_name)
    else:
        print("Creating internet gateway " + gateway_name)
        ig = ec2.create_internet_gateway()
        ig.attach_to_vpc(VpcId=vpc.id)
        ig.create_tags(Tags=u.create_name_tags(gateway_name))

        # check that attachment succeeded
        attach_state = u.extract_attr_for_match(ig.attachments, State=-1, VpcId=vpc.id)
        assert attach_state == 'available', "vpc %s is in state %s" % (vpc.id, attach_state)

        route_table = vpc.create_route_table()
        route_table_name = u.get_route_table_name()
        route_table.create_tags(Tags=u.create_name_tags(route_table_name))

        # Route everything through the new internet gateway.
        dest_cidr = '0.0.0.0/0'
        route_table.create_route(
            DestinationCidrBlock=dest_cidr,
            GatewayId=ig.id
        )
        # check success
        for route in route_table.routes:
            # result looks like this
            # ec2.Route(route_table_id='rtb-a8b438cf',
            #           destination_cidr_block='0.0.0.0/0')
            if route.destination_cidr_block == dest_cidr:
                break
        else:
            # sometimes get
            # AssertionError: Route for 0.0.0.0/0 not found in [ec2.Route(route_table_id='rtb-cd9153b0', destination_cidr_block='192.168.0.0/16')]
            # TODO: add a wait/retry?
            assert False, "Route for %s not found in %s" % (dest_cidr, route_table.routes)

        assert len(zones) <= 16  # for cidr/20 to fit into cidr/16
        ip = 0
        for zone in zones:
            cidr_block = '192.168.%d.0/20' % (ip,)
            ip += 16
            print("Creating subnet %s in zone %s" % (cidr_block, zone))
            subnet = vpc.create_subnet(CidrBlock=cidr_block,
                                       AvailabilityZone=zone)
            subnet.create_tags(Tags=[{'Key': 'Name', 'Value': f'{vpc_name}-subnet'},
                                     {'Key': 'Region', 'Value': zone}])
            response = client.modify_subnet_attribute(
                MapPublicIpOnLaunch={'Value': True},
                SubnetId=subnet.id
            )
            assert u.is_good_response(response)
            u.wait_until_available(subnet)
            assert subnet.map_public_ip_on_launch, "Subnet doesn't enable public IP by default, why?"
            route_table.associate_with_subnet(SubnetId=subnet.id)

    # Use default VPC from now on
    vpc = u.get_default_vpc()
    if not vpc:
        util.log(f"Creating default VPC for region {u.get_region()}")
        client.create_default_vpc()
    vpc = u.get_default_vpc()
    assert vpc, "Could not create default VPC?"

    existing_security_groups = u.get_security_group_dict()
    security_group_name = u.get_security_group_name()
    if security_group_name in existing_security_groups:
        print("Reusing security group " + security_group_name)
        security_group = existing_security_groups[security_group_name]
        assert security_group.vpc_id == vpc.id, f"Found security group {security_group} " \
            f"attached to {security_group.vpc_id} but expected {vpc.id}"
    else:
        print("Creating security group " + security_group_name)
        security_group = ec2.create_security_group(
            GroupName=security_group_name, Description=security_group_name,
            VpcId=vpc.id)

        security_group.create_tags(Tags=u.create_name_tags(security_group_name))

        # allow ICMP access for public ping
        security_group.authorize_ingress(
            CidrIp='0.0.0.0/0',
            IpProtocol='icmp',
            FromPort=-1,
            ToPort=-1
        )

        # open public ports
        # always include SSH port which is required for basic functionality
        assert 22 in PUBLIC_TCP_RANGES, "Must enable SSH access"
        for port in PUBLIC_TCP_RANGES:
            # Entries may be a single port or an (inclusive) (from, to) pair.
            if util.is_iterable(port):
                assert len(port) == 2
                from_port, to_port = port
            else:
                from_port, to_port = port, port
            response = security_group.authorize_ingress(IpProtocol="tcp",
                                                        CidrIp="0.0.0.0/0",
                                                        FromPort=from_port,
                                                        ToPort=to_port)
            assert u.is_good_response(response)

        for port in PUBLIC_UDP_RANGES:
            if util.is_iterable(port):
                assert len(port) == 2
                from_port, to_port = port
            else:
                from_port, to_port = port, port
            response = security_group.authorize_ingress(IpProtocol="udp",
                                                        CidrIp="0.0.0.0/0",
                                                        FromPort=from_port,
                                                        ToPort=to_port)
            assert u.is_good_response(response)

        # allow ingress within security group
        # Authorizing ingress doesn't work with names in a non-default VPC,
        # so must use more complicated syntax
        # https://github.com/boto/boto3/issues/158
        response = {}
        for protocol in ['icmp']:
            try:
                rule = {'FromPort': -1,
                        'IpProtocol': protocol,
                        'IpRanges': [],
                        'PrefixListIds': [],
                        'ToPort': -1,
                        'UserIdGroupPairs': [{'GroupId': security_group.id}]}
                response = security_group.authorize_ingress(IpPermissions=[rule])
            except Exception as e:
                # NOTE(review): on failure `response` may still be the previous
                # call's value (or {}), so response['Error'] can itself raise —
                # confirm this handler against a real duplicate-rule error.
                if response['Error']['Code'] == 'InvalidPermission.Duplicate':
                    print("Warning, got " + str(e))
                else:
                    assert False, "Failed while authorizing ingress with " + str(e)
        for protocol in ['tcp', 'udp']:
            try:
                rule = {'FromPort': 0,
                        'IpProtocol': protocol,
                        'IpRanges': [],
                        'PrefixListIds': [],
                        'ToPort': 65535,
                        'UserIdGroupPairs': [{'GroupId': security_group.id}]}
                response = security_group.authorize_ingress(IpPermissions=[rule])
            except Exception as e:
                if response['Error']['Code'] == 'InvalidPermission.Duplicate':
                    print("Warning, got " + str(e))
                else:
                    assert False, "Failed while authorizing ingress with " + str(e)

    return vpc, security_group
[ "Creates VPC if it doesn't already exists, configures it for public\n internet access, returns vpc, subnet, security_group" ]
def keypair_setup():
    """Create the EC2 keypair if necessary and save the private key
    locally; when the keypair already exists, verify the local .pem file
    is present and non-empty.

    :returns: the keypair (boto3 KeyPair resource or describe dict entry).
    """
    os.system('mkdir -p ' + u.PRIVATE_KEY_LOCATION)

    keypair_name = u.get_keypair_name()
    keypair = u.get_keypair_dict().get(keypair_name, None)
    keypair_fn = u.get_keypair_fn()
    if keypair:
        print("Reusing keypair " + keypair_name)
        # check that local pem file exists and is readable
        assert os.path.exists(
            keypair_fn), "Keypair %s exists, but corresponding .pem file %s is not found, delete keypair %s through console and run again to recreate keypair/.pem together" % (
            keypair_name, keypair_fn, keypair_name)
        keypair_contents = open(keypair_fn).read()
        assert len(keypair_contents) > 0
    else:
        print("Creating keypair " + keypair_name)
        ec2 = u.get_ec2_resource()
        # Refuse to overwrite a stale local key for a keypair AWS no longer has.
        assert not os.path.exists(
            keypair_fn), "previous keypair exists, delete it with 'sudo rm %s' and also delete corresponding keypair through console" % (
            keypair_fn)
        keypair = ec2.create_key_pair(KeyName=keypair_name)

        open(keypair_fn, 'w').write(keypair.key_material)
        # SSH refuses keys that are world-readable.
        os.system('chmod 400 ' + keypair_fn)

    return keypair
[ "Creates keypair if necessary, saves private key locally, returns contents\n of private key file." ]
def placement_group_setup(group_name):
    """Create the cluster placement group if necessary.

    :param group_name: name of the placement group.
    :returns: the placement group (existing or newly created).
    """
    existing_placement_groups = u.get_placement_group_dict()

    group = existing_placement_groups.get(group_name, None)
    if group:
        assert group.state == 'available'
        assert group.strategy == 'cluster'
        print("Reusing group ", group.name)
        return group

    print("Creating group " + group_name)
    ec2 = u.get_ec2_resource()
    group = ec2.create_placement_group(GroupName=group_name,
                                      Strategy='cluster')
    return group
[ "Creates placement_group group if necessary. Returns the placement group\n (existing or newly created)." ]
def wait_for_file(self, fn: str, max_wait_sec: int = 3600 * 24 * 365,
                  check_interval: float = 0.02) -> bool:
    """Poll the task machine until `fn` exists, for at most max_wait_sec.

    :param fn: filename on the task machine.
    :param max_wait_sec: how long to wait, in seconds.
    :param check_interval: polling period, in seconds.
    :returns: False when cut short by the max_wait_sec limit, True otherwise.
    """
    print("Waiting for file", fn)
    deadline = time.time() + max_wait_sec
    while not self.exists(fn):
        if time.time() > deadline:
            util.log(f"Timeout exceeded ({max_wait_sec} sec) for {fn}")
            return False
        time.sleep(check_interval)
    return True
[ "\n Waits for file maximum of max_wait_sec. Returns True if file was detected within specified max_wait_sec\n Args:\n fn: filename on task machine\n max_wait_sec: how long to wait in seconds\n check_interval: how often to check in seconds\n Returns:\n False if waiting was was cut short by max_wait_sec limit, True otherwise\n " ]
Please provide a description of the function:def upload(self, local_fn: str, remote_fn: str = '', dont_overwrite: bool = False): raise NotImplementedError()
[ "Uploads given file to the task. If remote_fn is not specified, dumps it\n into task current directory with the same name.\n\n Args:\n local_fn: location of file locally\n remote_fn: location of file on task\n dont_overwrite: if True, will be no-op if target file exists\n " ]
Please provide a description of the function:def _non_blocking_wrapper(self, method, *args, **kwargs): exceptions = [] def task_run(task): try: getattr(task, method)(*args, **kwargs) except Exception as e: exceptions.append(e) threads = [threading.Thread(name=f'task_{method}_{i}', target=task_run, args=[t]) for i, t in enumerate(self.tasks)] for thread in threads: thread.start() for thread in threads: thread.join() if exceptions: raise exceptions[0]
[ "Runs given method on every task in the job. Blocks until all tasks finish. Propagates exception from first\n failed task." ]
Please provide a description of the function:def get_vpc_dict(): client = get_ec2_client() response = client.describe_vpcs() assert is_good_response(response) result = OrderedDict() ec2 = get_ec2_resource() for vpc_response in response['Vpcs']: key = get_name(vpc_response.get('Tags', [])) if not key or key == EMPTY_NAME: # skip VPC's that don't have a name assigned continue if key in result: util.log(f"Warning: Duplicate VPC group {key} in {response}") if DUPLICATE_CHECKING: assert False result[key] = ec2.Vpc(vpc_response['VpcId']) return result
[ "Returns dictionary of named VPCs {name: vpc}\n\n Assert fails if there's more than one VPC with same name." ]
Please provide a description of the function:def get_default_vpc(): ec2 = get_ec2_resource() for vpc in ec2.vpcs.all(): if vpc.is_default: return vpc
[ "\n Return default VPC or none if not present\n\n " ]
Please provide a description of the function:def get_subnet_dict(): subnet_dict = {} vpc = get_vpc() for subnet in vpc.subnets.all(): zone = subnet.availability_zone assert zone not in subnet_dict, "More than one subnet in %s, why?" % (zone,) subnet_dict[zone] = subnet return subnet_dict
[ "Returns dictionary of \"availability zone\" -> subnet for current VPC." ]
Please provide a description of the function:def get_efs_dict(): # there's no EC2 resource for EFS objects, so return EFS_ID instead # https://stackoverflow.com/questions/47870342/no-ec2-resource-for-efs-objects efs_client = get_efs_client() response = call_with_retries(efs_client.describe_file_systems, 'efs_client.describe_file_systems') assert is_good_response(response) result = OrderedDict() for efs_response in response['FileSystems']: fs_id = efs_response['FileSystemId'] tag_response = call_with_retries(efs_client.describe_tags, "efs_client.describe_tags", FileSystemId=fs_id, retry_interval_sec=2) assert is_good_response(tag_response) key = get_name(tag_response['Tags']) if not key or key == EMPTY_NAME: # skip EFS's without a name continue assert key not in result result[key] = fs_id return result
[ "Returns dictionary of {efs_name: efs_id}" ]
Please provide a description of the function:def get_placement_group_dict(): client = get_ec2_client() response = client.describe_placement_groups() assert is_good_response(response) result = OrderedDict() ec2 = get_ec2_resource() for placement_group_response in response['PlacementGroups']: key = placement_group_response['GroupName'] if key in result: util.log(f"Warning: Duplicate placement_group group {key}") if DUPLICATE_CHECKING: assert False result[key] = ec2.PlacementGroup(key) return result
[ "Returns dictionary of {placement_group_name: placement_group} EC2 resource objects" ]
Please provide a description of the function:def get_security_group_dict(): client = get_ec2_client() response = client.describe_security_groups() assert is_good_response(response) result = OrderedDict() ec2 = get_ec2_resource() for security_group_response in response['SecurityGroups']: key = get_name(security_group_response.get('Tags', [])) if not key or key == EMPTY_NAME: continue # ignore unnamed security groups # key = security_group_response['GroupName'] if key in result: util.log(f"Warning: Duplicate security group {key}") if DUPLICATE_CHECKING: assert key not in result, ("Duplicate security group " + key) result[key] = ec2.SecurityGroup(security_group_response['GroupId']) return result
[ "Returns dictionary of named security groups {name: securitygroup}." ]
Please provide a description of the function:def get_keypair_dict(): client = get_ec2_client() response = client.describe_key_pairs() assert is_good_response(response) result = {} ec2 = get_ec2_resource() for keypair in response['KeyPairs']: keypair_name = keypair.get('KeyName', '') if keypair_name in result: util.log(f"Warning: Duplicate key {keypair_name}") if DUPLICATE_CHECKING: assert keypair_name not in result, "Duplicate key " + keypair_name result[keypair_name] = ec2.KeyPair(keypair_name) return result
[ "Returns dictionary of {keypairname: keypair}" ]
Please provide a description of the function:def get_prefix(): name = os.environ.get('NCLUSTER_PREFIX', DEFAULT_PREFIX) if name != DEFAULT_PREFIX: validate_prefix(name) return name
[ "Global prefix to identify ncluster created resources name used to identify ncluster created resources,\n (name of EFS, VPC, keypair prefixes), can be changed through $NCLUSTER_PREFIX for debugging purposes. " ]
Please provide a description of the function:def get_keypair_name(): username = get_username() assert '-' not in username, "username must not contain -, change $USER" validate_aws_name(username) assert len(username) < 30 # to avoid exceeding AWS 127 char limit return get_prefix() + '-' + username
[ "Returns current keypair name." ]