body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars)
---|---|---|---|---|---|---|---|
def _ValidateTestPathPartName(name):
'Checks whether a Master, Bot or TestMetadata name is OK.'
if (name.startswith('__') and name.endswith('__')):
raise BadRequestError(('Invalid name: "%s". Names cannot start and end with "__".' % name)) | 3,896,287,173,283,997,700 | Checks whether a Master, Bot or TestMetadata name is OK. | dashboard/dashboard/add_point.py | _ValidateTestPathPartName | bopopescu/catapult-2 | python | def _ValidateTestPathPartName(name):
if (name.startswith('__') and name.endswith('__')):
raise BadRequestError(('Invalid name: "%s". Names cannot start and end with "__".' % name)) |
def _ValidateRowId(row_dict, test_map):
'Checks whether the ID for a Row is OK.\n\n Args:\n row_dict: A dictionary with new point properties, including "revision".\n test_map: A dictionary mapping test paths to the last previously added\n revision for each test.\n\n Raises:\n BadRequestError: The revision is not acceptable for some reason.\n '
row_id = GetAndValidateRowId(row_dict)
(master, bot, test) = (row_dict['master'], row_dict['bot'], row_dict['test'])
test_path = ('%s/%s/%s' % (master, bot, test))
last_row_id = test_map.get(test_path)
if (not last_row_id):
logging.warning('Test %s has no last added revision entry.', test_path)
return
allow_jump = (master.endswith('Internal') or (master.endswith('QA') and bot.startswith('release-tests-')))
if (not _IsAcceptableRowId(row_id, last_row_id, allow_jump=allow_jump)):
raise BadRequestError(('Invalid ID (revision) %d; compared to previous ID %s, it was larger or smaller by too much.' % (row_id, last_row_id))) | 4,812,546,671,652,088,000 | Checks whether the ID for a Row is OK.
Args:
row_dict: A dictionary with new point properties, including "revision".
test_map: A dictionary mapping test paths to the last previously added
revision for each test.
Raises:
BadRequestError: The revision is not acceptable for some reason. | dashboard/dashboard/add_point.py | _ValidateRowId | bopopescu/catapult-2 | python | def _ValidateRowId(row_dict, test_map):
'Checks whether the ID for a Row is OK.\n\n Args:\n row_dict: A dictionary with new point properties, including "revision".\n test_map: A dictionary mapping test paths to the last previously added\n revision for each test.\n\n Raises:\n BadRequestError: The revision is not acceptable for some reason.\n '
row_id = GetAndValidateRowId(row_dict)
(master, bot, test) = (row_dict['master'], row_dict['bot'], row_dict['test'])
test_path = ('%s/%s/%s' % (master, bot, test))
last_row_id = test_map.get(test_path)
if (not last_row_id):
logging.warning('Test %s has no last added revision entry.', test_path)
return
allow_jump = (master.endswith('Internal') or (master.endswith('QA') and bot.startswith('release-tests-')))
if (not _IsAcceptableRowId(row_id, last_row_id, allow_jump=allow_jump)):
raise BadRequestError(('Invalid ID (revision) %d; compared to previous ID %s, it was larger or smaller by too much.' % (row_id, last_row_id))) |
def _IsAcceptableRowId(row_id, last_row_id, allow_jump=False):
'Checks whether the given row id (aka revision) is not too large or small.\n\n For each data series (i.e. TestMetadata entity), we assume that row IDs are\n monotonically increasing. On a given chart, points are sorted by these\n row IDs. This way, points can arrive out of order but still be shown\n correctly in the chart.\n\n However, sometimes a bot might start to use a different *type* of row ID;\n for example it might change from revision numbers or build numbers to\n timestamps, or from timestamps to build numbers. This causes a lot of\n problems, including points being put out of order.\n\n If a sender of data actually wants to switch to a different type of\n row ID, it would be much cleaner for them to start sending it under a new\n chart name.\n\n Args:\n row_id: The proposed Row entity id (usually sent as "revision")\n last_row_id: The previous Row id, or None if there were none previous.\n\n Returns:\n True if acceptable, False otherwise.\n '
if (last_row_id is None):
return True
if (row_id <= 0):
return False
if (row_id < (0.5 * last_row_id)):
return False
if (allow_jump and (1470009600 < row_id < 1483228800)):
return True
if (row_id > (2 * last_row_id)):
return False
return True | -1,879,351,429,549,624,300 | Checks whether the given row id (aka revision) is not too large or small.
For each data series (i.e. TestMetadata entity), we assume that row IDs are
monotonically increasing. On a given chart, points are sorted by these
row IDs. This way, points can arrive out of order but still be shown
correctly in the chart.
However, sometimes a bot might start to use a different *type* of row ID;
for example it might change from revision numbers or build numbers to
timestamps, or from timestamps to build numbers. This causes a lot of
problems, including points being put out of order.
If a sender of data actually wants to switch to a different type of
row ID, it would be much cleaner for them to start sending it under a new
chart name.
Args:
row_id: The proposed Row entity id (usually sent as "revision")
last_row_id: The previous Row id, or None if there were none previous.
Returns:
True if acceptable, False otherwise. | dashboard/dashboard/add_point.py | _IsAcceptableRowId | bopopescu/catapult-2 | python | def _IsAcceptableRowId(row_id, last_row_id, allow_jump=False):
'Checks whether the given row id (aka revision) is not too large or small.\n\n For each data series (i.e. TestMetadata entity), we assume that row IDs are\n monotonically increasing. On a given chart, points are sorted by these\n row IDs. This way, points can arrive out of order but still be shown\n correctly in the chart.\n\n However, sometimes a bot might start to use a different *type* of row ID;\n for example it might change from revision numbers or build numbers to\n timestamps, or from timestamps to build numbers. This causes a lot of\n problems, including points being put out of order.\n\n If a sender of data actually wants to switch to a different type of\n row ID, it would be much cleaner for them to start sending it under a new\n chart name.\n\n Args:\n row_id: The proposed Row entity id (usually sent as "revision")\n last_row_id: The previous Row id, or None if there were none previous.\n\n Returns:\n True if acceptable, False otherwise.\n '
if (last_row_id is None):
return True
if (row_id <= 0):
return False
if (row_id < (0.5 * last_row_id)):
return False
if (allow_jump and (1470009600 < row_id < 1483228800)):
return True
if (row_id > (2 * last_row_id)):
return False
return True |
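
The acceptance window described in the docstring above is easiest to see with concrete numbers. The snippet below is only an illustrative probe, not part of the dataset row; it assumes the module is importable as `dashboard.dashboard.add_point` (inferred from the path column), and the IDs are made up.

```python
# Hypothetical probe of _IsAcceptableRowId; the import path is an assumption.
from dashboard.dashboard import add_point

last = 100000
assert add_point._IsAcceptableRowId(150000, last)        # inside the [0.5x, 2x] window
assert not add_point._IsAcceptableRowId(40000, last)     # less than half of the previous ID
assert not add_point._IsAcceptableRowId(300000, last)    # more than double the previous ID
assert add_point._IsAcceptableRowId(1475000000, last, allow_jump=True)  # 2016 timestamp jump window
assert add_point._IsAcceptableRowId(123, None)           # no previous ID recorded: accepted
```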
def GetAndValidateRowId(row_dict):
"Returns the integer ID for a new Row.\n\n This method is also responsible for validating the input fields related\n to making the new row ID.\n\n Args:\n row_dict: A dictionary obtained from the input JSON.\n\n Returns:\n An integer row ID.\n\n Raises:\n BadRequestError: The input wasn't formatted properly.\n "
if ('revision' not in row_dict):
raise BadRequestError('Required field "revision" missing.')
try:
return int(row_dict['revision'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "revision", should be numerical.') | 4,024,942,979,099,729,000 | Returns the integer ID for a new Row.
This method is also responsible for validating the input fields related
to making the new row ID.
Args:
row_dict: A dictionary obtained from the input JSON.
Returns:
An integer row ID.
Raises:
BadRequestError: The input wasn't formatted properly. | dashboard/dashboard/add_point.py | GetAndValidateRowId | bopopescu/catapult-2 | python | def GetAndValidateRowId(row_dict):
"Returns the integer ID for a new Row.\n\n This method is also responsible for validating the input fields related\n to making the new row ID.\n\n Args:\n row_dict: A dictionary obtained from the input JSON.\n\n Returns:\n An integer row ID.\n\n Raises:\n BadRequestError: The input wasn't formatted properly.\n "
if ('revision' not in row_dict):
raise BadRequestError('Required field "revision" missing.')
try:
return int(row_dict['revision'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "revision", should be numerical.') |
def GetAndValidateRowProperties(row):
'From the object received, make a dictionary of properties for a Row.\n\n This includes the default "value" and "error" columns as well as all\n supplemental columns, but it doesn\'t include "revision", and it doesn\'t\n include input fields that are properties of the parent TestMetadata, such as\n "units".\n\n This method is responsible for validating all properties that are to be\n properties of the new Row.\n\n Args:\n row: A dictionary obtained from the input JSON.\n\n Returns:\n A dictionary of the properties and property values to set when creating\n a Row. This will include "value" and "error" as well as all supplemental\n columns.\n\n Raises:\n BadRequestError: The properties weren\'t formatted correctly.\n '
columns = {}
if ('value' not in row):
raise BadRequestError('No "value" given.')
try:
columns['value'] = float(row['value'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "value", should be numerical.')
if ('error' in row):
try:
error = float(row['error'])
columns['error'] = error
except (ValueError, TypeError):
logging.warn('Bad value for "error".')
columns.update(_GetSupplementalColumns(row))
return columns | -2,355,803,680,181,531,000 | From the object received, make a dictionary of properties for a Row.
This includes the default "value" and "error" columns as well as all
supplemental columns, but it doesn't include "revision", and it doesn't
include input fields that are properties of the parent TestMetadata, such as
"units".
This method is responsible for validating all properties that are to be
properties of the new Row.
Args:
row: A dictionary obtained from the input JSON.
Returns:
A dictionary of the properties and property values to set when creating
a Row. This will include "value" and "error" as well as all supplemental
columns.
Raises:
BadRequestError: The properties weren't formatted correctly. | dashboard/dashboard/add_point.py | GetAndValidateRowProperties | bopopescu/catapult-2 | python | def GetAndValidateRowProperties(row):
'From the object received, make a dictionary of properties for a Row.\n\n This includes the default "value" and "error" columns as well as all\n supplemental columns, but it doesn\'t include "revision", and it doesn\'t\n include input fields that are properties of the parent TestMetadata, such as\n "units".\n\n This method is responsible for validating all properties that are to be\n properties of the new Row.\n\n Args:\n row: A dictionary obtained from the input JSON.\n\n Returns:\n A dictionary of the properties and property values to set when creating\n a Row. This will include "value" and "error" as well as all supplemental\n columns.\n\n Raises:\n BadRequestError: The properties weren\'t formatted correctly.\n '
columns = {}
if ('value' not in row):
raise BadRequestError('No "value" given.')
try:
columns['value'] = float(row['value'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "value", should be numerical.')
if ('error' in row):
try:
error = float(row['error'])
columns['error'] = error
except (ValueError, TypeError):
logging.warn('Bad value for "error".')
columns.update(_GetSupplementalColumns(row))
return columns |
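
The validation above keeps `value`, silently drops a malformed `error`, and merges supplemental columns. A hypothetical call, reusing the assumed `add_point` import from the sketch above; note the module is Python 2 code, since `_GetSupplementalColumns` relies on `dict.iteritems`.

```python
# Illustrative only; assumes Python 2 and that add_point is importable as above.
row = {'value': '24.66', 'error': 'oops', 'supplemental_columns': {'d_mean': 23.5}}
columns = add_point.GetAndValidateRowProperties(row)
assert columns == {'value': 24.66, 'd_mean': 23.5}  # the bad "error" is logged and dropped
```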
def _GetSupplementalColumns(row):
'Gets a dict of supplemental columns.\n\n If any columns are invalid, a warning is logged and they just aren\'t included,\n but no exception is raised.\n\n Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,\n and annotation columns. These columns must follow formatting rules for\n their type. Invalid columns are dropped with an error log, but the valid\n data will still be graphed.\n\n Args:\n row: A dict, possibly with the key "supplemental_columns", the value of\n which should be a dict.\n\n Returns:\n A dict of valid supplemental columns.\n '
columns = {}
for (name, value) in row.get('supplemental_columns', {}).iteritems():
if (len(columns) == _MAX_NUM_COLUMNS):
logging.warn('Too many columns, some being dropped.')
break
value = _CheckSupplementalColumn(name, value)
if value:
columns[name] = value
return columns | -916,637,843,531,906,400 | Gets a dict of supplemental columns.
If any columns are invalid, a warning is logged and they just aren't included,
but no exception is raised.
Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,
and annotation columns. These columns must follow formatting rules for
their type. Invalid columns are dropped with an error log, but the valid
data will still be graphed.
Args:
row: A dict, possibly with the key "supplemental_columns", the value of
which should be a dict.
Returns:
A dict of valid supplemental columns. | dashboard/dashboard/add_point.py | _GetSupplementalColumns | bopopescu/catapult-2 | python | def _GetSupplementalColumns(row):
'Gets a dict of supplemental columns.\n\n If any columns are invalid, a warning is logged and they just aren\'t included,\n but no exception is raised.\n\n Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,\n and annotation columns. These columns must follow formatting rules for\n their type. Invalid columns are dropped with an error log, but the valid\n data will still be graphed.\n\n Args:\n row: A dict, possibly with the key "supplemental_columns", the value of\n which should be a dict.\n\n Returns:\n A dict of valid supplemental columns.\n '
columns = {}
for (name, value) in row.get('supplemental_columns', {}).iteritems():
if (len(columns) == _MAX_NUM_COLUMNS):
logging.warn('Too many columns, some being dropped.')
break
value = _CheckSupplementalColumn(name, value)
if value:
columns[name] = value
return columns |
def _CheckSupplementalColumn(name, value):
'Returns a possibly modified value for a supplemental column, or None.'
name = str(name)
if (len(name) > _MAX_COLUMN_NAME_LENGTH):
logging.warn('Supplemental column name too long.')
return None
if (name[:2] not in ('d_', 'r_', 'a_')):
logging.warn('Bad column name "%s", invalid prefix.', name)
return None
if name.startswith('d_'):
try:
value = float(value)
except (ValueError, TypeError):
logging.warn('Bad value for column "%s", should be numerical.', name)
return None
if name.startswith('r_'):
revision_patterns = ['^\\d+$', '^\\d+\\.\\d+\\.\\d+\\.\\d+$', '^[A-Fa-f0-9]{40}$']
if ((not value) or (len(str(value)) > _STRING_COLUMN_MAX_LENGTH) or (not any((re.match(p, str(value)) for p in revision_patterns)))):
logging.warn('Bad value for revision column "%s".', name)
return None
value = str(value)
if name.startswith('a_'):
if (len(str(value)) > _STRING_COLUMN_MAX_LENGTH):
logging.warn('Value for "%s" too long, max length is %d.', name, _STRING_COLUMN_MAX_LENGTH)
return None
return value | 6,983,524,817,797,946,000 | Returns a possibly modified value for a supplemental column, or None. | dashboard/dashboard/add_point.py | _CheckSupplementalColumn | bopopescu/catapult-2 | python | def _CheckSupplementalColumn(name, value):
name = str(name)
if (len(name) > _MAX_COLUMN_NAME_LENGTH):
logging.warn('Supplemental column name too long.')
return None
if (name[:2] not in ('d_', 'r_', 'a_')):
logging.warn('Bad column name "%s", invalid prefix.', name)
return None
if name.startswith('d_'):
try:
value = float(value)
except (ValueError, TypeError):
logging.warn('Bad value for column "%s", should be numerical.', name)
return None
if name.startswith('r_'):
revision_patterns = ['^\\d+$', '^\\d+\\.\\d+\\.\\d+\\.\\d+$', '^[A-Fa-f0-9]{40}$']
if ((not value) or (len(str(value)) > _STRING_COLUMN_MAX_LENGTH) or (not any((re.match(p, str(value)) for p in revision_patterns)))):
logging.warn('Bad value for revision column "%s".', name)
return None
value = str(value)
if name.startswith('a_'):
if (len(str(value)) > _STRING_COLUMN_MAX_LENGTH):
logging.warn('Value for "%s" too long, max length is %d.', name, _STRING_COLUMN_MAX_LENGTH)
return None
return value |
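
The three accepted prefixes are treated differently: `d_` values are coerced to float, `r_` values must look like a revision and are stringified, and `a_` values are kept as-is if short enough. A hypothetical illustration, with the same assumed import as the earlier sketches:

```python
# Illustrative only; assumes add_point is importable as in the earlier sketches.
assert add_point._CheckSupplementalColumn('d_mean', '23.5') == 23.5          # d_: coerced to float
assert add_point._CheckSupplementalColumn('r_webkit', 423340) == '423340'    # r_: validated, stringified
assert add_point._CheckSupplementalColumn('a_trace_url', 'http://x') == 'http://x'  # a_: kept as a string
assert add_point._CheckSupplementalColumn('bogus', 1) is None                # unknown prefix: dropped
```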
def post(self):
'Validates data parameter and add task to queue to process points.\n\n The row data comes from a "data" parameter, which is a JSON encoding of a\n list of dictionaries, each of which represents one performance result\n (one point in a graph) and associated data.\n\n [\n {\n "master": "ChromiumPerf",\n "bot": "xp-release-dual-core",\n "test": "dromaeo/dom/modify",\n "revision": 123456789,\n "value": 24.66,\n "error": 2.33,\n "units": "ms",\n "supplemental_columns": {\n "d_median": 24234.12,\n "d_mean": 23.553,\n "r_webkit": 423340,\n ...\n },\n ...\n },\n ...\n ]\n\n In general, the required fields are "master", "bot", "test" (which together\n form the test path which identifies the series that this point belongs to),\n and "revision" and "value", which are the X and Y values for the point.\n\n This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),\n the first producer of which is Telemetry. Telemetry provides lightweight\n serialization of values it produces, as JSON. If a dashboard JSON object is\n passed, it will be a single dict rather than a list, with the test,\n value, error, and units fields replaced by a chart_data field containing a\n Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is\n processed by converting it into rows (which can be viewed as Dashboard JSON\n v0).\n\n {\n "master": "ChromiumPerf",\n <other row fields>,\n "chart_data": {\n "foo": {\n "bar": {\n "type": "scalar",\n "name": "foo.bar",\n "units": "ms",\n "value": 4.2,\n },\n "summary": {\n "type": "list_of_scalar_values",\n "name": "foo",\n "units": "ms",\n "values": [4.2, 5.7, 6.8],\n "std": 1.30512,\n },\n },\n }\n\n Request parameters:\n data: JSON encoding of a list of dictionaries.\n\n Outputs:\n Empty 200 response with if successful,\n 200 response with warning message if optional data is invalid,\n 403 response with error message if sender IP is not white-listed,\n 400 response with error message if required data is invalid.\n 500 with error message otherwise.\n '
datastore_hooks.SetPrivilegedRequest()
if (not self._CheckIpAgainstWhitelist()):
return
data = self.request.get('data')
if (not data):
self.ReportError('Missing "data" parameter.', status=400)
return
try:
data = json.loads(self.request.get('data'))
except ValueError:
self.ReportError('Invalid JSON string.', status=400)
return
logging.info('Received data: %s', data)
try:
if (type(data) is dict):
if data.get('chart_data'):
data = _DashboardJsonToRawRows(data)
if (not data):
return
else:
self.ReportError('Data should be a list of rows or a Dashboard JSON v1.0 dict.', status=400)
return
test_map = _ConstructTestPathMap(data)
for row_dict in data:
_ValidateRowDict(row_dict, test_map)
_AddTasks(data)
except BadRequestError as error:
self.ReportError(error.message, status=400) | 2,781,788,386,795,497,000 | Validates data parameter and add task to queue to process points.
The row data comes from a "data" parameter, which is a JSON encoding of a
list of dictionaries, each of which represents one performance result
(one point in a graph) and associated data.
[
{
"master": "ChromiumPerf",
"bot": "xp-release-dual-core",
"test": "dromaeo/dom/modify",
"revision": 123456789,
"value": 24.66,
"error": 2.33,
"units": "ms",
"supplemental_columns": {
"d_median": 24234.12,
"d_mean": 23.553,
"r_webkit": 423340,
...
},
...
},
...
]
In general, the required fields are "master", "bot", "test" (which together
form the test path which identifies the series that this point belongs to),
and "revision" and "value", which are the X and Y values for the point.
This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),
the first producer of which is Telemetry. Telemetry provides lightweight
serialization of values it produces, as JSON. If a dashboard JSON object is
passed, it will be a single dict rather than a list, with the test,
value, error, and units fields replaced by a chart_data field containing a
Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is
processed by converting it into rows (which can be viewed as Dashboard JSON
v0).
{
"master": "ChromiumPerf",
<other row fields>,
"chart_data": {
"foo": {
"bar": {
"type": "scalar",
"name": "foo.bar",
"units": "ms",
"value": 4.2,
},
"summary": {
"type": "list_of_scalar_values",
"name": "foo",
"units": "ms",
"values": [4.2, 5.7, 6.8],
"std": 1.30512,
},
},
}
Request parameters:
data: JSON encoding of a list of dictionaries.
Outputs:
Empty 200 response if successful,
200 response with warning message if optional data is invalid,
403 response with error message if sender IP is not white-listed,
400 response with error message if required data is invalid.
500 with error message otherwise. | dashboard/dashboard/add_point.py | post | bopopescu/catapult-2 | python | def post(self):
'Validates data parameter and add task to queue to process points.\n\n The row data comes from a "data" parameter, which is a JSON encoding of a\n list of dictionaries, each of which represents one performance result\n (one point in a graph) and associated data.\n\n [\n {\n "master": "ChromiumPerf",\n "bot": "xp-release-dual-core",\n "test": "dromaeo/dom/modify",\n "revision": 123456789,\n "value": 24.66,\n "error": 2.33,\n "units": "ms",\n "supplemental_columns": {\n "d_median": 24234.12,\n "d_mean": 23.553,\n "r_webkit": 423340,\n ...\n },\n ...\n },\n ...\n ]\n\n In general, the required fields are "master", "bot", "test" (which together\n form the test path which identifies the series that this point belongs to),\n and "revision" and "value", which are the X and Y values for the point.\n\n This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),\n the first producer of which is Telemetry. Telemetry provides lightweight\n serialization of values it produces, as JSON. If a dashboard JSON object is\n passed, it will be a single dict rather than a list, with the test,\n value, error, and units fields replaced by a chart_data field containing a\n Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is\n processed by converting it into rows (which can be viewed as Dashboard JSON\n v0).\n\n {\n "master": "ChromiumPerf",\n <other row fields>,\n "chart_data": {\n "foo": {\n "bar": {\n "type": "scalar",\n "name": "foo.bar",\n "units": "ms",\n "value": 4.2,\n },\n "summary": {\n "type": "list_of_scalar_values",\n "name": "foo",\n "units": "ms",\n "values": [4.2, 5.7, 6.8],\n "std": 1.30512,\n },\n },\n }\n\n Request parameters:\n data: JSON encoding of a list of dictionaries.\n\n Outputs:\n Empty 200 response with if successful,\n 200 response with warning message if optional data is invalid,\n 403 response with error message if sender IP is not white-listed,\n 400 response with error message if required data is invalid.\n 500 with error message otherwise.\n '
datastore_hooks.SetPrivilegedRequest()
if (not self._CheckIpAgainstWhitelist()):
return
data = self.request.get('data')
if (not data):
self.ReportError('Missing "data" parameter.', status=400)
return
try:
data = json.loads(self.request.get('data'))
except ValueError:
self.ReportError('Invalid JSON string.', status=400)
return
logging.info('Received data: %s', data)
try:
if (type(data) is dict):
if data.get('chart_data'):
data = _DashboardJsonToRawRows(data)
if (not data):
return
else:
self.ReportError('Data should be a list of rows or a Dashboard JSON v1.0 dict.', status=400)
return
test_map = _ConstructTestPathMap(data)
for row_dict in data:
_ValidateRowDict(row_dict, test_map)
_AddTasks(data)
except BadRequestError as error:
self.ReportError(error.message, status=400) |
def S_IFMT(mode):
"Return the portion of the file's mode that describes the\n file type.\n "
return (mode & 61440) | -3,216,242,946,293,737,000 | Return the portion of the file's mode that describes the
file type. | others/explorer_standalone.py | S_IFMT | eggfly/M5StickVComputer | python | def S_IFMT(mode):
"Return the portion of the file's mode that describes the\n file type.\n "
return (mode & 61440) |
def S_ISDIR(mode):
'Return True if mode is from a directory.'
return (S_IFMT(mode) == S_IFDIR) | 4,509,911,602,829,706,000 | Return True if mode is from a directory. | others/explorer_standalone.py | S_ISDIR | eggfly/M5StickVComputer | python | def S_ISDIR(mode):
return (S_IFMT(mode) == S_IFDIR) |
def belong(in_list1: list, in_list2: list) -> bool:
'\n Check whether or not all the elements in list in_list1 belong to in_list2\n :param in_list1: the source list\n :param in_list2: the target list where to find the element in in_list1\n :return: return True if the statement is verified otherwise return False\n '
return all(((element in in_list2) for element in in_list1)) | -9,019,206,028,006,766,000 | Check whether or not all the elements in list in_list1 belong to in_list2
:param in_list1: the source list
:param in_list2: the target list where to find the element in in_list1
:return: return True if the statement is verified otherwise return False | Python/List/14.belong.py | belong | angelmpalomares/ModelAndLanguagesForBioInformatics | python | def belong(in_list1: list, in_list2: list) -> bool:
'\n Check whether or not all the elements in list in_list1 belong to in_list2\n :param in_list1: the source list\n :param in_list2: the target list where to find the element in in_list1\n :return: return True if the statement is verified otherwise return False\n '
return all(((element in in_list2) for element in in_list1)) |
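
A few quick checks of `belong` as defined in the row above (illustrative only):

```python
assert belong([1, 2], [1, 2, 3]) is True
assert belong([1, 4], [1, 2, 3]) is False
assert belong([], [1, 2, 3]) is True   # vacuously true: all() over an empty generator
```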
def add_scaling(spot_fleet, template, cluster_name):
' Add scaling resources to a cluster '
ssm_param = Parameter('Scale{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Type='String', Value='0', Name=Sub('/ecs-maestro/${ClusterName}/${Version}/scaletime'))
template.add_resource(ssm_param)
function_name = sanitize_cfn_resource_name(cluster_name)
autoscaling_role = Role('AutoscalingRole', AssumeRolePolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': 'sts:AssumeRole', 'Principal': {'Service': 'lambda.amazonaws.com'}}]}, Policies=[Policy(PolicyName='ec2-spot-fleet-scaler', PolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': ['cloudwatch:Get*', 'ec2:DescribeSpotFleetRequests', 'ec2:ModifySpotFleetRequest', 'logs:*', 'ecs:ListContainerInstances', 'ecs:Update*', 'ecs:ListTasks', 's3:GetEncryptionConfiguration'], 'Resource': '*'}, {'Effect': 'Allow', 'Action': ['ssm:Get*', 'ssm:Put*', 'ssm:Delete*'], 'Resource': [{'Fn::Sub': 'arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/ecs-maestro/${ClusterName}/*'}]}]}), Policy(PolicyName='DeleteStack', PolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': ['lambda:InvokeFunction'], 'Resource': [{'Fn::Sub': (('arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:' + function_name) + 'ASGCleanupLambda')}]}]})])
template.add_resource(autoscaling_role)
scaling_lambda = Function('ScalingLambda{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Code=Code(S3Bucket=Sub('${S3Bucket}'), S3Key=Sub('${S3Prefix}/deployment.zip')), Handler='scaling.scale_spot.lambda_handler', Role=GetAtt(autoscaling_role, 'Arn'), Environment=Environment(Variables={'CLUSTER_NAME': Sub('${ClusterName}'), 'SPOT_FLEET': Ref('SpotFleet{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name')))), 'STATUS': Sub('${Status}'), 'VERSION': Sub('${Version}'), 'SCALE_IN_THRESHOLD': Sub('${SpotTaskThresholdIn}'), 'SCALE_OUT_THRESHOLD': Sub('${SpotTaskThresholdOut}'), 'MAX_WEIGHT': Sub('${SpotMaxWeight}'), 'MIN_WEIGHT': Sub('${SpotMinWeight}')}), Timeout=900, MemorySize=128, Runtime='python3.7')
template.add_resource(scaling_lambda)
CronScaling = Rule('CronScaling{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), ScheduleExpression='rate(1 minute)', Description='Cron for cluster stats', Targets=[Target(Id='1', Arn=GetAtt(scaling_lambda, 'Arn'))])
template.add_resource(CronScaling)
ScalingPerm = Permission('ScalePerm{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Action='lambda:InvokeFunction', FunctionName=GetAtt(scaling_lambda, 'Arn'), Principal='events.amazonaws.com', SourceArn=GetAtt(CronScaling, 'Arn'))
template.add_resource(ScalingPerm) | 3,883,582,814,284,162,600 | Add scaling resources to a cluster | ecs_cluster_deployer/compute/lambda_scaler.py | add_scaling | apollusehs-devops/ecs-cluster-deployer | python | def add_scaling(spot_fleet, template, cluster_name):
' '
ssm_param = Parameter('Scale{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Type='String', Value='0', Name=Sub('/ecs-maestro/${ClusterName}/${Version}/scaletime'))
template.add_resource(ssm_param)
function_name = sanitize_cfn_resource_name(cluster_name)
autoscaling_role = Role('AutoscalingRole', AssumeRolePolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': 'sts:AssumeRole', 'Principal': {'Service': 'lambda.amazonaws.com'}}]}, Policies=[Policy(PolicyName='ec2-spot-fleet-scaler', PolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': ['cloudwatch:Get*', 'ec2:DescribeSpotFleetRequests', 'ec2:ModifySpotFleetRequest', 'logs:*', 'ecs:ListContainerInstances', 'ecs:Update*', 'ecs:ListTasks', 's3:GetEncryptionConfiguration'], 'Resource': '*'}, {'Effect': 'Allow', 'Action': ['ssm:Get*', 'ssm:Put*', 'ssm:Delete*'], 'Resource': [{'Fn::Sub': 'arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/ecs-maestro/${ClusterName}/*'}]}]}), Policy(PolicyName='DeleteStack', PolicyDocument={'Statement': [{'Effect': 'Allow', 'Action': ['lambda:InvokeFunction'], 'Resource': [{'Fn::Sub': (('arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:' + function_name) + 'ASGCleanupLambda')}]}]})])
template.add_resource(autoscaling_role)
scaling_lambda = Function('ScalingLambda{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Code=Code(S3Bucket=Sub('${S3Bucket}'), S3Key=Sub('${S3Prefix}/deployment.zip')), Handler='scaling.scale_spot.lambda_handler', Role=GetAtt(autoscaling_role, 'Arn'), Environment=Environment(Variables={'CLUSTER_NAME': Sub('${ClusterName}'), 'SPOT_FLEET': Ref('SpotFleet{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name')))), 'STATUS': Sub('${Status}'), 'VERSION': Sub('${Version}'), 'SCALE_IN_THRESHOLD': Sub('${SpotTaskThresholdIn}'), 'SCALE_OUT_THRESHOLD': Sub('${SpotTaskThresholdOut}'), 'MAX_WEIGHT': Sub('${SpotMaxWeight}'), 'MIN_WEIGHT': Sub('${SpotMinWeight}')}), Timeout=900, MemorySize=128, Runtime='python3.7')
template.add_resource(scaling_lambda)
CronScaling = Rule('CronScaling{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), ScheduleExpression='rate(1 minute)', Description='Cron for cluster stats', Targets=[Target(Id='1', Arn=GetAtt(scaling_lambda, 'Arn'))])
template.add_resource(CronScaling)
ScalingPerm = Permission('ScalePerm{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))), Action='lambda:InvokeFunction', FunctionName=GetAtt(scaling_lambda, 'Arn'), Principal='events.amazonaws.com', SourceArn=GetAtt(CronScaling, 'Arn'))
template.add_resource(ScalingPerm) |
@op_info_register(stack_init_op_info)
def _stack_init_aicpu():
'StackInit aicpu register'
return | 2,930,796,386,539,487,000 | StackInit aicpu register | mindspore/ops/_op_impl/aicpu/stack_push_pop.py | _stack_init_aicpu | 233-puchi/mindspore | python | @op_info_register(stack_init_op_info)
def _stack_init_aicpu():
return |
@op_info_register(stack_push_op_info)
def _stack_push_aicpu():
'StackPush aicpu register'
return | -1,631,848,826,431,700,500 | StackPush aicpu register | mindspore/ops/_op_impl/aicpu/stack_push_pop.py | _stack_push_aicpu | 233-puchi/mindspore | python | @op_info_register(stack_push_op_info)
def _stack_push_aicpu():
return |
@op_info_register(stack_pop_op_info)
def _stack_pop_aicpu():
'StackPop aicpu register'
return | 4,465,277,019,540,829,700 | StackPop aicpu register | mindspore/ops/_op_impl/aicpu/stack_push_pop.py | _stack_pop_aicpu | 233-puchi/mindspore | python | @op_info_register(stack_pop_op_info)
def _stack_pop_aicpu():
return |
@op_info_register(stack_destroy_op_info)
def _stack_destroy_aicpu():
'StackDestroy aicpu register'
return | 8,348,166,599,229,350,000 | StackDestroy aicpu register | mindspore/ops/_op_impl/aicpu/stack_push_pop.py | _stack_destroy_aicpu | 233-puchi/mindspore | python | @op_info_register(stack_destroy_op_info)
def _stack_destroy_aicpu():
return |
def min_depth(self, root):
'\n :type root: TreeNode\n :rtype: int\n '
if (root is None):
return 0
if ((root.left is None) or (root.right is None)):
return (max(self.min_depth(root.left), self.min_depth(root.right)) + 1)
return (min(self.min_depth(root.left), self.min_depth(root.right)) + 1) | -8,175,042,898,806,348,000 | :type root: TreeNode
:rtype: int | algorithms/tree/min_height.py | min_depth | AdrialYeoh/algorithms | python | def min_depth(self, root):
'\n :type root: TreeNode\n :rtype: int\n '
if (root is None):
return 0
if ((root.left is None) or (root.right is None)):
return (max(self.min_depth(root.left), self.min_depth(root.right)) + 1)
return (min(self.min_depth(root.left), self.min_depth(root.right)) + 1) |
def test_plugins(self):
'Test that plugins without dependencies work'
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin'), ('baz', 'git://git.openstack.org/openstack/baz-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'))
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
plugins = []
with open(p['path']) as f:
for line in f:
if line.startswith('enable_plugin'):
plugins.append(line.split()[1])
self.assertEqual(['bar', 'baz', 'foo'], plugins) | 8,778,339,353,578,309,000 | Test that plugins without dependencies work | roles/write-devstack-local-conf/library/test.py | test_plugins | HoonMinJeongUm/HoonMin-devstack | python | def test_plugins(self):
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin'), ('baz', 'git://git.openstack.org/openstack/baz-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'))
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
plugins = []
with open(p['path']) as f:
for line in f:
if line.startswith('enable_plugin'):
plugins.append(line.split()[1])
self.assertEqual(['bar', 'baz', 'foo'], plugins) |
def test_plugin_deps(self):
'Test that plugins with dependencies work'
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(self.tmpdir, 'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
with open(os.path.join(self.tmpdir, 'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) | 412,481,352,534,518,700 | Test that plugins with dependencies work | roles/write-devstack-local-conf/library/test.py | test_plugin_deps | HoonMinJeongUm/HoonMin-devstack | python | def test_plugin_deps(self):
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(self.tmpdir, 'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
with open(os.path.join(self.tmpdir, 'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf')) |
def test_libs_from_git(self):
'Test that LIBS_FROM_GIT is auto-generated'
projects = {'git.openstack.org/openstack/nova': {'required': True, 'short_name': 'nova'}, 'git.openstack.org/openstack/oslo.messaging': {'required': True, 'short_name': 'oslo.messaging'}, 'git.openstack.org/openstack/devstack-plugin': {'required': False, 'short_name': 'devstack-plugin'}}
project = {'short_name': 'glance'}
p = dict(base_services=[], base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects, project=project)
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('nova,oslo.messaging,glance', lfg) | 5,259,233,446,241,077,000 | Test that LIBS_FROM_GIT is auto-generated | roles/write-devstack-local-conf/library/test.py | test_libs_from_git | HoonMinJeongUm/HoonMin-devstack | python | def test_libs_from_git(self):
projects = {'git.openstack.org/openstack/nova': {'required': True, 'short_name': 'nova'}, 'git.openstack.org/openstack/oslo.messaging': {'required': True, 'short_name': 'oslo.messaging'}, 'git.openstack.org/openstack/devstack-plugin': {'required': False, 'short_name': 'devstack-plugin'}}
project = {'short_name': 'glance'}
p = dict(base_services=[], base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects, project=project)
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('nova,oslo.messaging,glance', lfg) |
def test_overridelibs_from_git(self):
'Test that LIBS_FROM_GIT can be overridden'
localrc = {'LIBS_FROM_GIT': 'oslo.db'}
projects = {'git.openstack.org/openstack/nova': {'required': True, 'short_name': 'nova'}, 'git.openstack.org/openstack/oslo.messaging': {'required': True, 'short_name': 'oslo.messaging'}, 'git.openstack.org/openstack/devstack-plugin': {'required': False, 'short_name': 'devstack-plugin'}}
p = dict(localrc=localrc, base_services=[], base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects)
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('oslo.db', lfg) | -5,863,802,321,256,962,000 | Test that LIBS_FROM_GIT can be overridden | roles/write-devstack-local-conf/library/test.py | test_overridelibs_from_git | HoonMinJeongUm/HoonMin-devstack | python | def test_overridelibs_from_git(self):
localrc = {'LIBS_FROM_GIT': 'oslo.db'}
projects = {'git.openstack.org/openstack/nova': {'required': True, 'short_name': 'nova'}, 'git.openstack.org/openstack/oslo.messaging': {'required': True, 'short_name': 'oslo.messaging'}, 'git.openstack.org/openstack/devstack-plugin': {'required': False, 'short_name': 'devstack-plugin'}}
p = dict(localrc=localrc, base_services=[], base_dir='./test', path=os.path.join(self.tmpdir, 'test.local.conf'), projects=projects)
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'), p.get('projects'), p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('oslo.db', lfg) |
def test_plugin_circular_deps(self):
'Test that plugins with circular dependencies fail'
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(self.tmpdir, 'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
f.write('plugin_requires foo bar\n')
with open(os.path.join(self.tmpdir, 'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf'))
with self.assertRaises(Exception):
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'))
lc.write(p['path']) | -1,101,312,770,292,055,400 | Test that plugins with circular dependencies fail | roles/write-devstack-local-conf/library/test.py | test_plugin_circular_deps | HoonMinJeongUm/HoonMin-devstack | python | def test_plugin_circular_deps(self):
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(self.tmpdir, 'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
f.write('plugin_requires foo bar\n')
with open(os.path.join(self.tmpdir, 'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install': {'nova.conf': {'main': {'test_conf': '2'}}}}
services = {'cinder': True}
plugins = OrderedDict([('bar', 'git://git.openstack.org/openstack/bar-plugin'), ('foo', 'git://git.openstack.org/openstack/foo-plugin')])
p = dict(localrc=localrc, local_conf=local_conf, base_services=[], services=services, plugins=plugins, base_dir=self.tmpdir, path=os.path.join(self.tmpdir, 'test.local.conf'))
with self.assertRaises(Exception):
lc = LocalConf(p.get('localrc'), p.get('local_conf'), p.get('base_services'), p.get('services'), p.get('plugins'), p.get('base_dir'))
lc.write(p['path']) |
def _create_k8s_job(self, yaml_spec):
' _create_k8s_job creates a kubernetes job based on the yaml spec '
pod = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(generate_name=yaml_spec['metadata']['generateName']))
container = k8s_client.V1Container(name=yaml_spec['spec']['containers'][0]['name'], image=yaml_spec['spec']['containers'][0]['image'], args=yaml_spec['spec']['containers'][0]['args'], volume_mounts=[k8s_client.V1VolumeMount(name=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['name'], mount_path=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['mountPath'])], env=[k8s_client.V1EnvVar(name=yaml_spec['spec']['containers'][0]['env'][0]['name'], value=yaml_spec['spec']['containers'][0]['env'][0]['value'])])
pod.spec = k8s_client.V1PodSpec(restart_policy=yaml_spec['spec']['restartPolicy'], containers=[container], service_account_name=yaml_spec['spec']['serviceAccountName'], volumes=[k8s_client.V1Volume(name=yaml_spec['spec']['volumes'][0]['name'], secret=k8s_client.V1SecretVolumeSource(secret_name=yaml_spec['spec']['volumes'][0]['secret']['secretName']))])
try:
api_response = self._corev1.create_namespaced_pod(yaml_spec['metadata']['namespace'], pod)
return (api_response.metadata.name, True)
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->create_namespaced_pod: {}\n'.format(str(e)))
return ('', False) | 942,747,812,642,086,400 | _create_k8s_job creates a kubernetes job based on the yaml spec | sdk/python/kfp/compiler/_k8s_helper.py | _create_k8s_job | JohnPaton/pipelines | python | def _create_k8s_job(self, yaml_spec):
' '
pod = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(generate_name=yaml_spec['metadata']['generateName']))
container = k8s_client.V1Container(name=yaml_spec['spec']['containers'][0]['name'], image=yaml_spec['spec']['containers'][0]['image'], args=yaml_spec['spec']['containers'][0]['args'], volume_mounts=[k8s_client.V1VolumeMount(name=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['name'], mount_path=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['mountPath'])], env=[k8s_client.V1EnvVar(name=yaml_spec['spec']['containers'][0]['env'][0]['name'], value=yaml_spec['spec']['containers'][0]['env'][0]['value'])])
pod.spec = k8s_client.V1PodSpec(restart_policy=yaml_spec['spec']['restartPolicy'], containers=[container], service_account_name=yaml_spec['spec']['serviceAccountName'], volumes=[k8s_client.V1Volume(name=yaml_spec['spec']['volumes'][0]['name'], secret=k8s_client.V1SecretVolumeSource(secret_name=yaml_spec['spec']['volumes'][0]['secret']['secretName']))])
try:
api_response = self._corev1.create_namespaced_pod(yaml_spec['metadata']['namespace'], pod)
return (api_response.metadata.name, True)
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->create_namespaced_pod: {}\n'.format(str(e)))
return ('', False) |
def _wait_for_k8s_job(self, pod_name, yaml_spec, timeout):
' _wait_for_k8s_job waits for the job to complete '
status = 'running'
start_time = datetime.now()
while (status in ['pending', 'running']):
try:
api_response = self._corev1.read_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'])
status = api_response.status.phase.lower()
time.sleep(5)
elapsed_time = (datetime.now() - start_time).seconds
logging.info('{} seconds: waiting for job to complete'.format(elapsed_time))
if (elapsed_time > timeout):
logging.info('Kubernetes job timeout')
return False
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->read_namespaced_pod: {}\n'.format(str(e)))
return False
return (status == 'succeeded') | 390,679,719,422,102,600 | _wait_for_k8s_job waits for the job to complete | sdk/python/kfp/compiler/_k8s_helper.py | _wait_for_k8s_job | JohnPaton/pipelines | python | def _wait_for_k8s_job(self, pod_name, yaml_spec, timeout):
' '
status = 'running'
start_time = datetime.now()
while (status in ['pending', 'running']):
try:
api_response = self._corev1.read_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'])
status = api_response.status.phase.lower()
time.sleep(5)
elapsed_time = (datetime.now() - start_time).seconds
logging.info('{} seconds: waiting for job to complete'.format(elapsed_time))
if (elapsed_time > timeout):
logging.info('Kubernetes job timeout')
return False
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->read_namespaced_pod: {}\n'.format(str(e)))
return False
return (status == 'succeeded') |
def _delete_k8s_job(self, pod_name, yaml_spec):
' _delete_k8s_job deletes a pod '
try:
api_response = self._corev1.delete_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'], body=k8s_client.V1DeleteOptions())
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->delete_namespaced_pod: {}\n'.format(str(e))) | -4,173,525,661,513,618,000 | _delete_k8s_job deletes a pod | sdk/python/kfp/compiler/_k8s_helper.py | _delete_k8s_job | JohnPaton/pipelines | python | def _delete_k8s_job(self, pod_name, yaml_spec):
' '
try:
api_response = self._corev1.delete_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'], body=k8s_client.V1DeleteOptions())
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->delete_namespaced_pod: {}\n'.format(str(e))) |
def run_job(self, yaml_spec, timeout=600):
' run_job runs a kubernetes job and clean up afterwards '
(pod_name, succ) = self._create_k8s_job(yaml_spec)
if (not succ):
return False
succ = self._wait_for_k8s_job(pod_name, yaml_spec, timeout)
if (not succ):
logging.info('Kubernetes job failed.')
return False
self._delete_k8s_job(pod_name, yaml_spec)
return succ | 8,632,286,401,087,466,000 | run_job runs a kubernetes job and clean up afterwards | sdk/python/kfp/compiler/_k8s_helper.py | run_job | JohnPaton/pipelines | python | def run_job(self, yaml_spec, timeout=600):
' '
(pod_name, succ) = self._create_k8s_job(yaml_spec)
if (not succ):
return False
succ = self._wait_for_k8s_job(pod_name, yaml_spec, timeout)
if (not succ):
logging.info('Kubernetes job failed.')
return False
self._delete_k8s_job(pod_name, yaml_spec)
return succ |
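
`run_job` expects a dict shaped like a pod manifest; the sketch below shows the minimal shape implied by the field accesses in `_create_k8s_job`. All names and values are placeholders, and `helper` stands for an already-constructed `K8sHelper` instance.

```python
# Minimal yaml_spec shape implied by _create_k8s_job; every value here is a placeholder.
yaml_spec = {
    'metadata': {'generateName': 'builder-', 'namespace': 'kubeflow'},
    'spec': {
        'restartPolicy': 'Never',
        'serviceAccountName': 'default',
        'containers': [{
            'name': 'builder',
            'image': 'gcr.io/example/builder:latest',
            'args': ['--help'],
            'volumeMounts': [{'name': 'secret-vol', 'mountPath': '/secret'}],
            'env': [{'name': 'GOOGLE_APPLICATION_CREDENTIALS', 'value': '/secret/key.json'}],
        }],
        'volumes': [{'name': 'secret-vol', 'secret': {'secretName': 'user-sa'}}],
    },
}
# succeeded = helper.run_job(yaml_spec, timeout=600)
```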
@staticmethod
def sanitize_k8s_name(name):
'From _make_kubernetes_name\n sanitize_k8s_name cleans and converts the names in the workflow.\n '
return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).lstrip('-').rstrip('-') | -6,757,738,004,173,168,000 | From _make_kubernetes_name
sanitize_k8s_name cleans and converts the names in the workflow. | sdk/python/kfp/compiler/_k8s_helper.py | sanitize_k8s_name | JohnPaton/pipelines | python | @staticmethod
def sanitize_k8s_name(name):
'From _make_kubernetes_name\n sanitize_k8s_name cleans and converts the names in the workflow.\n '
return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).lstrip('-').rstrip('-') |
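
`sanitize_k8s_name` lower-cases the input, collapses every run of disallowed characters into a single hyphen, and trims leading and trailing hyphens. Illustrative behaviour, assuming the class is importable from `kfp.compiler._k8s_helper` per the path column:

```python
# Illustrative only; import path taken from the dataset row's path column.
from kfp.compiler._k8s_helper import K8sHelper

assert K8sHelper.sanitize_k8s_name('My Pipeline!') == 'my-pipeline'
assert K8sHelper.sanitize_k8s_name('--foo__bar--') == 'foo-bar'
```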
@staticmethod
def convert_k8s_obj_to_json(k8s_obj):
'\n Builds a JSON K8s object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is swagger model, return the properties dict.\n\n Args:\n obj: The data to serialize.\n Returns: The serialized form of data.\n '
from six import text_type, integer_types, iteritems
PRIMITIVE_TYPES = ((float, bool, bytes, text_type) + integer_types)
from datetime import date, datetime
if (k8s_obj is None):
return None
elif isinstance(k8s_obj, PRIMITIVE_TYPES):
return k8s_obj
elif isinstance(k8s_obj, list):
return [K8sHelper.convert_k8s_obj_to_json(sub_obj) for sub_obj in k8s_obj]
elif isinstance(k8s_obj, tuple):
return tuple((K8sHelper.convert_k8s_obj_to_json(sub_obj) for sub_obj in k8s_obj))
elif isinstance(k8s_obj, (datetime, date)):
return k8s_obj.isoformat()
elif isinstance(k8s_obj, dsl.PipelineParam):
if isinstance(k8s_obj.value, str):
return k8s_obj.value
return ('{{inputs.parameters.%s}}' % k8s_obj.full_name)
if isinstance(k8s_obj, dict):
obj_dict = k8s_obj
else:
obj_dict = {k8s_obj.attribute_map[attr]: getattr(k8s_obj, attr) for (attr, _) in iteritems(k8s_obj.swagger_types) if (getattr(k8s_obj, attr) is not None)}
return {key: K8sHelper.convert_k8s_obj_to_json(val) for (key, val) in iteritems(obj_dict)} | -8,150,503,951,591,003,000 | Builds a JSON K8s object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
Args:
obj: The data to serialize.
Returns: The serialized form of data. | sdk/python/kfp/compiler/_k8s_helper.py | convert_k8s_obj_to_json | JohnPaton/pipelines | python | @staticmethod
def convert_k8s_obj_to_json(k8s_obj):
'\n Builds a JSON K8s object.\n\n If obj is None, return None.\n If obj is str, int, long, float, bool, return directly.\n If obj is datetime.datetime, datetime.date\n convert to string in iso8601 format.\n If obj is list, sanitize each element in the list.\n If obj is dict, return the dict.\n If obj is swagger model, return the properties dict.\n\n Args:\n obj: The data to serialize.\n Returns: The serialized form of data.\n '
from six import text_type, integer_types, iteritems
PRIMITIVE_TYPES = ((float, bool, bytes, text_type) + integer_types)
from datetime import date, datetime
if (k8s_obj is None):
return None
elif isinstance(k8s_obj, PRIMITIVE_TYPES):
return k8s_obj
elif isinstance(k8s_obj, list):
return [K8sHelper.convert_k8s_obj_to_json(sub_obj) for sub_obj in k8s_obj]
elif isinstance(k8s_obj, tuple):
return tuple((K8sHelper.convert_k8s_obj_to_json(sub_obj) for sub_obj in k8s_obj))
elif isinstance(k8s_obj, (datetime, date)):
return k8s_obj.isoformat()
elif isinstance(k8s_obj, dsl.PipelineParam):
if isinstance(k8s_obj.value, str):
return k8s_obj.value
return ('{{inputs.parameters.%s}}' % k8s_obj.full_name)
if isinstance(k8s_obj, dict):
obj_dict = k8s_obj
else:
obj_dict = {k8s_obj.attribute_map[attr]: getattr(k8s_obj, attr) for (attr, _) in iteritems(k8s_obj.swagger_types) if (getattr(k8s_obj, attr) is not None)}
return {key: K8sHelper.convert_k8s_obj_to_json(val) for (key, val) in iteritems(obj_dict)} |
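
`convert_k8s_obj_to_json` recurses through containers, serialises dates to ISO 8601, and unwraps swagger models via their `attribute_map`. Two small illustrative cases, with the same assumed `K8sHelper` import as above:

```python
from datetime import datetime

# Plain containers pass through with each value converted; datetimes become ISO 8601 strings.
assert K8sHelper.convert_k8s_obj_to_json({'a': ('x', 1), 'b': None}) == {'a': ('x', 1), 'b': None}
assert K8sHelper.convert_k8s_obj_to_json({'when': datetime(2019, 1, 1)}) == {'when': '2019-01-01T00:00:00'}
```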
def encrypt_payload(secret_key, payload):
'Return a encrypted payload given a key and dictionary of data.'
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip('libnacl/libsodium is not installed')
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode('utf-8')
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b'\x00')
payload = json.dumps(payload).encode('utf-8')
return SecretBox(prepped_key).encrypt(payload, encoder=Base64Encoder).decode('utf-8') | 1,771,727,756,332,680,400 | Return a encrypted payload given a key and dictionary of data. | tests/components/mobile_app/test_webhook.py | encrypt_payload | Bonnee/core | python | def encrypt_payload(secret_key, payload):
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip('libnacl/libsodium is not installed')
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode('utf-8')
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b'\x00')
payload = json.dumps(payload).encode('utf-8')
return SecretBox(prepped_key).encrypt(payload, encoder=Base64Encoder).decode('utf-8') |
def decrypt_payload(secret_key, encrypted_data):
'Return a decrypted payload given a key and a string of encrypted data.'
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip('libnacl/libsodium is not installed')
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode('utf-8')
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b'\x00')
decrypted_data = SecretBox(prepped_key).decrypt(encrypted_data, encoder=Base64Encoder)
decrypted_data = decrypted_data.decode('utf-8')
return json.loads(decrypted_data) | 1,057,821,506,854,998,300 | Return a decrypted payload given a key and a string of encrypted data. | tests/components/mobile_app/test_webhook.py | decrypt_payload | Bonnee/core | python | def decrypt_payload(secret_key, encrypted_data):
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip('libnacl/libsodium is not installed')
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode('utf-8')
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b'\x00')
decrypted_data = SecretBox(prepped_key).decrypt(encrypted_data, encoder=Base64Encoder)
decrypted_data = decrypted_data.decode('utf-8')
return json.loads(decrypted_data) |
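# Hedged round-trip sketch for the two helpers above. It assumes PyNaCl
# (libsodium) is installed and mirrors the key preparation: UTF-8 encode,
# truncate to SecretBox.KEY_SIZE, then right-pad with NUL bytes.
import json
from nacl.encoding import Base64Encoder
from nacl.secret import SecretBox

def _prep_key(secret_key):
    key = secret_key.encode('utf-8')[:SecretBox.KEY_SIZE]
    return key.ljust(SecretBox.KEY_SIZE, b'\x00')

key = 'example-webhook-secret'                       # placeholder secret
payload = {'one': 'Hello world'}
token = SecretBox(_prep_key(key)).encrypt(
    json.dumps(payload).encode('utf-8'), encoder=Base64Encoder)
roundtrip = json.loads(
    SecretBox(_prep_key(key)).decrypt(token, encoder=Base64Encoder).decode('utf-8'))
assert roundtrip == payload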
async def test_webhook_handle_render_template(create_registrations, webhook_client):
'Test that we render templates properly.'
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=RENDER_TEMPLATE))
assert (resp.status == 200)
json = (await resp.json())
assert (json == {'one': 'Hello world'}) | -2,946,827,424,843,341,000 | Test that we render templates properly. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_render_template | Bonnee/core | python | async def test_webhook_handle_render_template(create_registrations, webhook_client):
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=RENDER_TEMPLATE))
assert (resp.status == 200)
json = (await resp.json())
assert (json == {'one': 'Hello world'}) |
async def test_webhook_handle_call_services(hass, create_registrations, webhook_client):
'Test that we call services properly.'
calls = async_mock_service(hass, 'test', 'mobile_app')
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=CALL_SERVICE))
assert (resp.status == 200)
assert (len(calls) == 1) | -3,661,124,779,861,039,600 | Test that we call services properly. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_call_services | Bonnee/core | python | async def test_webhook_handle_call_services(hass, create_registrations, webhook_client):
calls = async_mock_service(hass, 'test', 'mobile_app')
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=CALL_SERVICE))
assert (resp.status == 200)
assert (len(calls) == 1) |
async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
'Test that we can fire events.'
events = []
@callback
def store_event(event):
'Helper to store events.'
events.append(event)
hass.bus.async_listen('test_event', store_event)
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=FIRE_EVENT))
assert (resp.status == 200)
json = (await resp.json())
assert (json == {})
assert (len(events) == 1)
assert (events[0].data['hello'] == 'yo world') | -6,889,423,410,323,974,000 | Test that we can fire events. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_fire_event | Bonnee/core | python | async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
events = []
@callback
def store_event(event):
'Helper to store events.'
events.append(event)
hass.bus.async_listen('test_event', store_event)
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json=FIRE_EVENT))
assert (resp.status == 200)
json = (await resp.json())
assert (json == {})
assert (len(events) == 1)
assert (events[0].data['hello'] == 'yo world') |
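# For context only (not part of the test suite above): a hedged sketch of what
# such a fire_event webhook call looks like from a plain HTTP client. The host
# and webhook id are placeholders, and the exact payload keys (event_type,
# event_data) are an assumption inferred from the assertions above.
import requests

payload = {
    'type': 'fire_event',
    'data': {'event_type': 'test_event', 'event_data': {'hello': 'yo world'}},
}
resp = requests.post('http://localhost:8123/api/webhook/<webhook-id>',
                     json=payload, timeout=10)
print(resp.status_code, resp.json())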
async def test_webhook_update_registration(webhook_client, authed_api_client):
'Test that we can update an existing registration via webhook.'
register_resp = (await authed_api_client.post('/api/mobile_app/registrations', json=REGISTER_CLEARTEXT))
assert (register_resp.status == 201)
register_json = (await register_resp.json())
webhook_id = register_json[CONF_WEBHOOK_ID]
update_container = {'type': 'update_registration', 'data': UPDATE}
update_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=update_container))
assert (update_resp.status == 200)
update_json = (await update_resp.json())
assert (update_json['app_version'] == '2.0.0')
assert (CONF_WEBHOOK_ID not in update_json)
assert (CONF_SECRET not in update_json) | -5,394,532,641,275,007,000 | Test that we can update an existing registration via webhook. | tests/components/mobile_app/test_webhook.py | test_webhook_update_registration | Bonnee/core | python | async def test_webhook_update_registration(webhook_client, authed_api_client):
register_resp = (await authed_api_client.post('/api/mobile_app/registrations', json=REGISTER_CLEARTEXT))
assert (register_resp.status == 201)
register_json = (await register_resp.json())
webhook_id = register_json[CONF_WEBHOOK_ID]
update_container = {'type': 'update_registration', 'data': UPDATE}
update_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=update_container))
assert (update_resp.status == 200)
update_json = (await update_resp.json())
assert (update_json['app_version'] == '2.0.0')
assert (CONF_WEBHOOK_ID not in update_json)
assert (CONF_SECRET not in update_json) |
async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
'Test that we can get zones properly.'
(await async_setup_component(hass, ZONE_DOMAIN, {ZONE_DOMAIN: {}}))
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'get_zones'}))
assert (resp.status == 200)
json = (await resp.json())
assert (len(json) == 1)
zones = sorted(json, key=(lambda entry: entry['entity_id']))
assert (zones[0]['entity_id'] == 'zone.home') | -8,149,553,562,526,938,000 | Test that we can get zones properly. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_get_zones | Bonnee/core | python | async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
(await async_setup_component(hass, ZONE_DOMAIN, {ZONE_DOMAIN: {}}))
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'get_zones'}))
assert (resp.status == 200)
json = (await resp.json())
assert (len(json) == 1)
zones = sorted(json, key=(lambda entry: entry['entity_id']))
assert (zones[0]['entity_id'] == 'zone.home') |
async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
'Test that we can get config properly.'
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'get_config'}))
assert (resp.status == 200)
json = (await resp.json())
if ('components' in json):
json['components'] = set(json['components'])
if ('whitelist_external_dirs' in json):
json['whitelist_external_dirs'] = set(json['whitelist_external_dirs'])
hass_config = hass.config.as_dict()
expected_dict = {'latitude': hass_config['latitude'], 'longitude': hass_config['longitude'], 'elevation': hass_config['elevation'], 'unit_system': hass_config['unit_system'], 'location_name': hass_config['location_name'], 'time_zone': hass_config['time_zone'], 'components': hass_config['components'], 'version': hass_config['version'], 'theme_color': '#03A9F4'}
assert (expected_dict == json) | -5,016,961,611,160,766,000 | Test that we can get config properly. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_get_config | Bonnee/core | python | async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'get_config'}))
assert (resp.status == 200)
json = (await resp.json())
if ('components' in json):
json['components'] = set(json['components'])
if ('whitelist_external_dirs' in json):
json['whitelist_external_dirs'] = set(json['whitelist_external_dirs'])
hass_config = hass.config.as_dict()
expected_dict = {'latitude': hass_config['latitude'], 'longitude': hass_config['longitude'], 'elevation': hass_config['elevation'], 'unit_system': hass_config['unit_system'], 'location_name': hass_config['location_name'], 'time_zone': hass_config['time_zone'], 'components': hass_config['components'], 'version': hass_config['version'], 'theme_color': '#03A9F4'}
assert (expected_dict == json) |
async def test_webhook_returns_error_incorrect_json(webhook_client, create_registrations, caplog):
'Test that an error is returned when JSON is invalid.'
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), data='not json'))
assert (resp.status == 400)
json = (await resp.json())
assert (json == {})
assert ('invalid JSON' in caplog.text) | 4,461,940,669,026,047,000 | Test that an error is returned when JSON is invalid. | tests/components/mobile_app/test_webhook.py | test_webhook_returns_error_incorrect_json | Bonnee/core | python | async def test_webhook_returns_error_incorrect_json(webhook_client, create_registrations, caplog):
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), data='not json'))
assert (resp.status == 400)
json = (await resp.json())
assert (json == {})
assert ('invalid JSON' in caplog.text) |
async def test_webhook_handle_decryption(webhook_client, create_registrations):
'Test that we can encrypt/decrypt properly.'
key = create_registrations[0]['secret']
data = encrypt_payload(key, RENDER_TEMPLATE['data'])
container = {'type': 'render_template', 'encrypted': True, 'encrypted_data': data}
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[0]['webhook_id']), json=container))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert ('encrypted_data' in webhook_json)
decrypted_data = decrypt_payload(key, webhook_json['encrypted_data'])
assert (decrypted_data == {'one': 'Hello world'}) | -8,089,620,602,666,996,000 | Test that we can encrypt/decrypt properly. | tests/components/mobile_app/test_webhook.py | test_webhook_handle_decryption | Bonnee/core | python | async def test_webhook_handle_decryption(webhook_client, create_registrations):
key = create_registrations[0]['secret']
data = encrypt_payload(key, RENDER_TEMPLATE['data'])
container = {'type': 'render_template', 'encrypted': True, 'encrypted_data': data}
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[0]['webhook_id']), json=container))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert ('encrypted_data' in webhook_json)
decrypted_data = decrypt_payload(key, webhook_json['encrypted_data'])
assert (decrypted_data == {'one': 'Hello world'}) |
async def test_webhook_requires_encryption(webhook_client, create_registrations):
'Test that encrypted registrations only accept encrypted data.'
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[0]['webhook_id']), json=RENDER_TEMPLATE))
assert (resp.status == 400)
webhook_json = (await resp.json())
assert ('error' in webhook_json)
assert (webhook_json['success'] is False)
assert (webhook_json['error']['code'] == 'encryption_required') | 804,871,898,392,491,100 | Test that encrypted registrations only accept encrypted data. | tests/components/mobile_app/test_webhook.py | test_webhook_requires_encryption | Bonnee/core | python | async def test_webhook_requires_encryption(webhook_client, create_registrations):
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[0]['webhook_id']), json=RENDER_TEMPLATE))
assert (resp.status == 400)
webhook_json = (await resp.json())
assert ('error' in webhook_json)
assert (webhook_json['success'] is False)
assert (webhook_json['error']['code'] == 'encryption_required') |
async def test_webhook_update_location(hass, webhook_client, create_registrations):
'Test that location can be updated.'
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'update_location', 'data': {'gps': [1, 2], 'gps_accuracy': 10, 'altitude': (- 10)}}))
assert (resp.status == 200)
state = hass.states.get('device_tracker.test_1_2')
assert (state is not None)
assert (state.attributes['latitude'] == 1.0)
assert (state.attributes['longitude'] == 2.0)
assert (state.attributes['gps_accuracy'] == 10)
assert (state.attributes['altitude'] == (- 10)) | 4,189,325,734,411,630,600 | Test that location can be updated. | tests/components/mobile_app/test_webhook.py | test_webhook_update_location | Bonnee/core | python | async def test_webhook_update_location(hass, webhook_client, create_registrations):
resp = (await webhook_client.post('/api/webhook/{}'.format(create_registrations[1]['webhook_id']), json={'type': 'update_location', 'data': {'gps': [1, 2], 'gps_accuracy': 10, 'altitude': (- 10)}}))
assert (resp.status == 200)
state = hass.states.get('device_tracker.test_1_2')
assert (state is not None)
assert (state.attributes['latitude'] == 1.0)
assert (state.attributes['longitude'] == 2.0)
assert (state.attributes['gps_accuracy'] == 10)
assert (state.attributes['altitude'] == (- 10)) |
async def test_webhook_enable_encryption(hass, webhook_client, create_registrations):
'Test that encryption can be added to a reg initially created without.'
webhook_id = create_registrations[1]['webhook_id']
enable_enc_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'enable_encryption'}))
assert (enable_enc_resp.status == 200)
enable_enc_json = (await enable_enc_resp.json())
assert (len(enable_enc_json) == 1)
assert (CONF_SECRET in enable_enc_json)
key = enable_enc_json['secret']
enc_required_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=RENDER_TEMPLATE))
assert (enc_required_resp.status == 400)
enc_required_json = (await enc_required_resp.json())
assert ('error' in enc_required_json)
assert (enc_required_json['success'] is False)
assert (enc_required_json['error']['code'] == 'encryption_required')
enc_data = encrypt_payload(key, RENDER_TEMPLATE['data'])
container = {'type': 'render_template', 'encrypted': True, 'encrypted_data': enc_data}
enc_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=container))
assert (enc_resp.status == 200)
enc_json = (await enc_resp.json())
assert ('encrypted_data' in enc_json)
decrypted_data = decrypt_payload(key, enc_json['encrypted_data'])
assert (decrypted_data == {'one': 'Hello world'}) | 999,848,620,369,200,100 | Test that encryption can be added to a reg initially created without. | tests/components/mobile_app/test_webhook.py | test_webhook_enable_encryption | Bonnee/core | python | async def test_webhook_enable_encryption(hass, webhook_client, create_registrations):
webhook_id = create_registrations[1]['webhook_id']
enable_enc_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'enable_encryption'}))
assert (enable_enc_resp.status == 200)
enable_enc_json = (await enable_enc_resp.json())
assert (len(enable_enc_json) == 1)
assert (CONF_SECRET in enable_enc_json)
key = enable_enc_json['secret']
enc_required_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=RENDER_TEMPLATE))
assert (enc_required_resp.status == 400)
enc_required_json = (await enc_required_resp.json())
assert ('error' in enc_required_json)
assert (enc_required_json['success'] is False)
assert (enc_required_json['error']['code'] == 'encryption_required')
enc_data = encrypt_payload(key, RENDER_TEMPLATE['data'])
container = {'type': 'render_template', 'encrypted': True, 'encrypted_data': enc_data}
enc_resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json=container))
assert (enc_resp.status == 200)
enc_json = (await enc_resp.json())
assert ('encrypted_data' in enc_json)
decrypted_data = decrypt_payload(key, enc_json['encrypted_data'])
assert (decrypted_data == {'one': 'Hello world'}) |
async def test_webhook_camera_stream_non_existent(hass, create_registrations, webhook_client):
'Test fetching camera stream URLs for a non-existent camera.'
webhook_id = create_registrations[1]['webhook_id']
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.doesnt_exist'}}))
assert (resp.status == 400)
webhook_json = (await resp.json())
assert (webhook_json['success'] is False) | -8,410,440,844,275,927,000 | Test fetching camera stream URLs for a non-existent camera. | tests/components/mobile_app/test_webhook.py | test_webhook_camera_stream_non_existent | Bonnee/core | python | async def test_webhook_camera_stream_non_existent(hass, create_registrations, webhook_client):
webhook_id = create_registrations[1]['webhook_id']
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.doesnt_exist'}}))
assert (resp.status == 400)
webhook_json = (await resp.json())
assert (webhook_json['success'] is False) |
async def test_webhook_camera_stream_non_hls(hass, create_registrations, webhook_client):
'Test fetching camera stream URLs for a non-HLS/stream-supporting camera.'
hass.states.async_set('camera.non_stream_camera', 'idle', {'supported_features': 0})
webhook_id = create_registrations[1]['webhook_id']
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.non_stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] is None)
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.non_stream_camera') | -4,140,335,046,916,990,000 | Test fetching camera stream URLs for a non-HLS/stream-supporting camera. | tests/components/mobile_app/test_webhook.py | test_webhook_camera_stream_non_hls | Bonnee/core | python | async def test_webhook_camera_stream_non_hls(hass, create_registrations, webhook_client):
hass.states.async_set('camera.non_stream_camera', 'idle', {'supported_features': 0})
webhook_id = create_registrations[1]['webhook_id']
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.non_stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] is None)
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.non_stream_camera') |
async def test_webhook_camera_stream_stream_available(hass, create_registrations, webhook_client):
'Test fetching camera stream URLs for an HLS/stream-supporting camera.'
hass.states.async_set('camera.stream_camera', 'idle', {'supported_features': CAMERA_SUPPORT_STREAM})
webhook_id = create_registrations[1]['webhook_id']
with patch('homeassistant.components.camera.async_request_stream', return_value='/api/streams/some_hls_stream'):
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] == '/api/streams/some_hls_stream')
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.stream_camera') | 462,953,747,465,132,740 | Test fetching camera stream URLs for an HLS/stream-supporting camera. | tests/components/mobile_app/test_webhook.py | test_webhook_camera_stream_stream_available | Bonnee/core | python | async def test_webhook_camera_stream_stream_available(hass, create_registrations, webhook_client):
hass.states.async_set('camera.stream_camera', 'idle', {'supported_features': CAMERA_SUPPORT_STREAM})
webhook_id = create_registrations[1]['webhook_id']
with patch('homeassistant.components.camera.async_request_stream', return_value='/api/streams/some_hls_stream'):
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] == '/api/streams/some_hls_stream')
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.stream_camera') |
async def test_webhook_camera_stream_stream_available_but_errors(hass, create_registrations, webhook_client):
'Test fetching camera stream URLs for an HLS/stream-supporting camera when the stream request errors.'
hass.states.async_set('camera.stream_camera', 'idle', {'supported_features': CAMERA_SUPPORT_STREAM})
webhook_id = create_registrations[1]['webhook_id']
with patch('homeassistant.components.camera.async_request_stream', side_effect=HomeAssistantError()):
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] is None)
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.stream_camera') | -3,383,425,318,128,153,600 | Test fetching camera stream URLs for an HLS/stream-supporting camera when the stream request errors. | tests/components/mobile_app/test_webhook.py | test_webhook_camera_stream_stream_available_but_errors | Bonnee/core | python | async def test_webhook_camera_stream_stream_available_but_errors(hass, create_registrations, webhook_client):
hass.states.async_set('camera.stream_camera', 'idle', {'supported_features': CAMERA_SUPPORT_STREAM})
webhook_id = create_registrations[1]['webhook_id']
with patch('homeassistant.components.camera.async_request_stream', side_effect=HomeAssistantError()):
resp = (await webhook_client.post(f'/api/webhook/{webhook_id}', json={'type': 'stream_camera', 'data': {'camera_entity_id': 'camera.stream_camera'}}))
assert (resp.status == 200)
webhook_json = (await resp.json())
assert (webhook_json['hls_path'] is None)
assert (webhook_json['mjpeg_path'] == '/api/camera_proxy_stream/camera.stream_camera') |
@callback
def store_event(event):
'Helper to store events.'
events.append(event) | -6,398,689,500,183,424,000 | Helper to store events. | tests/components/mobile_app/test_webhook.py | store_event | Bonnee/core | python | @callback
def store_event(event):
events.append(event) |
def __init__(self, host='localhost', port=8125, max_buffer_size=50):
'Initialize an Offline Connection object.\n\n >>> monascastatsd = MonascaStatsd()\n\n :name: the name for this client. Everything sent by this client\n will be prefixed by name\n :param host: the host of the MonascaStatsd server.\n :param port: the port of the MonascaStatsd server.\n :param max_buffer_size: Maximum number of metric to buffer before\n sending to the server if sending metrics in batch\n '
self.max_buffer_size = max_buffer_size
self._send = self._send_to_server
self.connect(host, port)
self.encoding = 'utf-8' | 6,217,063,500,374,581,000 | Initialize an Offline Connection object.
>>> monascastatsd = MonascaStatsd()
:name: the name for this client. Everything sent by this client
will be prefixed by name
:param host: the host of the MonascaStatsd server.
:param port: the port of the MonascaStatsd server.
:param max_buffer_size: Maximum number of metric to buffer before
sending to the server if sending metrics in batch | monasca_notification/common/utils.py | __init__ | martinchacon/monasca-notification | python | def __init__(self, host='localhost', port=8125, max_buffer_size=50):
'Initialize an Offline Connection object.\n\n >>> monascastatsd = MonascaStatsd()\n\n :name: the name for this client. Everything sent by this client\n will be prefixed by name\n :param host: the host of the MonascaStatsd server.\n :param port: the port of the MonascaStatsd server.\n :param max_buffer_size: Maximum number of metric to buffer before\n sending to the server if sending metrics in batch\n '
self.max_buffer_size = max_buffer_size
self._send = self._send_to_server
self.connect(host, port)
self.encoding = 'utf-8' |
def connect(self, host, port):
'Avoid connecting to the monascastatsd server.\n\n '
pass | -6,134,275,743,187,880,000 | Avoid connecting to the monascastatsd server. | monasca_notification/common/utils.py | connect | martinchacon/monasca-notification | python | def connect(self, host, port):
'\n\n '
pass |
def value_iteration(env, gamma, epsilon):
' Solves the shortest path problem using value iteration\n :input town_map env : The town_map environment in which we seek to\n find the shortest path.\n :input float gamma : The discount factor.\n :input float epsilon : accuracy of the value iteration procedure.\n :return numpy.array V : Optimal values for every state at every\n time, dimension S*T\n :return numpy.array policy: Optimal time-varying policy at every state,\n dimension S*T\n '
p = env.transition_probabilities
r = env.rewards
n_states = env.n_states
n_actions = env.n_actions
V = np.zeros(n_states)
Q = np.zeros((n_states, n_actions))
BV = np.zeros(n_states)
n = 0
tol = (((1 - gamma) * epsilon) / gamma)
for s in range(n_states):
for a in range(n_actions):
Q[(s, a)] = (r[(s, a)] + (gamma * np.dot(p[:, s, a], V)))
BV = np.max(Q, 1)
while ((np.linalg.norm((V - BV)) >= tol) and (n < 2600)):
n += 1
V = np.copy(BV)
for s in range(n_states):
for a in range(n_actions):
Q[(s, a)] = (r[(s, a)] + (gamma * np.dot(p[:, s, a], V)))
BV = np.max(Q, 1)
policy = np.argmax(Q, 1)
return (V, policy) | 1,402,534,937,885,546,500 | Solves the shortest path problem using value iteration
:input town_map env : The town_map environment in which we seek to
find the shortest path.
:input float gamma : The discount factor.
:input float epsilon : accuracy of the value iteration procedure.
:return numpy.array V : Optimal values for every state at every
time, dimension S*T
:return numpy.array policy: Optimal time-varying policy at every state,
dimension S*T | Assignment 2/robbing_banks.py | value_iteration | takeitbillykyle/EL2805-Reinforcement-Learning- | python | def value_iteration(env, gamma, epsilon):
' Solves the shortest path problem using value iteration\n :input town_map env : The town_map environment in which we seek to\n find the shortest path.\n :input float gamma : The discount factor.\n :input float epsilon : accuracy of the value iteration procedure.\n :return numpy.array V : Optimal values for every state at every\n time, dimension S*T\n :return numpy.array policy: Optimal time-varying policy at every state,\n dimension S*T\n '
p = env.transition_probabilities
r = env.rewards
n_states = env.n_states
n_actions = env.n_actions
V = np.zeros(n_states)
Q = np.zeros((n_states, n_actions))
BV = np.zeros(n_states)
n = 0
tol = (((1 - gamma) * epsilon) / gamma)
for s in range(n_states):
for a in range(n_actions):
Q[(s, a)] = (r[(s, a)] + (gamma * np.dot(p[:, s, a], V)))
BV = np.max(Q, 1)
while ((np.linalg.norm((V - BV)) >= tol) and (n < 2600)):
n += 1
V = np.copy(BV)
for s in range(n_states):
for a in range(n_actions):
Q[(s, a)] = (r[(s, a)] + (gamma * np.dot(p[:, s, a], V)))
BV = np.max(Q, 1)
policy = np.argmax(Q, 1)
return (V, policy) |
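# Tiny worked example (not part of the assignment code) of the same Bellman
# backup and stopping tolerance on a hypothetical 2-state, 2-action MDP.
# The tensor uses the p[next_state, state, action] layout assumed above.
import numpy as np

n_states, n_actions = 2, 2
p = np.zeros((n_states, n_states, n_actions))
p[:, 0, 0] = [1.0, 0.0]                   # state 0, action 0: stay in state 0
p[:, 0, 1] = [0.0, 1.0]                   # state 0, action 1: move to state 1
p[:, 1, :] = [[0.0, 0.0], [1.0, 1.0]]     # state 1 is absorbing
r = np.array([[0.0, 1.0],                 # reward r[s, a]
              [0.0, 0.0]])
gamma, epsilon = 0.9, 1e-6
tol = (1 - gamma) * epsilon / gamma

V = np.zeros(n_states)
while True:
    Q = r + gamma * np.einsum('nsa,n->sa', p, V)   # same as dot(p[:, s, a], V)
    BV = Q.max(axis=1)
    if np.linalg.norm(V - BV) < tol:
        break
    V = BV
print(V, Q.argmax(axis=1))                # V ~ [1, 0]; best action in state 0 is 1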
def __init__(self, town_map):
' Constructor of the environment town_map.\n '
self.STEP_REWARD = 0
self.BANK_REWARD = 10
self.CAUGHT_REWARD = (- 50)
self.town_map = town_map
self.initial_state = np.array([0, 0, 1, 2])
self.actions = self.__actions()
(self.states, self.map) = self.__states()
self.n_actions = len(self.actions)
self.n_states = len(self.states)
self.transition_probabilities = self.__transitions()
self.rewards = self.__rewards() | 6,462,737,220,212,363,000 | Constructor of the environment town_map. | Assignment 2/robbing_banks.py | __init__ | takeitbillykyle/EL2805-Reinforcement-Learning- | python | def __init__(self, town_map):
' \n '
self.STEP_REWARD = 0
self.BANK_REWARD = 10
self.CAUGHT_REWARD = (- 50)
self.town_map = town_map
self.initial_state = np.array([0, 0, 1, 2])
self.actions = self.__actions()
(self.states, self.map) = self.__states()
self.n_actions = len(self.actions)
self.n_states = len(self.states)
self.transition_probabilities = self.__transitions()
self.rewards = self.__rewards() |
def __move(self, state, action):
' Makes a step in the town_map, given a current position and an action.\n If the action STAY or an inadmissible action is used, the robber stays in place.\n\n :return integer next_cell corresponding to position (x,y) x (x,y) on the town_map that agent transitions to.\n '
row = (self.states[state][0] + self.actions[action][0])
col = (self.states[state][1] + self.actions[action][1])
hitting_town_walls = ((row == (- 1)) or (row == self.town_map.shape[0]) or (col == (- 1)) or (col == self.town_map.shape[1]))
list_police_pos = self.__police_positions(state)
new_police_pos = list_police_pos[np.random.randint(len(list_police_pos))]
caught = all((self.states[state][0:2] == self.states[state][2:]))
if caught:
return self.map[tuple(self.initial_state)]
elif hitting_town_walls:
return state
else:
return self.map[(row, col, new_police_pos[0], new_police_pos[1])] | 4,031,673,675,314,286,600 | Makes a step in the town_map, given a current position and an action.
If the action STAY or an inadmissible action is used, the robber stays in place.
:return integer next_cell corresponding to position (x,y) x (x,y) on the town_map that agent transitions to. | Assignment 2/robbing_banks.py | __move | takeitbillykyle/EL2805-Reinforcement-Learning- | python | def __move(self, state, action):
' Makes a step in the town_map, given a current position and an action.\n If the action STAY or an inadmissible action is used, the robber stays in place.\n\n :return integer next_cell corresponding to position (x,y) x (x,y) on the town_map that agent transitions to.\n '
row = (self.states[state][0] + self.actions[action][0])
col = (self.states[state][1] + self.actions[action][1])
hitting_town_walls = ((row == (- 1)) or (row == self.town_map.shape[0]) or (col == (- 1)) or (col == self.town_map.shape[1]))
list_police_pos = self.__police_positions(state)
new_police_pos = list_police_pos[np.random.randint(len(list_police_pos))]
caught = all((self.states[state][0:2] == self.states[state][2:]))
if caught:
return self.map[tuple(self.initial_state)]
elif hitting_town_walls:
return state
else:
return self.map[(row, col, new_police_pos[0], new_police_pos[1])] |
def __police_positions(self, state):
'\n Input: The state as an int\n Returns: A list of possible new police positions from current state \n '
agent_pos = self.states[state][0:2]
police_pos = self.states[state][2:]
diff_pos = np.sign((agent_pos - police_pos))
list_pos = ([[1, 0], [(- 1), 0], [0, diff_pos[1]]] if (diff_pos[0] == 0) else ([[0, 1], [0, (- 1)], [diff_pos[0], 0]] if (diff_pos[1] == 0) else [[0, diff_pos[1]], [diff_pos[0], 0]]))
list_pos += police_pos
list_pos = list(filter(None, [(tuple(pos) * ((0 <= pos[0] < self.town_map.shape[0]) and (0 <= pos[1] < self.town_map.shape[1]))) for pos in list_pos]))
return list_pos | 3,609,533,900,443,011,000 | Input: The state as an int
Returns: A list of possible new police positions from current state | Assignment 2/robbing_banks.py | __police_positions | takeitbillykyle/EL2805-Reinforcement-Learning- | python | def __police_positions(self, state):
'\n Input: The state as an int\n Returns: A list of possible new police positions from current state \n '
agent_pos = self.states[state][0:2]
police_pos = self.states[state][2:]
diff_pos = np.sign((agent_pos - police_pos))
list_pos = ([[1, 0], [(- 1), 0], [0, diff_pos[1]]] if (diff_pos[0] == 0) else ([[0, 1], [0, (- 1)], [diff_pos[0], 0]] if (diff_pos[1] == 0) else [[0, diff_pos[1]], [diff_pos[0], 0]]))
list_pos += police_pos
list_pos = list(filter(None, [(tuple(pos) * ((0 <= pos[0] < self.town_map.shape[0]) and (0 <= pos[1] < self.town_map.shape[1]))) for pos in list_pos]))
return list_pos |
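# Standalone sketch (an illustrative helper, not part of the class above) of
# the chase rule encoded in __police_positions: the police never steps away
# from the robber along an axis where their coordinates differ. The grid
# shape below is a placeholder.
import numpy as np

def police_moves(agent_pos, police_pos, shape=(3, 6)):
    agent_pos, police_pos = np.array(agent_pos), np.array(police_pos)
    diff = np.sign(agent_pos - police_pos)
    if diff[0] == 0:
        deltas = [[1, 0], [-1, 0], [0, diff[1]]]
    elif diff[1] == 0:
        deltas = [[0, 1], [0, -1], [diff[0], 0]]
    else:
        deltas = [[0, diff[1]], [diff[0], 0]]
    cand = [tuple(int(x) for x in police_pos + d) for d in deltas]
    return [c for c in cand if 0 <= c[0] < shape[0] and 0 <= c[1] < shape[1]]

print(police_moves((0, 0), (1, 2)))       # [(1, 1), (0, 2)]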
def __transitions(self):
' Computes the transition probabilities for every state action pair.\n :return numpy.tensor transition probabilities: tensor of transition\n probabilities of dimension S*S*A\n '
dimensions = (self.n_states, self.n_states, self.n_actions)
transition_probabilities = np.zeros(dimensions)
for s in range(self.n_states):
if ((self.states[s][0], self.states[s][1]) == (self.states[s][2], self.states[s][3])):
transition_probabilities[self.initial_state, s, :] = (1 / 3)
else:
for a in range(self.n_actions):
list_pos = self.__police_positions(s)
for police_pos in list_pos:
next_s = self.__move(s, a)
new_pos = np.copy(self.states[next_s])
new_pos[2:] = police_pos
next_s = self.map[tuple(new_pos)]
transition_probabilities[(next_s, s, a)] = (1 / len(list_pos))
return transition_probabilities | 499,550,621,826,146,400 | Computes the transition probabilities for every state action pair.
:return numpy.tensor transition probabilities: tensor of transition
probabilities of dimension S*S*A | Assignment 2/robbing_banks.py | __transitions | takeitbillykyle/EL2805-Reinforcement-Learning- | python | def __transitions(self):
' Computes the transition probabilities for every state action pair.\n :return numpy.tensor transition probabilities: tensor of transition\n probabilities of dimension S*S*A\n '
dimensions = (self.n_states, self.n_states, self.n_actions)
transition_probabilities = np.zeros(dimensions)
for s in range(self.n_states):
if ((self.states[s][0], self.states[s][1]) == (self.states[s][2], self.states[s][3])):
transition_probabilities[self.initial_state, s, :] = (1 / 3)
else:
for a in range(self.n_actions):
list_pos = self.__police_positions(s)
for police_pos in list_pos:
next_s = self.__move(s, a)
new_pos = np.copy(self.states[next_s])
new_pos[2:] = police_pos
next_s = self.map[tuple(new_pos)]
transition_probabilities[(next_s, s, a)] = (1 / len(list_pos))
return transition_probabilities |
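# A quick invariant check one would typically run on a tensor built with the
# p[next_state, state, action] layout above: summing over next states must
# give 1 for every (state, action) pair. The tiny tensor here is illustrative.
import numpy as np

def check_transition_tensor(p):
    assert np.allclose(p.sum(axis=0), 1.0), 'each (state, action) column must be a distribution'

p = np.zeros((2, 2, 2))
p[:, 0, 0] = [1.0, 0.0]
p[:, 0, 1] = [0.0, 1.0]
p[:, 1, :] = [[0.0, 0.0], [1.0, 1.0]]
check_transition_tensor(p)
print('ok')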
@classmethod
def _verify_local_backends(cls):
'\n Return the local backends in `SDK_STANDARD_BACKENDS` that are\n effectively available (as some of them might depend on the presence\n of an optional dependency or on the existence of a binary).\n\n Returns:\n dict[str:BaseBackend]: a dict of the local backends instances for\n the backends that could be instantiated, keyed by backend name.\n '
ret = {}
for backend_cls in SDK_STANDARD_BACKENDS:
try:
backend_instance = cls._get_backend_instance(backend_cls)
backend_name = backend_instance.configuration['name']
ret[backend_name] = backend_instance
except QISKitError as e:
logger.info('local backend %s is not available: %s', backend_cls, str(e))
return ret | 8,764,399,197,171,842,000 | Return the local backends in `SDK_STANDARD_BACKENDS` that are
effectively available (as some of them might depend on the presence
of an optional dependency or on the existence of a binary).
Returns:
dict[str:BaseBackend]: a dict of the local backends instances for
the backends that could be instantiated, keyed by backend name. | qiskit/backends/local/localprovider.py | _verify_local_backends | Hosseinyeganeh/qiskit-core | python | @classmethod
def _verify_local_backends(cls):
'\n Return the local backends in `SDK_STANDARD_BACKENDS` that are\n effectively available (as some of them might depend on the presence\n of an optional dependency or on the existence of a binary).\n\n Returns:\n dict[str:BaseBackend]: a dict of the local backends instances for\n the backends that could be instantiated, keyed by backend name.\n '
ret = {}
for backend_cls in SDK_STANDARD_BACKENDS:
try:
backend_instance = cls._get_backend_instance(backend_cls)
backend_name = backend_instance.configuration['name']
ret[backend_name] = backend_instance
except QISKitError as e:
logger.info('local backend %s is not available: %s', backend_cls, str(e))
return ret |
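# Generic sketch of the probing pattern above (names here are illustrative,
# not part of qiskit): try to instantiate each optional backend and keep only
# the ones that come up cleanly, keyed by their reported name.
def available_backends(backend_classes):
    ret = {}
    for cls in backend_classes:
        try:
            instance = cls()
            ret[instance.configuration['name']] = instance
        except Exception as err:          # missing binary / optional dependency
            print('skipping %r: %s' % (cls, err))
    return ret

class _Dummy:
    configuration = {'name': 'dummy_simulator'}

print(available_backends([_Dummy, dict]))  # dict() has no .configuration -> skipped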
@classmethod
def _get_backend_instance(cls, backend_cls):
'\n Return an instance of a backend from its class.\n\n Args:\n backend_cls (class): Backend class.\n Returns:\n BaseBackend: a backend instance.\n Raises:\n QISKitError: if the backend could not be instantiated or does not\n provide a valid configuration containing a name.\n '
try:
backend_instance = backend_cls()
except Exception as err:
raise QISKitError(('Backend %s could not be instantiated: %s' % (cls, err)))
try:
_ = backend_instance.configuration['name']
except (LookupError, TypeError):
raise QISKitError('Backend %s has an invalid configuration' % backend_cls)
return backend_instance | 2,972,808,443,166,617,000 | Return an instance of a backend from its class.
Args:
backend_cls (class): Backend class.
Returns:
BaseBackend: a backend instance.
Raises:
QISKitError: if the backend could not be instantiated or does not
provide a valid configuration containing a name. | qiskit/backends/local/localprovider.py | _get_backend_instance | Hosseinyeganeh/qiskit-core | python | @classmethod
def _get_backend_instance(cls, backend_cls):
'\n Return an instance of a backend from its class.\n\n Args:\n backend_cls (class): Backend class.\n Returns:\n BaseBackend: a backend instance.\n Raises:\n QISKitError: if the backend could not be instantiated or does not\n provide a valid configuration containing a name.\n '
try:
backend_instance = backend_cls()
except Exception as err:
raise QISKitError(('Backend %s could not be instantiated: %s' % (cls, err)))
try:
_ = backend_instance.configuration['name']
except (LookupError, TypeError):
raise QISKitError('Backend %s has an invalid configuration' % backend_cls)
return backend_instance |
@classmethod
def add_source(cls, source):
'\n A convenience method for downstream modules to add channel\n source types once they have implemented the step in the wizard\n below.\n\n This method must be called from `__setup__` method of downstream\n module.\n '
source_leaf = cls.channel.domain[0][2]
if (source not in source_leaf):
source_leaf.append(source) | 5,484,621,005,522,392,000 | A convenience method for downstream modules to add channel
source types once they have implemented the step in the wizard
below.
This method must be called from `__setup__` method of downstream
module. | product.py | add_source | aniforprez/trytond-sale-channel | python | @classmethod
def add_source(cls, source):
'\n A convenience method for downstream modules to add channel\n source types once they have implemented the step in the wizard\n below.\n\n This method must be called from `__setup__` method of downstream\n module.\n '
source_leaf = cls.channel.domain[0][2]
if (source not in source_leaf):
source_leaf.append(source) |
@classmethod
def __setup__(cls):
'\n Setup the class and define constraints\n '
super(TemplateSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [('channel_template_unique', Unique(table, table.channel, table.template_identifier, table.template), 'Product Template is already mapped to this channel with same identifier')] | 6,125,404,840,998,321,000 | Setup the class and define constraints | product.py | __setup__ | aniforprez/trytond-sale-channel | python | @classmethod
def __setup__(cls):
'\n \n '
super(TemplateSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [('channel_template_unique', Unique(table, table.channel, table.template_identifier, table.template), 'Product Template is already mapped to this channel with same identifier')] |
@classmethod
def create_from(cls, channel, product_data):
'\n Create the product for the channel\n '
raise NotImplementedError(('create_from is not implemented in product for %s channels' % channel.source)) | -3,979,193,719,550,255,600 | Create the product for the channel | product.py | create_from | aniforprez/trytond-sale-channel | python | @classmethod
def create_from(cls, channel, product_data):
'\n \n '
raise NotImplementedError(('create_from is not implemented in product for %s channels' % channel.source)) |
@classmethod
def get_listing_url(cls, records, name):
'\n Downstream modules should implement this function\n and return a valid url\n '
return dict.fromkeys([r.id for r in records]) | -7,361,309,929,933,427,000 | Downstream modules should implement this function
and return a valid url | product.py | get_listing_url | aniforprez/trytond-sale-channel | python | @classmethod
def get_listing_url(cls, records, name):
'\n Downstream modules should implement this function\n and return a valid url\n '
return dict.fromkeys([r.id for r in records]) |
@classmethod
def __setup__(cls):
'\n Setup the class and define constraints\n '
super(ProductSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [('channel_product_identifier_uniq', Unique(table, table.channel, table.product_identifier), 'This external product is already mapped with same channel.')]
cls._buttons.update({'export_inventory_button': {}}) | -3,085,905,198,177,834,500 | Setup the class and define constraints | product.py | __setup__ | aniforprez/trytond-sale-channel | python | @classmethod
def __setup__(cls):
'\n \n '
super(ProductSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [('channel_product_identifier_uniq', Unique(table, table.channel, table.product_identifier), 'This external product is already mapped with same channel.')]
cls._buttons.update({'export_inventory_button': {}}) |
@classmethod
def create_from(cls, channel, product_data):
'\n Create a listing for the product from channel and data\n '
raise NotImplementedError(('create_from is not implemented in channel listing for %s channels' % channel.source)) | 4,653,899,921,623,539,000 | Create a listing for the product from channel and data | product.py | create_from | aniforprez/trytond-sale-channel | python | @classmethod
def create_from(cls, channel, product_data):
'\n \n '
raise NotImplementedError(('create_from is not implemented in channel listing for %s channels' % channel.source)) |
def export_inventory(self):
'\n Export listing.product inventory to listing.channel\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement exporting or call\n super to delegate.\n '
raise NotImplementedError(('Export inventory is not implemented for %s channels' % self.channel.source)) | 5,280,339,303,968,193,000 | Export listing.product inventory to listing.channel
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement exporting or call
super to delegate. | product.py | export_inventory | aniforprez/trytond-sale-channel | python | def export_inventory(self):
'\n Export listing.product inventory to listing.channel\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement exporting or call\n super to delegate.\n '
raise NotImplementedError(('Export inventory is not implemented for %s channels' % self.channel.source)) |
@classmethod
def export_bulk_inventory(cls, listings):
'\n Export listing.product inventory to listing.channel in bulk\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement bulk exporting for\n respective channels.\n Default behaviour is to export inventory individually.\n '
for listing in listings:
listing.export_inventory() | -1,496,527,611,567,674,400 | Export listing.product inventory to listing.channel in bulk
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement bulk exporting for
respective channels.
Default behaviour is to export inventory individually. | product.py | export_bulk_inventory | aniforprez/trytond-sale-channel | python | @classmethod
def export_bulk_inventory(cls, listings):
'\n Export listing.product inventory to listing.channel in bulk\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement bulk exporting for\n respective channels.\n Default behaviour is to export inventory individually.\n '
for listing in listings:
listing.export_inventory() |
def import_product_image(self):
'\n Import specific product image from external channel based on product\n identifier.\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement importing or call\n super to delegate.\n '
raise NotImplementedError(('Method import_product_image is not implemented for %s channel yet' % self.source)) | 6,288,277,365,324,206,000 | Import specific product image from external channel based on product
identifier.
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement importing or call
super to delegate. | product.py | import_product_image | aniforprez/trytond-sale-channel | python | def import_product_image(self):
'\n Import specific product image from external channel based on product\n identifier.\n\n Since external channels are implemented by downstream modules, it is\n the responsibility of those channels to implement importing or call\n super to delegate.\n '
raise NotImplementedError(('Method import_product_image is not implemented for %s channel yet' % self.source)) |
def get_availability_context(self):
'\n Allow overriding the context used to compute availability of\n products.\n '
return {'locations': [self.channel.warehouse.id]} | 6,406,106,973,323,258,000 | Allow overriding the context used to compute availability of
products. | product.py | get_availability_context | aniforprez/trytond-sale-channel | python | def get_availability_context(self):
'\n Allow overriding the context used to compute availability of\n products.\n '
return {'locations': [self.channel.warehouse.id]} |
def get_availability(self):
'\n Return the availability of the product for this listing\n '
Product = Pool().get('product.product')
with Transaction().set_context(**self.get_availability_context()):
rv = {'type': 'bucket', 'value': None, 'quantity': None}
if self.product:
product = Product(self.product.id)
rv['quantity'] = product.quantity
if (rv['quantity'] > 0):
rv['value'] = 'in_stock'
else:
rv['value'] = 'out_of_stock'
return rv | 5,950,218,621,468,469,000 | Return the availability of the product for this listing | product.py | get_availability | aniforprez/trytond-sale-channel | python | def get_availability(self):
'\n \n '
Product = Pool().get('product.product')
with Transaction().set_context(**self.get_availability_context()):
rv = {'type': 'bucket', 'value': None, 'quantity': None}
if self.product:
product = Product(self.product.id)
rv['quantity'] = product.quantity
if (rv['quantity'] > 0):
rv['value'] = 'in_stock'
else:
rv['value'] = 'out_of_stock'
return rv |
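# Condensed sketch of the quantity -> bucket mapping used above. The helper
# name is hypothetical, and unlike the original it does not keep value=None
# for listings without a product.
def availability_bucket(quantity):
    return {
        'type': 'bucket',
        'quantity': quantity,
        'value': 'in_stock' if quantity and quantity > 0 else 'out_of_stock',
    }

print(availability_bucket(3))   # {'type': 'bucket', 'quantity': 3, 'value': 'in_stock'}
print(availability_bucket(0))   # {'type': 'bucket', 'quantity': 0, 'value': 'out_of_stock'}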
def maxChunksToSorted(self, arr):
'\n :type arr: List[int]\n :rtype: int\n '
stacks = []
for num in arr:
if (not stacks):
stacks.append([num])
elif (num >= stacks[(- 1)][0]):
stacks.append([num])
else:
stacks[(- 1)].append(num)
while (len(stacks) >= 2):
if (num < stacks[(- 2)][0]):
stacks[(- 2)][0] = max(stacks[(- 2)][0], stacks[(- 1)][0])
stacks[(- 2)].extend(stacks.pop())
else:
break
return len(stacks) | 7,987,127,453,734,050,000 | :type arr: List[int]
:rtype: int | p768_max_chunks_to_make_sorted_ii.py | maxChunksToSorted | feigaochn/leetcode | python | def maxChunksToSorted(self, arr):
'\n :type arr: List[int]\n :rtype: int\n '
stacks = []
for num in arr:
if (not stacks):
stacks.append([num])
elif (num >= stacks[(- 1)][0]):
stacks.append([num])
else:
stacks[(- 1)].append(num)
while (len(stacks) >= 2):
if (num < stacks[(- 2)][0]):
stacks[(- 2)][0] = max(stacks[(- 2)][0], stacks[(- 1)][0])
stacks[(- 2)].extend(stacks.pop())
else:
break
return len(stacks) |
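# Standalone restatement of the idea above (a plain function instead of the
# LeetCode Solution method): keep one running maximum per chunk; a value
# smaller than the last chunk's maximum forces those chunks to merge.
def max_chunks_to_sorted(arr):
    chunk_maxes = []                      # non-decreasing maxima, one per chunk
    for num in arr:
        if chunk_maxes and num < chunk_maxes[-1]:
            top = chunk_maxes.pop()       # this chunk absorbs the new value...
            while chunk_maxes and num < chunk_maxes[-1]:
                chunk_maxes.pop()         # ...and every earlier chunk it breaks
            chunk_maxes.append(top)
        else:
            chunk_maxes.append(num)
    return len(chunk_maxes)

print(max_chunks_to_sorted([5, 4, 3, 2, 1]))   # 1
print(max_chunks_to_sorted([2, 1, 3, 4, 4]))   # 4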
def _get_lsp_admin_group_include_any_group_id(self):
'\n Getter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)\n\n YANG Description: Include any admin group id\n '
return self.__lsp_admin_group_include_any_group_id | -1,830,173,705,894,686,500 | Getter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)
YANG Description: Include any admin group id | pybind/slxos/v17r_1_01a/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/__init__.py | _get_lsp_admin_group_include_any_group_id | extremenetworks/pybind | python | def _get_lsp_admin_group_include_any_group_id(self):
'\n Getter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)\n\n YANG Description: Include any admin group id\n '
return self.__lsp_admin_group_include_any_group_id |
def _set_lsp_admin_group_include_any_group_id(self, v, load=False):
'\n Setter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_lsp_admin_group_include_any_group_id is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_lsp_admin_group_include_any_group_id() directly.\n\n YANG Description: Include any admin group id\n '
parent = getattr(self, '_parent', None)
if ((parent is not None) and (load is False)):
raise AttributeError(('Cannot set keys directly when' + ' within an instantiated list'))
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name='lsp-admin-group-include-any-group-id', rest_name='lsp-admin-group-include-any-group-id', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'lsp_admin_group_include_any_group_id must be of a type compatible with uint32', 'defined-type': 'uint32', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..4294967295\']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'uint32\', is_config=True)'})
self.__lsp_admin_group_include_any_group_id = t
if hasattr(self, '_set'):
self._set() | -4,791,081,038,477,707,000 | Setter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_any_group_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_any_group_id() directly.
YANG Description: Include any admin group id | pybind/slxos/v17r_1_01a/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/__init__.py | _set_lsp_admin_group_include_any_group_id | extremenetworks/pybind | python | def _set_lsp_admin_group_include_any_group_id(self, v, load=False):
'\n Setter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_lsp_admin_group_include_any_group_id is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_lsp_admin_group_include_any_group_id() directly.\n\n YANG Description: Include any admin group id\n '
parent = getattr(self, '_parent', None)
if ((parent is not None) and (load is False)):
raise AttributeError(('Cannot set keys directly when' + ' within an instantiated list'))
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name='lsp-admin-group-include-any-group-id', rest_name='lsp-admin-group-include-any-group-id', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'lsp_admin_group_include_any_group_id must be of a type compatible with uint32', 'defined-type': 'uint32', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..4294967295\']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'uint32\', is_config=True)'})
self.__lsp_admin_group_include_any_group_id = t
if hasattr(self, '_set'):
self._set() |
def draw(self):
'Draw the line.\n\n Returns\n -------\n list\n The GUIDs of the created Rhino objects.\n\n '
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end, 'color': self.color, 'name': self.name}]
guids = compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids | 8,097,840,954,285,600,000 | Draw the line.
Returns
-------
list
The GUIDs of the created Rhino objects. | src/compas_rhino/artists/lineartist.py | draw | KEERTHANAUDAY/compas | python | def draw(self):
'Draw the line.\n\n Returns\n -------\n list\n The GUIDs of the created Rhino objects.\n\n '
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end, 'color': self.color, 'name': self.name}]
guids = compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids |
@staticmethod
def draw_collection(collection, names=None, colors=None, layer=None, clear=False, add_to_group=False, group_name=None):
'Draw a collection of lines.\n\n Parameters\n ----------\n collection: list of compas.geometry.Line\n A collection of ``Line`` objects.\n names : list of str, optional\n Individual names for the lines.\n colors : color or list of color, optional\n A color specification for the lines as a single color or a list of individual colors.\n layer : str, optional\n A layer path.\n clear : bool, optional\n Clear the layer before drawing.\n add_to_group : bool, optional\n Add the frames to a group.\n group_name : str, optional\n Name of the group.\n\n Returns\n -------\n guids: list\n A list of GUIDs if the collection is not grouped.\n groupname: str\n The name of the group if the collection objects are grouped.\n\n '
lines = [{'start': list(line[0]), 'end': list(line[1])} for line in collection]
if colors:
if isinstance(colors[0], (int, float)):
colors = iterable_like(collection, [colors], colors)
else:
colors = iterable_like(collection, colors, colors[0])
for (line, rgb) in zip(lines, colors):
line['color'] = rgb
if names:
if isinstance(names, basestring):
names = iterable_like(collection, [names], names)
else:
names = iterable_like(collection, names, names[0])
for (line, name) in zip(lines, names):
line['name'] = name
guids = compas_rhino.draw_lines(lines, layer=layer, clear=clear)
if (not add_to_group):
return guids
group = compas_rhino.rs.AddGroup(group_name)
if group:
compas_rhino.rs.AddObjectsToGroup(guids, group)
return group | 5,524,344,590,940,111,000 | Draw a collection of lines.
Parameters
----------
collection: list of compas.geometry.Line
A collection of ``Line`` objects.
names : list of str, optional
Individual names for the lines.
colors : color or list of color, optional
A color specification for the lines as a single color or a list of individual colors.
layer : str, optional
A layer path.
clear : bool, optional
Clear the layer before drawing.
add_to_group : bool, optional
Add the frames to a group.
group_name : str, optional
Name of the group.
Returns
-------
guids: list
A list of GUIDs if the collection is not grouped.
groupname: str
The name of the group if the collection objects are grouped. | src/compas_rhino/artists/lineartist.py | draw_collection | KEERTHANAUDAY/compas | python | @staticmethod
def draw_collection(collection, names=None, colors=None, layer=None, clear=False, add_to_group=False, group_name=None):
'Draw a collection of lines.\n\n Parameters\n ----------\n collection: list of compas.geometry.Line\n A collection of ``Line`` objects.\n names : list of str, optional\n Individual names for the lines.\n colors : color or list of color, optional\n A color specification for the lines as a single color or a list of individual colors.\n layer : str, optional\n A layer path.\n clear : bool, optional\n Clear the layer before drawing.\n add_to_group : bool, optional\n Add the frames to a group.\n group_name : str, optional\n Name of the group.\n\n Returns\n -------\n guids: list\n A list of GUIDs if the collection is not grouped.\n groupname: str\n The name of the group if the collection objects are grouped.\n\n '
lines = [{'start': list(line[0]), 'end': list(line[1])} for line in collection]
if colors:
if isinstance(colors[0], (int, float)):
colors = iterable_like(collection, [colors], colors)
else:
colors = iterable_like(collection, colors, colors[0])
for (line, rgb) in zip(lines, colors):
line['color'] = rgb
if names:
if isinstance(names, basestring):
names = iterable_like(collection, [names], names)
else:
names = iterable_like(collection, names, names[0])
for (line, name) in zip(lines, names):
line['name'] = name
guids = compas_rhino.draw_lines(lines, layer=layer, clear=clear)
if (not add_to_group):
return guids
group = compas_rhino.rs.AddGroup(group_name)
if group:
compas_rhino.rs.AddObjectsToGroup(guids, group)
return group |
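A minimal usage sketch for the draw_collection staticmethod above. It is only a sketch: it assumes the code runs inside Rhino with compas and compas_rhino available, and the layer path, group name and coordinates are placeholder values, not anything taken from the record.
from compas.geometry import Line
from compas_rhino.artists import LineArtist

# Two short lines in the XY plane; coordinates are arbitrary example values.
lines = [Line([0, 0, 0], [1, 0, 0]), Line([0, 1, 0], [1, 1, 0])]

# Draw the whole batch in one call, apply a single color, and group the result.
group = LineArtist.draw_collection(
    lines,
    colors=(255, 0, 0),       # one RGB tuple is broadcast to every line
    layer="Example::Lines",   # hypothetical layer path
    clear=True,
    add_to_group=True,
    group_name="example-lines",
)
print(group)  # name of the Rhino group holding the new line objects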
def main():
'\n connects the pieces to grab posts from reddit and throw them on twitter\n '
reddit = Reddit()
twitter = Twitter()
tweets = reddit.get_tweets()
print('sending {} tweets'.format(len(tweets)))
for tweet in tweets:
status = twitter.send_tweet(tweet.Primary)
if tweet.Second:
twitter.send_tweet(tweet.Second, status.id)
sleep(90) | 3,298,083,492,189,179,000 | connects the pieces to grab posts from reddit and throw them on twitter | tweet_bot.py | main | seanneal/tweetbot | python | def main():
'\n \n '
reddit = Reddit()
twitter = Twitter()
tweets = reddit.get_tweets()
print('sending {} tweets'.format(len(tweets)))
for tweet in tweets:
status = twitter.send_tweet(tweet.Primary)
if tweet.Second:
twitter.send_tweet(tweet.Second, status.id)
sleep(90) |
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
'\n Find the appropriate cython function, casting if necessary.\n\n Parameters\n ----------\n values : np.ndarray\n is_numeric : bool\n\n Returns\n -------\n func : callable\n values : np.ndarray\n '
how = self.how
kind = self.kind
if (how in ['median', 'cumprod']):
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(f'function is not implemented for this dtype: [how->{how},dtype->{values.dtype.name}]')
func = getattr(libgroupby, f'group_{how}_float64')
return (func, values)
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if (values.dtype.kind in ['i', 'u']):
if (how in ['add', 'var', 'prod', 'mean', 'ohlc']):
values = ensure_float64(values)
return (func, values) | 5,217,492,559,264,406,000 | Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray | pandas/core/groupby/ops.py | get_cython_func_and_vals | CuteLemon/pandas | python | def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
'\n Find the appropriate cython function, casting if necessary.\n\n Parameters\n ----------\n values : np.ndarray\n is_numeric : bool\n\n Returns\n -------\n func : callable\n values : np.ndarray\n '
how = self.how
kind = self.kind
if (how in ['median', 'cumprod']):
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(f'function is not implemented for this dtype: [how->{how},dtype->{values.dtype.name}]')
func = getattr(libgroupby, f'group_{how}_float64')
return (func, values)
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if (values.dtype.kind in ['i', 'u']):
if (how in ['add', 'var', 'prod', 'mean', 'ohlc']):
values = ensure_float64(values)
return (func, values) |
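The casting described above (median and cumprod force float64 inputs, while integer kernels exist for other ops) is visible from the public groupby API. A small illustration, assuming a pandas version close to the one this record comes from:
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})  # int64 column
print(df.groupby("key")["val"].median().dtype)  # float64: ints are cast before the cython kernel runs
print(df.groupby("key")["val"].max().dtype)     # int64: max has an integer kernel, so no cast is needed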
def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool=False):
'\n Check if we can do this operation with our cython functions.\n\n Raises\n ------\n NotImplementedError\n This is either not a valid function for this dtype, or\n valid but not implemented in cython.\n '
how = self.how
if is_numeric:
return
if is_categorical_dtype(dtype):
if (how in ['add', 'prod', 'cumsum', 'cumprod']):
raise TypeError(f'{dtype} type does not support {how} operations')
raise NotImplementedError(f'{dtype} dtype not supported')
elif is_sparse(dtype):
raise NotImplementedError(f'{dtype} dtype not supported')
elif is_datetime64_any_dtype(dtype):
if (how in ['add', 'prod', 'cumsum', 'cumprod']):
raise TypeError(f'datetime64 type does not support {how} operations')
elif is_timedelta64_dtype(dtype):
if (how in ['prod', 'cumprod']):
raise TypeError(f'timedelta64 type does not support {how} operations') | -5,195,850,799,562,637,000 | Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython. | pandas/core/groupby/ops.py | _disallow_invalid_ops | CuteLemon/pandas | python | def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool=False):
'\n Check if we can do this operation with our cython functions.\n\n Raises\n ------\n NotImplementedError\n This is either not a valid function for this dtype, or\n valid but not implemented in cython.\n '
how = self.how
if is_numeric:
return
if is_categorical_dtype(dtype):
if (how in ['add', 'prod', 'cumsum', 'cumprod']):
raise TypeError(f'{dtype} type does not support {how} operations')
raise NotImplementedError(f'{dtype} dtype not supported')
elif is_sparse(dtype):
raise NotImplementedError(f'{dtype} dtype not supported')
elif is_datetime64_any_dtype(dtype):
if (how in ['add', 'prod', 'cumsum', 'cumprod']):
raise TypeError(f'datetime64 type does not support {how} operations')
elif is_timedelta64_dtype(dtype):
if (how in ['prod', 'cumprod']):
raise TypeError(f'timedelta64 type does not support {how} operations') |
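A hedged illustration of the guard above from the public API; the exact exception class and message differ across pandas versions, so this is a sketch rather than a guaranteed behaviour:
import pandas as pd

df = pd.DataFrame({
    "key": ["a", "a"],
    "when": pd.to_datetime(["2021-01-01", "2021-01-02"]),
})
try:
    df.groupby("key")["when"].cumsum()   # cumsum is rejected for datetime64 data
except Exception as err:                 # exception type varies by pandas version
    print(type(err).__name__, err)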
def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
'\n Get the desired dtype of a result based on the\n input dtype and how it was computed.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n Input dtype.\n\n Returns\n -------\n np.dtype or ExtensionDtype\n The desired dtype of the result.\n '
how = self.how
if (how in ['add', 'cumsum', 'sum', 'prod']):
if (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif (how in ['mean', 'median', 'var']):
if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Float64Dtype()
elif (is_float_dtype(dtype) or is_complex_dtype(dtype)):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype | 1,181,172,871,760,055,300 | Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype or ExtensionDtype
Input dtype.
Returns
-------
np.dtype or ExtensionDtype
The desired dtype of the result. | pandas/core/groupby/ops.py | _get_result_dtype | CuteLemon/pandas | python | def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
'\n Get the desired dtype of a result based on the\n input dtype and how it was computed.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n Input dtype.\n\n Returns\n -------\n np.dtype or ExtensionDtype\n The desired dtype of the result.\n '
how = self.how
if (how in ['add', 'cumsum', 'sum', 'prod']):
if (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif (how in ['mean', 'median', 'var']):
if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Float64Dtype()
elif (is_float_dtype(dtype) or is_complex_dtype(dtype)):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype |
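The result-dtype rules above can be observed on small inputs; a sketch assuming a recent pandas version with the nullable Int64/Float64 extension dtypes:
import pandas as pd

bools = pd.Series([True, False, True, True])
print(bools.groupby([0, 0, 1, 1]).sum().dtype)    # int64: boolean sums are widened

nullable = pd.Series([1, 2, 3, 4], dtype="Int64")  # masked integer extension dtype
print(nullable.groupby([0, 0, 1, 1]).mean().dtype) # Float64: mean of a masked integer dtype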
@final
def _ea_wrap_cython_operation(self, values: ExtensionArray, min_count: int, ngroups: int, comp_ids: np.ndarray, **kwargs) -> ArrayLike:
'\n If we have an ExtensionArray, unwrap, call _cython_operation, and\n re-wrap if appropriate.\n '
if (isinstance(values, BaseMaskedArray) and self.uses_mask()):
return self._masked_ea_wrap_cython_operation(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, **kwargs)
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
npvalues = values._ndarray.view('M8[ns]')
elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
npvalues = values.to_numpy('float64', na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
elif isinstance(values.dtype, StringDtype):
npvalues = values.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(f'function is not implemented for this dtype: {values.dtype}')
res_values = self._cython_op_ndim_compat(npvalues, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs)
if (self.how in ['rank']):
return res_values
return self._reconstruct_ea_result(values, res_values) | -3,675,165,534,000,637,000 | If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate. | pandas/core/groupby/ops.py | _ea_wrap_cython_operation | CuteLemon/pandas | python | @final
def _ea_wrap_cython_operation(self, values: ExtensionArray, min_count: int, ngroups: int, comp_ids: np.ndarray, **kwargs) -> ArrayLike:
'\n If we have an ExtensionArray, unwrap, call _cython_operation, and\n re-wrap if appropriate.\n '
if (isinstance(values, BaseMaskedArray) and self.uses_mask()):
return self._masked_ea_wrap_cython_operation(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, **kwargs)
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
npvalues = values._ndarray.view('M8[ns]')
elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
npvalues = values.to_numpy('float64', na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
elif isinstance(values.dtype, StringDtype):
npvalues = values.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(f'function is not implemented for this dtype: {values.dtype}')
res_values = self._cython_op_ndim_compat(npvalues, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs)
if (self.how in ['rank']):
return res_values
return self._reconstruct_ea_result(values, res_values) |
def _reconstruct_ea_result(self, values, res_values):
'\n Construct an ExtensionArray result from an ndarray result.\n '
if isinstance(values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)):
dtype = self._get_result_dtype(values.dtype)
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
elif needs_i8_conversion(values.dtype):
i8values = res_values.view('i8')
return type(values)(i8values, dtype=values.dtype)
raise NotImplementedError | 3,063,274,066,866,768,000 | Construct an ExtensionArray result from an ndarray result. | pandas/core/groupby/ops.py | _reconstruct_ea_result | CuteLemon/pandas | python | def _reconstruct_ea_result(self, values, res_values):
'\n \n '
if isinstance(values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)):
dtype = self._get_result_dtype(values.dtype)
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
elif needs_i8_conversion(values.dtype):
i8values = res_values.view('i8')
return type(values)(i8values, dtype=values.dtype)
raise NotImplementedError |
@final
def _masked_ea_wrap_cython_operation(self, values: BaseMaskedArray, min_count: int, ngroups: int, comp_ids: np.ndarray, **kwargs) -> BaseMaskedArray:
"\n Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's\n and cython algorithms which accept a mask.\n "
orig_values = values
mask = values._mask.copy()
result_mask = np.zeros(ngroups, dtype=bool)
arr = values._data
res_values = self._cython_op_ndim_compat(arr, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=mask, result_mask=result_mask, **kwargs)
dtype = self._get_result_dtype(orig_values.dtype)
assert isinstance(dtype, BaseMaskedDtype)
cls = dtype.construct_array_type()
if (self.kind != 'aggregate'):
return cls(res_values.astype(dtype.type, copy=False), mask)
else:
return cls(res_values.astype(dtype.type, copy=False), result_mask) | -3,460,015,546,438,205,400 | Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
and cython algorithms which accept a mask. | pandas/core/groupby/ops.py | _masked_ea_wrap_cython_operation | CuteLemon/pandas | python | @final
def _masked_ea_wrap_cython_operation(self, values: BaseMaskedArray, min_count: int, ngroups: int, comp_ids: np.ndarray, **kwargs) -> BaseMaskedArray:
"\n Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's\n and cython algorithms which accept a mask.\n "
orig_values = values
mask = values._mask.copy()
result_mask = np.zeros(ngroups, dtype=bool)
arr = values._data
res_values = self._cython_op_ndim_compat(arr, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=mask, result_mask=result_mask, **kwargs)
dtype = self._get_result_dtype(orig_values.dtype)
assert isinstance(dtype, BaseMaskedDtype)
cls = dtype.construct_array_type()
if (self.kind != 'aggregate'):
return cls(res_values.astype(dtype.type, copy=False), mask)
else:
return cls(res_values.astype(dtype.type, copy=False), result_mask) |
@final
def cython_operation(self, *, values: ArrayLike, axis: int, min_count: int=(- 1), comp_ids: np.ndarray, ngroups: int, **kwargs) -> ArrayLike:
'\n Call our cython function, with appropriate pre- and post- processing.\n '
if (values.ndim > 2):
raise NotImplementedError('number of dimensions is currently limited to 2')
elif (values.ndim == 2):
assert (axis == 1), axis
elif (not is_1d_only_ea_obj(values)):
assert (axis == 0)
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
self._disallow_invalid_ops(dtype, is_numeric)
if (not isinstance(values, np.ndarray)):
return self._ea_wrap_cython_operation(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, **kwargs)
return self._cython_op_ndim_compat(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs) | -1,751,327,355,450,369,500 | Call our cython function, with appropriate pre- and post- processing. | pandas/core/groupby/ops.py | cython_operation | CuteLemon/pandas | python | @final
def cython_operation(self, *, values: ArrayLike, axis: int, min_count: int=(- 1), comp_ids: np.ndarray, ngroups: int, **kwargs) -> ArrayLike:
'\n \n '
if (values.ndim > 2):
raise NotImplementedError('number of dimensions is currently limited to 2')
elif (values.ndim == 2):
assert (axis == 1), axis
elif (not is_1d_only_ea_obj(values)):
assert (axis == 0)
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
self._disallow_invalid_ops(dtype, is_numeric)
if (not isinstance(values, np.ndarray)):
return self._ea_wrap_cython_operation(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, **kwargs)
return self._cython_op_ndim_compat(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs) |
def get_iterator(self, data: NDFrameT, axis: int=0) -> Iterator[tuple[(Hashable, NDFrameT)]]:
'\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n '
splitter = self._get_splitter(data, axis=axis)
keys = self.group_keys_seq
for (key, group) in zip(keys, splitter):
(yield (key, group.__finalize__(data, method='groupby'))) | -4,732,774,640,510,437,000 | Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group | pandas/core/groupby/ops.py | get_iterator | CuteLemon/pandas | python | def get_iterator(self, data: NDFrameT, axis: int=0) -> Iterator[tuple[(Hashable, NDFrameT)]]:
'\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n '
splitter = self._get_splitter(data, axis=axis)
keys = self.group_keys_seq
for (key, group) in zip(keys, splitter):
(yield (key, group.__finalize__(data, method='groupby'))) |
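This is the machinery behind ordinary groupby iteration; a minimal sketch of the public entry point that ends up in a method like this one:
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
for name, group in df.groupby("key"):   # each `group` is a subsetted DataFrame
    print(name, len(group))
# a 2
# b 1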
@final
def _get_splitter(self, data: NDFrame, axis: int=0) -> DataSplitter:
'\n Returns\n -------\n Generator yielding subsetted objects\n\n __finalize__ has not been called for the subsetted objects returned.\n '
(ids, _, ngroups) = self.group_info
return get_splitter(data, ids, ngroups, axis=axis) | -6,953,872,263,223,836,000 | Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned. | pandas/core/groupby/ops.py | _get_splitter | CuteLemon/pandas | python | @final
def _get_splitter(self, data: NDFrame, axis: int=0) -> DataSplitter:
'\n Returns\n -------\n Generator yielding subsetted objects\n\n __finalize__ has not been called for the subsetted objects returned.\n '
(ids, _, ngroups) = self.group_info
return get_splitter(data, ids, ngroups, axis=axis) |
def _get_grouper(self):
"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n "
return self.groupings[0].grouping_vector | 3,283,307,727,770,842,000 | We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper. | pandas/core/groupby/ops.py | _get_grouper | CuteLemon/pandas | python | def _get_grouper(self):
"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n "
return self.groupings[0].grouping_vector |
@cache_readonly
def indices(self) -> dict[(Hashable, npt.NDArray[np.intp])]:
'dict {group name -> group indices}'
if ((len(self.groupings) == 1) and isinstance(self.result_index, CategoricalIndex)):
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys) | 3,842,011,190,711,266,000 | dict {group name -> group indices} | pandas/core/groupby/ops.py | indices | CuteLemon/pandas | python | @cache_readonly
def indices(self) -> dict[(Hashable, npt.NDArray[np.intp])]:
if ((len(self.groupings) == 1) and isinstance(self.result_index, CategoricalIndex)):
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys) |
@final
def size(self) -> Series:
'\n Compute group sizes.\n '
(ids, _, ngroups) = self.group_info
if ngroups:
out = np.bincount(ids[(ids != (- 1))], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype='int64') | 8,537,188,025,531,969,000 | Compute group sizes. | pandas/core/groupby/ops.py | size | CuteLemon/pandas | python | @final
def size(self) -> Series:
'\n \n '
(ids, _, ngroups) = self.group_info
if ngroups:
out = np.bincount(ids[(ids != (- 1))], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype='int64') |
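A short sketch of the public counterpart of the size computation above:
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "a"], "val": [1, 2, 3, 4]})
print(df.groupby("key").size())
# key
# a    3
# b    1
# dtype: int64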
@cache_readonly
def groups(self) -> dict[(Hashable, np.ndarray)]:
'dict {group name -> group labels}'
if (len(self.groupings) == 1):
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index) | -2,508,148,584,364,830,000 | dict {group name -> group labels} | pandas/core/groupby/ops.py | groups | CuteLemon/pandas | python | @cache_readonly
def groups(self) -> dict[(Hashable, np.ndarray)]:
if (len(self.groupings) == 1):
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index) |
@final
@cache_readonly
def result_arraylike(self) -> ArrayLike:
'\n Analogous to result_index, but returning an ndarray/ExtensionArray\n allowing us to retain ExtensionDtypes not supported by Index.\n '
if (len(self.groupings) == 1):
return self.groupings[0].group_arraylike
return self.result_index._values | -8,581,127,941,794,736,000 | Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index. | pandas/core/groupby/ops.py | result_arraylike | CuteLemon/pandas | python | @final
@cache_readonly
def result_arraylike(self) -> ArrayLike:
'\n Analogous to result_index, but returning an ndarray/ExtensionArray\n allowing us to retain ExtensionDtypes not supported by Index.\n '
if (len(self.groupings) == 1):
return self.groupings[0].group_arraylike
return self.result_index._values |
@final
def _cython_operation(self, kind: str, values, how: str, axis: int, min_count: int=(- 1), **kwargs) -> ArrayLike:
'\n Returns the values of a cython operation.\n '
assert (kind in ['transform', 'aggregate'])
cy_op = WrappedCythonOp(kind=kind, how=how)
(ids, _, _) = self.group_info
ngroups = self.ngroups
return cy_op.cython_operation(values=values, axis=axis, min_count=min_count, comp_ids=ids, ngroups=ngroups, **kwargs) | -6,726,947,257,547,295,000 | Returns the values of a cython operation. | pandas/core/groupby/ops.py | _cython_operation | CuteLemon/pandas | python | @final
def _cython_operation(self, kind: str, values, how: str, axis: int, min_count: int=(- 1), **kwargs) -> ArrayLike:
'\n \n '
assert (kind in ['transform', 'aggregate'])
cy_op = WrappedCythonOp(kind=kind, how=how)
(ids, _, _) = self.group_info
ngroups = self.ngroups
return cy_op.cython_operation(values=values, axis=axis, min_count=min_count, comp_ids=ids, ngroups=ngroups, **kwargs) |
@final
def agg_series(self, obj: Series, func: Callable, preserve_dtype: bool=False) -> ArrayLike:
'\n Parameters\n ----------\n obj : Series\n func : function taking a Series and returning a scalar-like\n preserve_dtype : bool\n Whether the aggregation is known to be dtype-preserving.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n '
if (len(obj) == 0):
result = self._aggregate_series_pure_python(obj, func)
elif (not isinstance(obj._values, np.ndarray)):
result = self._aggregate_series_pure_python(obj, func)
preserve_dtype = True
else:
result = self._aggregate_series_pure_python(obj, func)
npvalues = lib.maybe_convert_objects(result, try_float=False)
if preserve_dtype:
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
else:
out = npvalues
return out | 421,957,958,651,067,140 | Parameters
----------
obj : Series
func : function taking a Series and returning a scalar-like
preserve_dtype : bool
Whether the aggregation is known to be dtype-preserving.
Returns
-------
np.ndarray or ExtensionArray | pandas/core/groupby/ops.py | agg_series | CuteLemon/pandas | python | @final
def agg_series(self, obj: Series, func: Callable, preserve_dtype: bool=False) -> ArrayLike:
'\n Parameters\n ----------\n obj : Series\n func : function taking a Series and returning a scalar-like\n preserve_dtype : bool\n Whether the aggregation is known to be dtype-preserving.\n\n Returns\n -------\n np.ndarray or ExtensionArray\n '
if (len(obj) == 0):
result = self._aggregate_series_pure_python(obj, func)
elif (not isinstance(obj._values, np.ndarray)):
result = self._aggregate_series_pure_python(obj, func)
preserve_dtype = True
else:
result = self._aggregate_series_pure_python(obj, func)
npvalues = lib.maybe_convert_objects(result, try_float=False)
if preserve_dtype:
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
else:
out = npvalues
return out |
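agg_series is the path taken when a plain Python callable is used for aggregation; a sketch of how that looks from the public side:
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 4.0, 3.0]})
# A lambda cannot be dispatched to a cython kernel, so each group Series is
# passed to the callable and the returned scalars are reassembled afterwards.
print(df.groupby("key")["val"].agg(lambda s: s.max() - s.min()))
# a -> 3.0, b -> 0.0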
@cache_readonly
def groups(self):
'dict {group name -> group labels}'
result = {key: value for (key, value) in zip(self.binlabels, self.bins) if (key is not NaT)}
return result | -5,176,918,563,581,249,000 | dict {group name -> group labels} | pandas/core/groupby/ops.py | groups | CuteLemon/pandas | python | @cache_readonly
def groups(self):
result = {key: value for (key, value) in zip(self.binlabels, self.bins) if (key is not NaT)}
return result |
def _get_grouper(self):
"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n "
return self | 7,450,340,969,822,096,000 | We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper. | pandas/core/groupby/ops.py | _get_grouper | CuteLemon/pandas | python | def _get_grouper(self):
"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n "
return self |
def get_iterator(self, data: NDFrame, axis: int=0):
'\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n '
if (axis == 0):
slicer = (lambda start, edge: data.iloc[start:edge])
else:
slicer = (lambda start, edge: data.iloc[:, start:edge])
length = len(data.axes[axis])
start = 0
for (edge, label) in zip(self.bins, self.binlabels):
if (label is not NaT):
(yield (label, slicer(start, edge)))
start = edge
if (start < length):
(yield (self.binlabels[(- 1)], slicer(start, None))) | 4,232,520,393,238,952,000 | Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group | pandas/core/groupby/ops.py | get_iterator | CuteLemon/pandas | python | def get_iterator(self, data: NDFrame, axis: int=0):
'\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n '
if (axis == 0):
slicer = (lambda start, edge: data.iloc[start:edge])
else:
slicer = (lambda start, edge: data.iloc[:, start:edge])
length = len(data.axes[axis])
start = 0
for (edge, label) in zip(self.bins, self.binlabels):
if (label is not NaT):
(yield (label, slicer(start, edge)))
start = edge
if (start < length):
(yield (self.binlabels[(- 1)], slicer(start, None))) |
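BinGrouper backs time-based grouping such as resample; a small sketch of iterating over the bins from the public API (the dates and frequency are arbitrary example values):
import pandas as pd

idx = pd.date_range("2021-01-01", periods=6, freq="H")
s = pd.Series(range(6), index=idx)
for label, chunk in s.resample("2H"):   # each bin label pairs with its slice of the data
    print(label, list(chunk))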
@tf_export('math.argmax', 'argmax', v1=[])
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
'Returns the index with the largest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n '
if (axis is None):
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type) | -3,456,371,538,898,410,000 | Returns the index with the largest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`. | tensorflow/python/ops/math_ops.py | argmax_v2 | minminsun/tensorflow | python | @tf_export('math.argmax', 'argmax', v1=[])
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
'Returns the index with the largest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n '
if (axis is None):
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type) |
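A short usage sketch for tf.math.argmax, assuming TensorFlow 2.x eager execution:
import tensorflow as tf

x = tf.constant([[2, 11, 3],
                 [7, 1, 9]])
print(tf.math.argmax(x, axis=1).numpy())  # [1 2]: index of the row-wise maximum
print(tf.math.argmax(x).numpy())          # [1 0 1]: axis defaults to 0, reducing over rows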
@tf_export('math.argmin', 'argmin', v1=[])
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
'Returns the index with the smallest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n '
if (axis is None):
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type) | 327,193,619,100,737,500 | Returns the index with the smallest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`. | tensorflow/python/ops/math_ops.py | argmin_v2 | minminsun/tensorflow | python | @tf_export('math.argmin', 'argmin', v1=[])
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
'Returns the index with the smallest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,\n `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n '
if (axis is None):
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type) |
@tf_export('math.abs', 'abs')
@dispatch.add_dispatch_support
def abs(x, name=None):
'Computes the absolute value of a tensor.\n\n Given a tensor `x` of complex numbers, this operation returns a tensor of type\n `float32` or `float64` that is the absolute value of each element in `x`. All\n elements in `x` must be complex numbers of the form \\\\(a + bj\\\\). The\n absolute value is computed as \\\\( \\sqrt{a^2 + b^2}\\\\). For example:\n ```python\n x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n tf.abs(x) # [5.25594902, 6.60492229]\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` the same size and type as `x` with absolute\n values.\n Note, for `complex64` or `complex128` input, the returned `Tensor` will be\n of type `float32` or `float64`, respectively.\n '
with ops.name_scope(name, 'Abs', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name) | 5,716,243,541,336,007,000 | Computes the absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
Note, for `complex64` or `complex128` input, the returned `Tensor` will be
of type `float32` or `float64`, respectively. | tensorflow/python/ops/math_ops.py | abs | minminsun/tensorflow | python | @tf_export('math.abs', 'abs')
@dispatch.add_dispatch_support
def abs(x, name=None):
'Computes the absolute value of a tensor.\n\n Given a tensor `x` of complex numbers, this operation returns a tensor of type\n `float32` or `float64` that is the absolute value of each element in `x`. All\n elements in `x` must be complex numbers of the form \\\\(a + bj\\\\). The\n absolute value is computed as \\\\( \\sqrt{a^2 + b^2}\\\\). For example:\n ```python\n x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n tf.abs(x) # [5.25594902, 6.60492229]\n ```\n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` the same size and type as `x` with absolute\n values.\n Note, for `complex64` or `complex128` input, the returned `Tensor` will be\n of type `float32` or `float64`, respectively.\n '
with ops.name_scope(name, 'Abs', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name) |
@tf_export('math.divide', 'divide')
@dispatch.add_dispatch_support
def divide(x, y, name=None):
'Computes Python style division of `x` by `y`.'
if (name is not None):
return (DivideDelegateWithName(x, name) / y)
else:
return (x / y) | 1,802,829,701,935,261,400 | Computes Python style division of `x` by `y`. | tensorflow/python/ops/math_ops.py | divide | minminsun/tensorflow | python | @tf_export('math.divide', 'divide')
@dispatch.add_dispatch_support
def divide(x, y, name=None):
if (name is not None):
return (DivideDelegateWithName(x, name) / y)
else:
return (x / y) |
@deprecation.deprecated('2016-12-30', '`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`')
def _neg(x, name=None):
'Computes numerical negative value element-wise.\n\n I.e., \\(y = -x\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n '
return negative(x, name) | 3,601,977,973,089,695,000 | Computes numerical negative value element-wise.
I.e., \(y = -x\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. | tensorflow/python/ops/math_ops.py | _neg | minminsun/tensorflow | python | @deprecation.deprecated('2016-12-30', '`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`')
def _neg(x, name=None):
'Computes numerical negative value element-wise.\n\n I.e., \\(y = -x\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n '
return negative(x, name) |
@tf_export(v1=['math.scalar_mul', 'scalar_mul'])
def scalar_mul(scalar, x, name=None):
'Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n Intended for use in gradient code which might deal with `IndexedSlices`\n objects, which are easy to multiply by a scalar but more expensive to\n multiply with arbitrary tensors.\n\n Args:\n scalar: A 0-D scalar `Tensor`. Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n name: A name for the operation (optional).\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n '
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype, name='scalar')
shape = scalar.get_shape()
if (shape.ndims == 0):
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError(('Only scalar multiply works, got shape %s' % shape)) | -5,209,761,818,786,379,000 | Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
name: A name for the operation (optional).
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`. | tensorflow/python/ops/math_ops.py | scalar_mul | minminsun/tensorflow | python | @tf_export(v1=['math.scalar_mul', 'scalar_mul'])
def scalar_mul(scalar, x, name=None):
'Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n Intended for use in gradient code which might deal with `IndexedSlices`\n objects, which are easy to multiply by a scalar but more expensive to\n multiply with arbitrary tensors.\n\n Args:\n scalar: A 0-D scalar `Tensor`. Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n name: A name for the operation (optional).\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n '
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype, name='scalar')
shape = scalar.get_shape()
if (shape.ndims == 0):
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError(('Only scalar multiply works, got shape %s' % shape)) |
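A hedged sketch of the IndexedSlices case the docstring mentions, assuming TensorFlow 2.x eager execution (this v1 endpoint is reached through tf.compat.v1):
import tensorflow as tf

grad = tf.IndexedSlices(values=tf.constant([[1.0, 2.0]]),
                        indices=tf.constant([3]),
                        dense_shape=tf.constant([10, 2]))
scaled = tf.compat.v1.scalar_mul(0.5, grad)   # only .values is multiplied; the result stays an IndexedSlices
print(type(scaled).__name__, scaled.values.numpy())  # -> IndexedSlices, values scaled to [[0.5, 1.0]]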
@tf_export('math.pow', 'pow')
@dispatch.add_dispatch_support
def pow(x, y, name=None):
'Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n '
with ops.name_scope(name, 'Pow', [x]) as name:
return gen_math_ops._pow(x, y, name=name) | 2,437,528,458,899,659,300 | Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. | tensorflow/python/ops/math_ops.py | pow | minminsun/tensorflow | python | @tf_export('math.pow', 'pow')
@dispatch.add_dispatch_support
def pow(x, y, name=None):
'Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n '
with ops.name_scope(name, 'Pow', [x]) as name:
return gen_math_ops._pow(x, y, name=name) |
@tf_export('dtypes.complex', 'complex')
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
'Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`,\n `float64`.\n imag: A `Tensor`. Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n '
real = ops.convert_to_tensor(real, name='real')
imag = ops.convert_to_tensor(imag, name='imag')
with ops.name_scope(name, 'Complex', [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if (input_types == (dtypes.float64, dtypes.float64)):
Tout = dtypes.complex128
elif (input_types == (dtypes.float32, dtypes.float32)):
Tout = dtypes.complex64
else:
raise TypeError('real and imag have incorrect types: {} {}'.format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name) | 6,540,480,198,212,402,000 | Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`,
`float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`. | tensorflow/python/ops/math_ops.py | complex | minminsun/tensorflow | python | @tf_export('dtypes.complex', 'complex')
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
'Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`,\n `float64`.\n imag: A `Tensor`. Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n '
real = ops.convert_to_tensor(real, name='real')
imag = ops.convert_to_tensor(imag, name='imag')
with ops.name_scope(name, 'Complex', [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if (input_types == (dtypes.float64, dtypes.float64)):
Tout = dtypes.complex128
elif (input_types == (dtypes.float32, dtypes.float32)):
Tout = dtypes.complex64
else:
raise TypeError('real and imag have incorrect types: {} {}'.format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name) |
@tf_export('math.real', v1=['math.real', 'real'])
@deprecation.deprecated_endpoints('real')
@dispatch.add_dispatch_support
def real(input, name=None):
'Returns the real part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the real part of each element in `input` considered as a complex number.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.real(x) # [-2.25, 3.25]\n ```\n\n If `input` is already real, it is returned unchanged.\n\n Args:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n '
with ops.name_scope(name, 'Real', [input]) as name:
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input | 5,528,627,588,415,537,000 | Returns the real part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the real part of each element in `input` considered as a complex number.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`. | tensorflow/python/ops/math_ops.py | real | minminsun/tensorflow | python | @tf_export('math.real', v1=['math.real', 'real'])
@deprecation.deprecated_endpoints('real')
@dispatch.add_dispatch_support
def real(input, name=None):
'Returns the real part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the real part of each element in `input` considered as a complex number.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.real(x) # [-2.25, 3.25]\n ```\n\n If `input` is already real, it is returned unchanged.\n\n Args:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n '
with ops.name_scope(name, 'Real', [input]) as name:
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input |