Dataset columns (name, type, value range):
- body: string, lengths 26 to 98.2k
- body_hash: int64, values -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, lengths 1 to 16.8k
- path: string, lengths 5 to 230
- name: string, lengths 1 to 96
- repository_name: string, lengths 7 to 89
- lang: string, 1 distinct value
- body_without_docstring: string, lengths 20 to 98.2k
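Each record below lists these eight fields in order. As a minimal, hedged sketch of how a dump with this schema could be loaded and inspected, assuming it is distributed as a Hugging Face `datasets` dataset (the dataset identifier used here is a placeholder, not the real name):

# Minimal sketch: load a dataset with the columns above and print one record.
# Assumes the 'datasets' library is installed; the dataset id below is a placeholder.
from datasets import load_dataset

ds = load_dataset('example-user/python-functions-with-docstrings', split='train')

row = ds[0]
for column in ('repository_name', 'path', 'name', 'lang', 'docstring'):
    print(column, '->', row[column])
print(row['body'])                    # full function source, including its docstring
print(row['body_without_docstring'])  # the same source with the docstring stripped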
def setUp(self):
    'Create runner object to run tests.'
    from click.testing import CliRunner
    self.cli_runner = CliRunner()
-5,648,910,060,475,704,000
Create runner object to run tests.
tests/cmdline/commands/test_group.py
setUp
pranavmodx/aiida-core
python
def setUp(self): from click.testing import CliRunner self.cli_runner = CliRunner()
def test_help(self):
    'Tests help text for all group sub commands.'
    options = ['--help']
    result = self.cli_runner.invoke(group_list, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_create, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_delete, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_relabel, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_description, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_add_nodes, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_remove_nodes, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_show, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
    result = self.cli_runner.invoke(group_copy, options)
    self.assertIsNone(result.exception, result.output)
    self.assertIn('Usage', result.output)
-3,273,121,422,687,399,000
Tests help text for all group sub commands.
tests/cmdline/commands/test_group.py
test_help
pranavmodx/aiida-core
python
def test_help(self): options = ['--help'] result = self.cli_runner.invoke(group_list, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_create, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_delete, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_relabel, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_description, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_add_nodes, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_remove_nodes, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_show, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output) result = self.cli_runner.invoke(group_copy, options) self.assertIsNone(result.exception, result.output) self.assertIn('Usage', result.output)
def test_create(self):
    'Test `verdi group create` command.'
    result = self.cli_runner.invoke(group_create, ['dummygroup5'])
    self.assertClickResultNoException(result)
    result = self.cli_runner.invoke(group_list)
    self.assertClickResultNoException(result)
    self.assertIn('dummygroup5', result.output)
2,922,920,148,127,587,300
Test `verdi group create` command.
tests/cmdline/commands/test_group.py
test_create
pranavmodx/aiida-core
python
def test_create(self): result = self.cli_runner.invoke(group_create, ['dummygroup5']) self.assertClickResultNoException(result) result = self.cli_runner.invoke(group_list) self.assertClickResultNoException(result) self.assertIn('dummygroup5', result.output)
def test_list(self):
    'Test `verdi group list` command.'
    result = self.cli_runner.invoke(group_list)
    self.assertClickResultNoException(result)
    for grp in ['dummygroup1', 'dummygroup2']:
        self.assertIn(grp, result.output)
907,058,038,534,539,100
Test `verdi group list` command.
tests/cmdline/commands/test_group.py
test_list
pranavmodx/aiida-core
python
def test_list(self): result = self.cli_runner.invoke(group_list) self.assertClickResultNoException(result) for grp in ['dummygroup1', 'dummygroup2']: self.assertIn(grp, result.output)
def test_copy(self):
    'Test `verdi group copy` command.'
    result = self.cli_runner.invoke(group_copy, ['dummygroup1', 'dummygroup2'])
    self.assertClickResultNoException(result)
    self.assertIn('Success', result.output)
6,243,768,494,118,143,000
Test `verdi group copy` command.
tests/cmdline/commands/test_group.py
test_copy
pranavmodx/aiida-core
python
def test_copy(self): result = self.cli_runner.invoke(group_copy, ['dummygroup1', 'dummygroup2']) self.assertClickResultNoException(result) self.assertIn('Success', result.output)
def test_delete(self):
    'Test `verdi group delete` command.'
    orm.Group(label='group_test_delete_01').store()
    orm.Group(label='group_test_delete_02').store()
    result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_01'])
    self.assertClickResultNoException(result)
    result = self.cli_runner.invoke(group_list)
    self.assertClickResultNoException(result)
    self.assertNotIn('group_test_delete_01', result.output)
    node_01 = orm.CalculationNode().store()
    node_02 = orm.CalculationNode().store()
    group = orm.load_group(label='group_test_delete_02')
    group.add_nodes([node_01, node_02])
    self.assertEqual(group.count(), 2)
    result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_02'])
    self.assertIsNotNone(result.exception, result.output)
    result = self.cli_runner.invoke(group_delete, ['--force', '--clear', 'group_test_delete_02'])
    self.assertClickResultNoException(result)
    with self.assertRaises(exceptions.NotExistent):
        group = orm.load_group(label='group_test_delete_02')
-4,238,451,815,892,938,000
Test `verdi group delete` command.
tests/cmdline/commands/test_group.py
test_delete
pranavmodx/aiida-core
python
def test_delete(self): orm.Group(label='group_test_delete_01').store() orm.Group(label='group_test_delete_02').store() result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_01']) self.assertClickResultNoException(result) result = self.cli_runner.invoke(group_list) self.assertClickResultNoException(result) self.assertNotIn('group_test_delete_01', result.output) node_01 = orm.CalculationNode().store() node_02 = orm.CalculationNode().store() group = orm.load_group(label='group_test_delete_02') group.add_nodes([node_01, node_02]) self.assertEqual(group.count(), 2) result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_02']) self.assertIsNotNone(result.exception, result.output) result = self.cli_runner.invoke(group_delete, ['--force', '--clear', 'group_test_delete_02']) self.assertClickResultNoException(result) with self.assertRaises(exceptions.NotExistent): group = orm.load_group(label='group_test_delete_02')
def test_show(self):
    'Test `verdi group show` command.'
    result = self.cli_runner.invoke(group_show, ['dummygroup1'])
    self.assertClickResultNoException(result)
    for grpline in ['Group label', 'dummygroup1', 'Group type_string', 'user', 'Group description', '<no description>']:
        self.assertIn(grpline, result.output)
-4,439,619,605,897,400,300
Test `verdi group show` command.
tests/cmdline/commands/test_group.py
test_show
pranavmodx/aiida-core
python
def test_show(self): result = self.cli_runner.invoke(group_show, ['dummygroup1']) self.assertClickResultNoException(result) for grpline in ['Group label', 'dummygroup1', 'Group type_string', 'user', 'Group description', '<no description>']: self.assertIn(grpline, result.output)
def test_description(self):
    'Test `verdi group description` command.'
    description = 'It is a new description'
    group = orm.load_group(label='dummygroup2')
    self.assertNotEqual(group.description, description)
    result = self.cli_runner.invoke(group_description, [group.label, description])
    self.assertClickResultNoException(result)
    self.assertEqual(group.description, description)
    result = self.cli_runner.invoke(group_description, [group.label])
    self.assertClickResultNoException(result)
    self.assertIn(description, result.output)
7,618,296,786,687,864,000
Test `verdi group description` command.
tests/cmdline/commands/test_group.py
test_description
pranavmodx/aiida-core
python
def test_description(self): description = 'It is a new description' group = orm.load_group(label='dummygroup2') self.assertNotEqual(group.description, description) result = self.cli_runner.invoke(group_description, [group.label, description]) self.assertClickResultNoException(result) self.assertEqual(group.description, description) result = self.cli_runner.invoke(group_description, [group.label]) self.assertClickResultNoException(result) self.assertIn(description, result.output)
def test_relabel(self):
    'Test `verdi group relabel` command.'
    result = self.cli_runner.invoke(group_relabel, ['dummygroup4', 'relabeled_group'])
    self.assertIsNone(result.exception, result.output)
    result = self.cli_runner.invoke(group_list)
    self.assertClickResultNoException(result)
    self.assertNotIn('dummygroup4', result.output)
    self.assertIn('relabeled_group', result.output)
2,875,102,681,937,065,500
Test `verdi group relabel` command.
tests/cmdline/commands/test_group.py
test_relabel
pranavmodx/aiida-core
python
def test_relabel(self): result = self.cli_runner.invoke(group_relabel, ['dummygroup4', 'relabeled_group']) self.assertIsNone(result.exception, result.output) result = self.cli_runner.invoke(group_list) self.assertClickResultNoException(result) self.assertNotIn('dummygroup4', result.output) self.assertIn('relabeled_group', result.output)
def test_add_remove_nodes(self):
    'Test `verdi group remove-nodes` command.'
    node_01 = orm.CalculationNode().store()
    node_02 = orm.CalculationNode().store()
    node_03 = orm.CalculationNode().store()
    result = self.cli_runner.invoke(group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
    self.assertClickResultNoException(result)
    result = self.cli_runner.invoke(group_show, ['dummygroup1'])
    self.assertClickResultNoException(result)
    self.assertIn('CalculationNode', result.output)
    self.assertIn(str(node_01.pk), result.output)
    result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
    self.assertIsNone(result.exception, result.output)
    result = self.cli_runner.invoke(group_show, ['-r', 'dummygroup1'])
    self.assertClickResultNoException(result)
    self.assertNotIn('CalculationNode', result.output)
    self.assertNotIn(str(node_01.pk), result.output)
    group = orm.load_group(label='dummygroup1')
    group.add_nodes([node_01, node_02, node_03])
    self.assertEqual(group.count(), 3)
    result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--clear', '--group=dummygroup1'])
    self.assertClickResultNoException(result)
    self.assertEqual(group.count(), 0)
-3,826,414,208,446,320,000
Test `verdi group remove-nodes` command.
tests/cmdline/commands/test_group.py
test_add_remove_nodes
pranavmodx/aiida-core
python
def test_add_remove_nodes(self): node_01 = orm.CalculationNode().store() node_02 = orm.CalculationNode().store() node_03 = orm.CalculationNode().store() result = self.cli_runner.invoke(group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid]) self.assertClickResultNoException(result) result = self.cli_runner.invoke(group_show, ['dummygroup1']) self.assertClickResultNoException(result) self.assertIn('CalculationNode', result.output) self.assertIn(str(node_01.pk), result.output) result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid]) self.assertIsNone(result.exception, result.output) result = self.cli_runner.invoke(group_show, ['-r', 'dummygroup1']) self.assertClickResultNoException(result) self.assertNotIn('CalculationNode', result.output) self.assertNotIn(str(node_01.pk), result.output) group = orm.load_group(label='dummygroup1') group.add_nodes([node_01, node_02, node_03]) self.assertEqual(group.count(), 3) result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--clear', '--group=dummygroup1']) self.assertClickResultNoException(result) self.assertEqual(group.count(), 0)
def test_copy_existing_group(self):
    'Test user is prompted to continue if destination group exists and is not empty'
    source_label = 'source_copy_existing_group'
    dest_label = 'dest_copy_existing_group'
    calc_s1 = orm.CalculationNode().store()
    calc_s2 = orm.CalculationNode().store()
    nodes_source_group = {str(node.uuid) for node in [calc_s1, calc_s2]}
    source_group = orm.Group(label=source_label).store()
    source_group.add_nodes([calc_s1, calc_s2])
    options = [source_label, dest_label]
    result = self.cli_runner.invoke(group_copy, options)
    self.assertClickResultNoException(result)
    self.assertIn('Success: Nodes copied from group<{}> to group<{}>'.format(source_label, dest_label), result.output, result.exception)
    dest_group = orm.load_group(label=dest_label)
    self.assertEqual(dest_group.count(), 2)
    nodes_dest_group = {str(node.uuid) for node in dest_group.nodes}
    self.assertSetEqual(nodes_source_group, nodes_dest_group)
    result = self.cli_runner.invoke(group_copy, options)
    self.assertIsNotNone(result.exception, result.output)
    self.assertIn('Warning: Destination group<{}> already exists and is not empty.'.format(dest_label), result.output, result.exception)
    dest_group = orm.load_group(label=dest_label)
    self.assertEqual(dest_group.count(), 2)
    nodes_dest_group = {str(node.uuid) for node in dest_group.nodes}
    self.assertSetEqual(nodes_source_group, nodes_dest_group)
-7,379,491,617,463,558,000
Test user is prompted to continue if destination group exists and is not empty
tests/cmdline/commands/test_group.py
test_copy_existing_group
pranavmodx/aiida-core
python
def test_copy_existing_group(self): source_label = 'source_copy_existing_group' dest_label = 'dest_copy_existing_group' calc_s1 = orm.CalculationNode().store() calc_s2 = orm.CalculationNode().store() nodes_source_group = {str(node.uuid) for node in [calc_s1, calc_s2]} source_group = orm.Group(label=source_label).store() source_group.add_nodes([calc_s1, calc_s2]) options = [source_label, dest_label] result = self.cli_runner.invoke(group_copy, options) self.assertClickResultNoException(result) self.assertIn('Success: Nodes copied from group<{}> to group<{}>'.format(source_label, dest_label), result.output, result.exception) dest_group = orm.load_group(label=dest_label) self.assertEqual(dest_group.count(), 2) nodes_dest_group = {str(node.uuid) for node in dest_group.nodes} self.assertSetEqual(nodes_source_group, nodes_dest_group) result = self.cli_runner.invoke(group_copy, options) self.assertIsNotNone(result.exception, result.output) self.assertIn('Warning: Destination group<{}> already exists and is not empty.'.format(dest_label), result.output, result.exception) dest_group = orm.load_group(label=dest_label) self.assertEqual(dest_group.count(), 2) nodes_dest_group = {str(node.uuid) for node in dest_group.nodes} self.assertSetEqual(nodes_source_group, nodes_dest_group)
def forward(self, input):
    '\n input: [B, N, latent_size + point_dim]\n :param latent_codes: [B, N, LATENT_CODE_DIM]\n :param points: [B, N, 3]\n :return: sdf_pred: [B, N]\n '
    x = self.layers1(input)
    x = torch.cat((x, input), dim=(- 1))
    x = self.layers2(x)
    return x
1,390,690,253,077,486,000
input: [B, N, latent_size + point_dim] :param latent_codes: [B, N, LATENT_CODE_DIM] :param points: [B, N, 3] :return: sdf_pred: [B, N]
networks/sdf_net_decoder.py
forward
FrankieYin/master_project
python
def forward(self, input): '\n input: [B, N, latent_size + point_dim]\n :param latent_codes: [B, N, LATENT_CODE_DIM]\n :param points: [B, N, 3]\n :return: sdf_pred: [B, N]\n ' x = self.layers1(input) x = torch.cat((x, input), dim=(- 1)) x = self.layers2(x) return x
@search_blueprint.route('/table', methods=['POST'])
def search_table() -> Response:
    '\n Parse the request arguments and call the helper method to execute a table search\n :return: a Response created with the results from the helper method\n '
    try:
        request_json = request.get_json()
        search_term = get_query_param(request_json, 'term', '"term" parameter expected in request data')
        page_index = get_query_param(request_json, 'pageIndex', '"pageIndex" parameter expected in request data')
        search_type = request_json.get('searchType')
        transformed_filters = transform_filters(filters=request_json.get('filters', {}))
        results_dict = _search_table(filters=transformed_filters, search_term=search_term, page_index=page_index, search_type=search_type)
        return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
    except Exception as e:
        message = ('Encountered exception: ' + str(e))
        logging.exception(message)
        return make_response(jsonify(results_dict), HTTPStatus.INTERNAL_SERVER_ERROR)
8,425,497,943,664,630,000
Parse the request arguments and call the helper method to execute a table search :return: a Response created with the results from the helper method
amundsen_application/api/search/v0.py
search_table
ai-platform/amundsenfrontendlibrary
python
@search_blueprint.route('/table', methods=['POST']) def search_table() -> Response: '\n Parse the request arguments and call the helper method to execute a table search\n :return: a Response created with the results from the helper method\n ' try: request_json = request.get_json() search_term = get_query_param(request_json, 'term', '"term" parameter expected in request data') page_index = get_query_param(request_json, 'pageIndex', '"pageIndex" parameter expected in request data') search_type = request_json.get('searchType') transformed_filters = transform_filters(filters=request_json.get('filters', {})) results_dict = _search_table(filters=transformed_filters, search_term=search_term, page_index=page_index, search_type=search_type) return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR)) except Exception as e: message = ('Encountered exception: ' + str(e)) logging.exception(message) return make_response(jsonify(results_dict), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_table(*, search_term: str, page_index: int, filters: Dict, search_type: str) -> Dict[(str, Any)]:
    "\n Call the search service endpoint and return matching results\n Search service logic defined here:\n https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/table.py\n\n :return: a json output containing search results array as 'results'\n "
    tables = {'page_index': int(page_index), 'results': [], 'total_results': 0}
    results_dict = {'search_term': search_term, 'msg': '', 'tables': tables}
    try:
        if has_filters(filters=filters):
            query_json = generate_query_json(filters=filters, page_index=page_index, search_term=search_term)
            url_base = (app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_FILTER_ENDPOINT)
            response = request_search(url=url_base, headers={'Content-Type': 'application/json'}, method='POST', data=json.dumps(query_json))
        else:
            url_base = (app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_ENDPOINT)
            url = f'{url_base}?query_term={search_term}&page_index={page_index}'
            response = request_search(url=url)
        status_code = response.status_code
        if (status_code == HTTPStatus.OK):
            results_dict['msg'] = 'Success'
            results = response.json().get('results')
            tables['results'] = [map_table_result(result) for result in results]
            tables['total_results'] = response.json().get('total_results')
        else:
            message = 'Encountered error: Search request failed'
            results_dict['msg'] = message
            logging.error(message)
        results_dict['status_code'] = status_code
        return results_dict
    except Exception as e:
        message = ('Encountered exception: ' + str(e))
        results_dict['msg'] = message
        logging.exception(message)
        return results_dict
509,113,720,001,199,170
Call the search service endpoint and return matching results Search service logic defined here: https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/table.py :return: a json output containing search results array as 'results'
amundsen_application/api/search/v0.py
_search_table
ai-platform/amundsenfrontendlibrary
python
@action_logging def _search_table(*, search_term: str, page_index: int, filters: Dict, search_type: str) -> Dict[(str, Any)]: "\n Call the search service endpoint and return matching results\n Search service logic defined here:\n https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/table.py\n\n :return: a json output containing search results array as 'results'\n " tables = {'page_index': int(page_index), 'results': [], 'total_results': 0} results_dict = {'search_term': search_term, 'msg': '', 'tables': tables} try: if has_filters(filters=filters): query_json = generate_query_json(filters=filters, page_index=page_index, search_term=search_term) url_base = (app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_FILTER_ENDPOINT) response = request_search(url=url_base, headers={'Content-Type': 'application/json'}, method='POST', data=json.dumps(query_json)) else: url_base = (app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_ENDPOINT) url = f'{url_base}?query_term={search_term}&page_index={page_index}' response = request_search(url=url) status_code = response.status_code if (status_code == HTTPStatus.OK): results_dict['msg'] = 'Success' results = response.json().get('results') tables['results'] = [map_table_result(result) for result in results] tables['total_results'] = response.json().get('total_results') else: message = 'Encountered error: Search request failed' results_dict['msg'] = message logging.error(message) results_dict['status_code'] = status_code return results_dict except Exception as e: message = ('Encountered exception: ' + str(e)) results_dict['msg'] = message logging.exception(message) return results_dict
@search_blueprint.route('/user', methods=['GET'])
def search_user() -> Response:
    '\n Parse the request arguments and call the helper method to execute a user search\n :return: a Response created with the results from the helper method\n '
    try:
        search_term = get_query_param(request.args, 'query', 'Endpoint takes a "query" parameter')
        page_index = get_query_param(request.args, 'page_index', 'Endpoint takes a "page_index" parameter')
        search_type = request.args.get('search_type')
        results_dict = _search_user(search_term=search_term, page_index=page_index, search_type=search_type)
        return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
    except Exception as e:
        message = ('Encountered exception: ' + str(e))
        logging.exception(message)
        return make_response(jsonify(results_dict), HTTPStatus.INTERNAL_SERVER_ERROR)
-3,573,663,369,142,424,000
Parse the request arguments and call the helper method to execute a user search :return: a Response created with the results from the helper method
amundsen_application/api/search/v0.py
search_user
ai-platform/amundsenfrontendlibrary
python
@search_blueprint.route('/user', methods=['GET']) def search_user() -> Response: '\n Parse the request arguments and call the helper method to execute a user search\n :return: a Response created with the results from the helper method\n ' try: search_term = get_query_param(request.args, 'query', 'Endpoint takes a "query" parameter') page_index = get_query_param(request.args, 'page_index', 'Endpoint takes a "page_index" parameter') search_type = request.args.get('search_type') results_dict = _search_user(search_term=search_term, page_index=page_index, search_type=search_type) return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR)) except Exception as e: message = ('Encountered exception: ' + str(e)) logging.exception(message) return make_response(jsonify(results_dict), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_user(*, search_term: str, page_index: int, search_type: str) -> Dict[(str, Any)]:
    "\n Call the search service endpoint and return matching results\n Search service logic defined here:\n https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/user.py\n\n :return: a json output containing search results array as 'results'\n "
    def _map_user_result(result: Dict) -> Dict:
        user_result = dump_user(load_user(result))
        user_result['type'] = 'user'
        return user_result
    users = {'page_index': int(page_index), 'results': [], 'total_results': 0}
    results_dict = {'search_term': search_term, 'msg': 'Success', 'status_code': HTTPStatus.OK, 'users': users}
    try:
        url = '{0}?query_term={1}&page_index={2}'.format((app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT), search_term, page_index)
        response = request_search(url=url)
        status_code = response.status_code
        if (status_code == HTTPStatus.OK):
            results_dict['msg'] = 'Success'
            results = response.json().get('results')
            users['results'] = [_map_user_result(result) for result in results]
            users['total_results'] = response.json().get('total_results')
        else:
            message = 'Encountered error: Search request failed'
            results_dict['msg'] = message
            logging.error(message)
        results_dict['status_code'] = status_code
        return results_dict
    except Exception as e:
        message = ('Encountered exception: ' + str(e))
        results_dict['msg'] = message
        results_dict['status_code'] = HTTPStatus.INTERNAL_SERVER_ERROR
        logging.exception(message)
        return results_dict
-5,010,729,810,745,006,000
Call the search service endpoint and return matching results Search service logic defined here: https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/user.py :return: a json output containing search results array as 'results'
amundsen_application/api/search/v0.py
_search_user
ai-platform/amundsenfrontendlibrary
python
@action_logging def _search_user(*, search_term: str, page_index: int, search_type: str) -> Dict[(str, Any)]: "\n Call the search service endpoint and return matching results\n Search service logic defined here:\n https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/user.py\n\n :return: a json output containing search results array as 'results'\n " def _map_user_result(result: Dict) -> Dict: user_result = dump_user(load_user(result)) user_result['type'] = 'user' return user_result users = {'page_index': int(page_index), 'results': [], 'total_results': 0} results_dict = {'search_term': search_term, 'msg': 'Success', 'status_code': HTTPStatus.OK, 'users': users} try: url = '{0}?query_term={1}&page_index={2}'.format((app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT), search_term, page_index) response = request_search(url=url) status_code = response.status_code if (status_code == HTTPStatus.OK): results_dict['msg'] = 'Success' results = response.json().get('results') users['results'] = [_map_user_result(result) for result in results] users['total_results'] = response.json().get('total_results') else: message = 'Encountered error: Search request failed' results_dict['msg'] = message logging.error(message) results_dict['status_code'] = status_code return results_dict except Exception as e: message = ('Encountered exception: ' + str(e)) results_dict['msg'] = message results_dict['status_code'] = HTTPStatus.INTERNAL_SERVER_ERROR logging.exception(message) return results_dict
@_dispatch.add_dispatch_list
@tf_export('ignite_dataset')
def ignite_dataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None):
    'IgniteDataset that allows to get data from Apache Ignite.\n\n Apache Ignite is a memory-centric distributed database, caching, and processing\n platform for transactional, analytical, and streaming workloads, delivering\n in-memory speeds at petabyte scale. This contrib package contains an\n integration between Apache Ignite and TensorFlow. The integration is based on\n tf.data from TensorFlow side and Binary Client Protocol from Apache Ignite side.\n It allows to use Apache Ignite as a datasource for neural network training,\n inference and all other computations supported by TensorFlow. Ignite Dataset\n is based on Apache Ignite Binary Client Protocol.\n\n Args:\n cache_name: A `Tensor` of type `string`. Ignite Cache Name.\n host: A `Tensor` of type `string`. Ignite Thin Client Host.\n port: A `Tensor` of type `int32`. Ignite Thin Client Port.\n local: A `Tensor` of type `bool`.\n Local flag that defines that data should be fetched from local host only.\n part: A `Tensor` of type `int32`. Partition data should be fetched from.\n page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client.\n schema: A `Tensor` of type `int32`.\n Internal structure that defines schema of cache objects.\n permutation: A `Tensor` of type `int32`.\n Internal structure that defines permutation of cache objects.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `variant`.\n '
    _ctx = (_context._context or _context.context())
    if ((_ctx is not None) and _ctx._thread_local_data.is_eager):
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._thread_local_data.device_name, 'IgniteDataset', name, _ctx.post_execution_callbacks, cache_name, host, port, local, part, page_size, schema, permutation)
            return _result
        except _core._FallbackException:
            try:
                return ignite_dataset_eager_fallback(cache_name, host, port, local, part, page_size, schema, permutation, name=name, ctx=_ctx)
            except _core._SymbolicException:
                pass
            except (TypeError, ValueError):
                result = _dispatch.dispatch(ignite_dataset, cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name)
                if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED):
                    return result
                raise
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    try:
        (_, _, _op) = _op_def_lib._apply_op_helper('IgniteDataset', cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(ignite_dataset, cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name)
        if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED):
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient('IgniteDataset', _inputs_flat, _attrs, _result, name)
    (_result,) = _result
    return _result
9,173,039,070,967,508,000
IgniteDataset that allows to get data from Apache Ignite. Apache Ignite is a memory-centric distributed database, caching, and processing platform for transactional, analytical, and streaming workloads, delivering in-memory speeds at petabyte scale. This contrib package contains an integration between Apache Ignite and TensorFlow. The integration is based on tf.data from TensorFlow side and Binary Client Protocol from Apache Ignite side. It allows to use Apache Ignite as a datasource for neural network training, inference and all other computations supported by TensorFlow. Ignite Dataset is based on Apache Ignite Binary Client Protocol. Args: cache_name: A `Tensor` of type `string`. Ignite Cache Name. host: A `Tensor` of type `string`. Ignite Thin Client Host. port: A `Tensor` of type `int32`. Ignite Thin Client Port. local: A `Tensor` of type `bool`. Local flag that defines that data should be fetched from local host only. part: A `Tensor` of type `int32`. Partition data should be fetched from. page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client. schema: A `Tensor` of type `int32`. Internal structure that defines schema of cache objects. permutation: A `Tensor` of type `int32`. Internal structure that defines permutation of cache objects. name: A name for the operation (optional). Returns: A `Tensor` of type `variant`.
venv/lib/python3.7/site-packages/tensorflow_core/contrib/ignite/python/ops/gen_dataset_ops.py
ignite_dataset
aMp37/SimpleHTR
python
@_dispatch.add_dispatch_list @tf_export('ignite_dataset') def ignite_dataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None): 'IgniteDataset that allows to get data from Apache Ignite.\n\n Apache Ignite is a memory-centric distributed database, caching, and processing\n platform for transactional, analytical, and streaming workloads, delivering\n in-memory speeds at petabyte scale. This contrib package contains an\n integration between Apache Ignite and TensorFlow. The integration is based on\n tf.data from TensorFlow side and Binary Client Protocol from Apache Ignite side.\n It allows to use Apache Ignite as a datasource for neural network training,\n inference and all other computations supported by TensorFlow. Ignite Dataset\n is based on Apache Ignite Binary Client Protocol.\n\n Args:\n cache_name: A `Tensor` of type `string`. Ignite Cache Name.\n host: A `Tensor` of type `string`. Ignite Thin Client Host.\n port: A `Tensor` of type `int32`. Ignite Thin Client Port.\n local: A `Tensor` of type `bool`.\n Local flag that defines that data should be fetched from local host only.\n part: A `Tensor` of type `int32`. Partition data should be fetched from.\n page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client.\n schema: A `Tensor` of type `int32`.\n Internal structure that defines schema of cache objects.\n permutation: A `Tensor` of type `int32`.\n Internal structure that defines permutation of cache objects.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `variant`.\n ' _ctx = (_context._context or _context.context()) if ((_ctx is not None) and _ctx._thread_local_data.is_eager): try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._thread_local_data.device_name, 'IgniteDataset', name, _ctx.post_execution_callbacks, cache_name, host, port, local, part, page_size, schema, permutation) return _result except _core._FallbackException: try: return ignite_dataset_eager_fallback(cache_name, host, port, local, part, page_size, schema, permutation, name=name, ctx=_ctx) except _core._SymbolicException: pass except (TypeError, ValueError): result = _dispatch.dispatch(ignite_dataset, cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise except _core._NotOkStatusException as e: if (name is not None): message = ((e.message + ' name: ') + name) else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) try: (_, _, _op) = _op_def_lib._apply_op_helper('IgniteDataset', cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name) except (TypeError, ValueError): result = _dispatch.dispatch(ignite_dataset, cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = None _execute.record_gradient('IgniteDataset', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
def ignite_dataset_eager_fallback(cache_name, host, port, local, part, page_size, schema, permutation, name=None, ctx=None):
    'This is the slowpath function for Eager mode.\n This is for function ignite_dataset\n '
    _ctx = (ctx if ctx else _context.context())
    cache_name = _ops.convert_to_tensor(cache_name, _dtypes.string)
    host = _ops.convert_to_tensor(host, _dtypes.string)
    port = _ops.convert_to_tensor(port, _dtypes.int32)
    local = _ops.convert_to_tensor(local, _dtypes.bool)
    part = _ops.convert_to_tensor(part, _dtypes.int32)
    page_size = _ops.convert_to_tensor(page_size, _dtypes.int32)
    schema = _ops.convert_to_tensor(schema, _dtypes.int32)
    permutation = _ops.convert_to_tensor(permutation, _dtypes.int32)
    _inputs_flat = [cache_name, host, port, local, part, page_size, schema, permutation]
    _attrs = None
    _result = _execute.execute(b'IgniteDataset', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('IgniteDataset', _inputs_flat, _attrs, _result, name)
    (_result,) = _result
    return _result
5,534,718,431,688,911,000
This is the slowpath function for Eager mode. This is for function ignite_dataset
venv/lib/python3.7/site-packages/tensorflow_core/contrib/ignite/python/ops/gen_dataset_ops.py
ignite_dataset_eager_fallback
aMp37/SimpleHTR
python
def ignite_dataset_eager_fallback(cache_name, host, port, local, part, page_size, schema, permutation, name=None, ctx=None): 'This is the slowpath function for Eager mode.\n This is for function ignite_dataset\n ' _ctx = (ctx if ctx else _context.context()) cache_name = _ops.convert_to_tensor(cache_name, _dtypes.string) host = _ops.convert_to_tensor(host, _dtypes.string) port = _ops.convert_to_tensor(port, _dtypes.int32) local = _ops.convert_to_tensor(local, _dtypes.bool) part = _ops.convert_to_tensor(part, _dtypes.int32) page_size = _ops.convert_to_tensor(page_size, _dtypes.int32) schema = _ops.convert_to_tensor(schema, _dtypes.int32) permutation = _ops.convert_to_tensor(permutation, _dtypes.int32) _inputs_flat = [cache_name, host, port, local, part, page_size, schema, permutation] _attrs = None _result = _execute.execute(b'IgniteDataset', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('IgniteDataset', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
def speaker_access(f):
    '\n Decorator that protects the view relative to a speaker.\n '
    @functools.wraps(f)
    def wrapper(request, slug, **kwargs):
        spk = get_object_or_404(models.Speaker, slug=slug)
        if (request.user.is_staff or (request.user == spk.user)):
            full_access = True
            talks = spk.talks()
        else:
            full_access = False
            conf = models.Conference.objects.current()
            if settings.VOTING_OPENED(conf, request.user):
                if settings.VOTING_ALLOWED(request.user):
                    talks = spk.talks()
                elif settings.VOTING_DISALLOWED:
                    return redirect(settings.VOTING_DISALLOWED)
                else:
                    raise http.Http404()
            else:
                talks = spk.talks(status='accepted')
            if (talks.count() == 0):
                raise http.Http404()
        return f(request, slug, speaker=spk, talks=talks, full_access=full_access, **kwargs)
    return wrapper
5,354,350,178,998,343,000
Decorator that protects the view relative to a speaker.
conference/decorators.py
speaker_access
cezar77/epcon
python
def speaker_access(f): '\n \n ' @functools.wraps(f) def wrapper(request, slug, **kwargs): spk = get_object_or_404(models.Speaker, slug=slug) if (request.user.is_staff or (request.user == spk.user)): full_access = True talks = spk.talks() else: full_access = False conf = models.Conference.objects.current() if settings.VOTING_OPENED(conf, request.user): if settings.VOTING_ALLOWED(request.user): talks = spk.talks() elif settings.VOTING_DISALLOWED: return redirect(settings.VOTING_DISALLOWED) else: raise http.Http404() else: talks = spk.talks(status='accepted') if (talks.count() == 0): raise http.Http404() return f(request, slug, speaker=spk, talks=talks, full_access=full_access, **kwargs) return wrapper
def talk_access(f):
    '\n Decorator that protects the view relative to a talk.\n '
    @functools.wraps(f)
    def wrapper(request, slug, **kwargs):
        tlk = get_object_or_404(models.Talk, slug=slug)
        if request.user.is_anonymous():
            full_access = False
        elif request.user.is_staff:
            full_access = True
        else:
            try:
                tlk.get_all_speakers().get(user__id=request.user.id)
            except (models.Speaker.DoesNotExist, models.Speaker.MultipleObjectsReturned):
                full_access = False
            else:
                full_access = True
        if ((tlk.status == 'proposed') and (not full_access)):
            conf = models.Conference.objects.current()
            if (not settings.VOTING_OPENED(conf, request.user)):
                return http.HttpResponseForbidden()
            if (not settings.VOTING_ALLOWED(request.user)):
                if settings.VOTING_DISALLOWED:
                    return redirect(settings.VOTING_DISALLOWED)
                else:
                    return http.HttpResponseForbidden()
        return f(request, slug, talk=tlk, full_access=full_access, **kwargs)
    return wrapper
-8,703,531,422,658,072,000
Decorator that protects the view relative to a talk.
conference/decorators.py
talk_access
cezar77/epcon
python
def talk_access(f): '\n \n ' @functools.wraps(f) def wrapper(request, slug, **kwargs): tlk = get_object_or_404(models.Talk, slug=slug) if request.user.is_anonymous(): full_access = False elif request.user.is_staff: full_access = True else: try: tlk.get_all_speakers().get(user__id=request.user.id) except (models.Speaker.DoesNotExist, models.Speaker.MultipleObjectsReturned): full_access = False else: full_access = True if ((tlk.status == 'proposed') and (not full_access)): conf = models.Conference.objects.current() if (not settings.VOTING_OPENED(conf, request.user)): return http.HttpResponseForbidden() if (not settings.VOTING_ALLOWED(request.user)): if settings.VOTING_DISALLOWED: return redirect(settings.VOTING_DISALLOWED) else: return http.HttpResponseForbidden() return f(request, slug, talk=tlk, full_access=full_access, **kwargs) return wrapper
def profile_access(f):
    '\n Decorator which protect the relative view to a profile.\n '
    @functools.wraps(f)
    def wrapper(request, slug, **kwargs):
        try:
            profile = models.AttendeeProfile.objects.select_related('user').get(slug=slug)
        except models.AttendeeProfile.DoesNotExist:
            raise http.Http404()
        if (request.user.is_staff or (request.user == profile.user)):
            full_access = True
        else:
            full_access = False
            accepted = models.TalkSpeaker.objects.filter(speaker__user=profile.user).filter(talk__status='accepted').count()
            if (not accepted):
                conf = models.Conference.objects.current()
                if (not (settings.VOTING_OPENED(conf, request.user) and settings.VOTING_ALLOWED(request.user))):
                    if (profile.visibility == 'x'):
                        return http.HttpResponseForbidden()
                    elif ((profile.visibility == 'm') and request.user.is_anonymous()):
                        return http.HttpResponseForbidden()
        return f(request, slug, profile=profile, full_access=full_access, **kwargs)
    return wrapper
6,321,148,472,076,626,000
Decorator which protect the relative view to a profile.
conference/decorators.py
profile_access
cezar77/epcon
python
def profile_access(f): '\n \n ' @functools.wraps(f) def wrapper(request, slug, **kwargs): try: profile = models.AttendeeProfile.objects.select_related('user').get(slug=slug) except models.AttendeeProfile.DoesNotExist: raise http.Http404() if (request.user.is_staff or (request.user == profile.user)): full_access = True else: full_access = False accepted = models.TalkSpeaker.objects.filter(speaker__user=profile.user).filter(talk__status='accepted').count() if (not accepted): conf = models.Conference.objects.current() if (not (settings.VOTING_OPENED(conf, request.user) and settings.VOTING_ALLOWED(request.user))): if (profile.visibility == 'x'): return http.HttpResponseForbidden() elif ((profile.visibility == 'm') and request.user.is_anonymous()): return http.HttpResponseForbidden() return f(request, slug, profile=profile, full_access=full_access, **kwargs) return wrapper
def flatten_reply(reply):
    "Flatten node replies.\n\n Convert from a list of replies in this format::\n\n [{'[email protected]': reply},\n {'[email protected]': reply}]\n\n into this format::\n\n {'[email protected]': reply,\n '[email protected]': reply}\n "
    (nodes, dupes) = ({}, set())
    for item in reply:
        [dupes.add(name) for name in item if (name in nodes)]
        nodes.update(item)
    if dupes:
        warnings.warn(DuplicateNodenameWarning(W_DUPNODE.format(pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)))))
    return nodes
-8,466,753,339,999,273,000
Flatten node replies. Convert from a list of replies in this format:: [{'[email protected]': reply}, {'[email protected]': reply}] into this format:: {'[email protected]': reply, '[email protected]': reply}
idps/lib/python3.7/site-packages/celery/app/control.py
flatten_reply
DTrafford/IDPS
python
def flatten_reply(reply): "Flatten node replies.\n\n Convert from a list of replies in this format::\n\n [{'[email protected]': reply},\n {'[email protected]': reply}]\n\n into this format::\n\n {'[email protected]': reply,\n '[email protected]': reply}\n " (nodes, dupes) = ({}, set()) for item in reply: [dupes.add(name) for name in item if (name in nodes)] nodes.update(item) if dupes: warnings.warn(DuplicateNodenameWarning(W_DUPNODE.format(pluralize(len(dupes), 'name'), ', '.join(sorted(dupes))))) return nodes
def purge(self, connection=None):
    'Discard all waiting tasks.\n\n This will ignore all tasks waiting for execution, and they will\n be deleted from the messaging server.\n\n Arguments:\n connection (kombu.Connection): Optional specific connection\n instance to use. If not provided a connection will\n be acquired from the connection pool.\n\n Returns:\n int: the number of tasks discarded.\n '
    with self.app.connection_or_acquire(connection) as conn:
        return self.app.amqp.TaskConsumer(conn).purge()
-8,160,378,701,582,752,000
Discard all waiting tasks. This will ignore all tasks waiting for execution, and they will be deleted from the messaging server. Arguments: connection (kombu.Connection): Optional specific connection instance to use. If not provided a connection will be acquired from the connection pool. Returns: int: the number of tasks discarded.
idps/lib/python3.7/site-packages/celery/app/control.py
purge
DTrafford/IDPS
python
def purge(self, connection=None): 'Discard all waiting tasks.\n\n This will ignore all tasks waiting for execution, and they will\n be deleted from the messaging server.\n\n Arguments:\n connection (kombu.Connection): Optional specific connection\n instance to use. If not provided a connection will\n be acquired from the connection pool.\n\n Returns:\n int: the number of tasks discarded.\n ' with self.app.connection_or_acquire(connection) as conn: return self.app.amqp.TaskConsumer(conn).purge()
def revoke(self, task_id, destination=None, terminate=False, signal=TERM_SIGNAME, **kwargs):
    'Tell all (or specific) workers to revoke a task by id.\n\n If a task is revoked, the workers will ignore the task and\n not execute it after all.\n\n Arguments:\n task_id (str): Id of the task to revoke.\n terminate (bool): Also terminate the process currently working\n on the task (if any).\n signal (str): Name of signal to send to process if terminate.\n Default is TERM.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n '
    return self.broadcast('revoke', destination=destination, arguments={'task_id': task_id, 'terminate': terminate, 'signal': signal}, **kwargs)
1,868,803,149,643,385,000
Tell all (or specific) workers to revoke a task by id. If a task is revoked, the workers will ignore the task and not execute it after all. Arguments: task_id (str): Id of the task to revoke. terminate (bool): Also terminate the process currently working on the task (if any). signal (str): Name of signal to send to process if terminate. Default is TERM. See Also: :meth:`broadcast` for supported keyword arguments.
idps/lib/python3.7/site-packages/celery/app/control.py
revoke
DTrafford/IDPS
python
def revoke(self, task_id, destination=None, terminate=False, signal=TERM_SIGNAME, **kwargs): 'Tell all (or specific) workers to revoke a task by id.\n\n If a task is revoked, the workers will ignore the task and\n not execute it after all.\n\n Arguments:\n task_id (str): Id of the task to revoke.\n terminate (bool): Also terminate the process currently working\n on the task (if any).\n signal (str): Name of signal to send to process if terminate.\n Default is TERM.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n ' return self.broadcast('revoke', destination=destination, arguments={'task_id': task_id, 'terminate': terminate, 'signal': signal}, **kwargs)
def terminate(self, task_id, destination=None, signal=TERM_SIGNAME, **kwargs):
    'Tell all (or specific) workers to terminate a task by id.\n\n See Also:\n This is just a shortcut to :meth:`revoke` with the terminate\n argument enabled.\n '
    return self.revoke(task_id, destination=destination, terminate=True, signal=signal, **kwargs)
-4,554,651,076,549,802,000
Tell all (or specific) workers to terminate a task by id. See Also: This is just a shortcut to :meth:`revoke` with the terminate argument enabled.
idps/lib/python3.7/site-packages/celery/app/control.py
terminate
DTrafford/IDPS
python
def terminate(self, task_id, destination=None, signal=TERM_SIGNAME, **kwargs): 'Tell all (or specific) workers to terminate a task by id.\n\n See Also:\n This is just a shortcut to :meth:`revoke` with the terminate\n argument enabled.\n ' return self.revoke(task_id, destination=destination, terminate=True, signal=signal, **kwargs)
def ping(self, destination=None, timeout=1.0, **kwargs):
    "Ping all (or specific) workers.\n\n Returns:\n List[Dict]: List of ``{'hostname': reply}`` dictionaries.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n "
    return self.broadcast('ping', reply=True, arguments={}, destination=destination, timeout=timeout, **kwargs)
-6,216,762,758,353,650,000
Ping all (or specific) workers. Returns: List[Dict]: List of ``{'hostname': reply}`` dictionaries. See Also: :meth:`broadcast` for supported keyword arguments.
idps/lib/python3.7/site-packages/celery/app/control.py
ping
DTrafford/IDPS
python
def ping(self, destination=None, timeout=1.0, **kwargs): "Ping all (or specific) workers.\n\n Returns:\n List[Dict]: List of ``{'hostname': reply}`` dictionaries.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n " return self.broadcast('ping', reply=True, arguments={}, destination=destination, timeout=timeout, **kwargs)
def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
    "Tell workers to set a new rate limit for task by type.\n\n Arguments:\n task_name (str): Name of task to change rate limit for.\n rate_limit (int, str): The rate limit as tasks per second,\n or a rate limit string (`'100/m'`, etc.\n see :attr:`celery.task.base.Task.rate_limit` for\n more information).\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n "
    return self.broadcast('rate_limit', destination=destination, arguments={'task_name': task_name, 'rate_limit': rate_limit}, **kwargs)
2,807,817,230,656,315,000
Tell workers to set a new rate limit for task by type. Arguments: task_name (str): Name of task to change rate limit for. rate_limit (int, str): The rate limit as tasks per second, or a rate limit string (`'100/m'`, etc. see :attr:`celery.task.base.Task.rate_limit` for more information). See Also: :meth:`broadcast` for supported keyword arguments.
idps/lib/python3.7/site-packages/celery/app/control.py
rate_limit
DTrafford/IDPS
python
def rate_limit(self, task_name, rate_limit, destination=None, **kwargs): "Tell workers to set a new rate limit for task by type.\n\n Arguments:\n task_name (str): Name of task to change rate limit for.\n rate_limit (int, str): The rate limit as tasks per second,\n or a rate limit string (`'100/m'`, etc.\n see :attr:`celery.task.base.Task.rate_limit` for\n more information).\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n " return self.broadcast('rate_limit', destination=destination, arguments={'task_name': task_name, 'rate_limit': rate_limit}, **kwargs)
def add_consumer(self, queue, exchange=None, exchange_type='direct', routing_key=None, options=None, destination=None, **kwargs):
    "Tell all (or specific) workers to start consuming from a new queue.\n\n Only the queue name is required as if only the queue is specified\n then the exchange/routing key will be set to the same name (\n like automatic queues do).\n\n Note:\n This command does not respect the default queue/exchange\n options in the configuration.\n\n Arguments:\n queue (str): Name of queue to start consuming from.\n exchange (str): Optional name of exchange.\n exchange_type (str): Type of exchange (defaults to 'direct')\n command to, when empty broadcast to all workers.\n routing_key (str): Optional routing key.\n options (Dict): Additional options as supported\n by :meth:`kombu.entitiy.Queue.from_dict`.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n "
    return self.broadcast('add_consumer', destination=destination, arguments=dict({'queue': queue, 'exchange': exchange, 'exchange_type': exchange_type, 'routing_key': routing_key}, **(options or {})), **kwargs)
-4,895,948,726,730,740,000
Tell all (or specific) workers to start consuming from a new queue. Only the queue name is required as if only the queue is specified then the exchange/routing key will be set to the same name ( like automatic queues do). Note: This command does not respect the default queue/exchange options in the configuration. Arguments: queue (str): Name of queue to start consuming from. exchange (str): Optional name of exchange. exchange_type (str): Type of exchange (defaults to 'direct') command to, when empty broadcast to all workers. routing_key (str): Optional routing key. options (Dict): Additional options as supported by :meth:`kombu.entitiy.Queue.from_dict`. See Also: :meth:`broadcast` for supported keyword arguments.
idps/lib/python3.7/site-packages/celery/app/control.py
add_consumer
DTrafford/IDPS
python
def add_consumer(self, queue, exchange=None, exchange_type='direct', routing_key=None, options=None, destination=None, **kwargs): "Tell all (or specific) workers to start consuming from a new queue.\n\n Only the queue name is required as if only the queue is specified\n then the exchange/routing key will be set to the same name (\n like automatic queues do).\n\n Note:\n This command does not respect the default queue/exchange\n options in the configuration.\n\n Arguments:\n queue (str): Name of queue to start consuming from.\n exchange (str): Optional name of exchange.\n exchange_type (str): Type of exchange (defaults to 'direct')\n command to, when empty broadcast to all workers.\n routing_key (str): Optional routing key.\n options (Dict): Additional options as supported\n by :meth:`kombu.entitiy.Queue.from_dict`.\n\n See Also:\n :meth:`broadcast` for supported keyword arguments.\n " return self.broadcast('add_consumer', destination=destination, arguments=dict({'queue': queue, 'exchange': exchange, 'exchange_type': exchange_type, 'routing_key': routing_key}, **(options or {})), **kwargs)
def cancel_consumer(self, queue, destination=None, **kwargs):
    'Tell all (or specific) workers to stop consuming from ``queue``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n '
    return self.broadcast('cancel_consumer', destination=destination, arguments={'queue': queue}, **kwargs)
3,505,204,021,316,517,400
Tell all (or specific) workers to stop consuming from ``queue``. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
cancel_consumer
DTrafford/IDPS
python
def cancel_consumer(self, queue, destination=None, **kwargs): 'Tell all (or specific) workers to stop consuming from ``queue``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('cancel_consumer', destination=destination, arguments={'queue': queue}, **kwargs)
def time_limit(self, task_name, soft=None, hard=None, destination=None, **kwargs):
    'Tell workers to set time limits for a task by type.\n\n Arguments:\n task_name (str): Name of task to change time limits for.\n soft (float): New soft time limit (in seconds).\n hard (float): New hard time limit (in seconds).\n **kwargs (Any): arguments passed on to :meth:`broadcast`.\n '
    return self.broadcast('time_limit', arguments={'task_name': task_name, 'hard': hard, 'soft': soft}, destination=destination, **kwargs)
-1,159,561,853,796,097,500
Tell workers to set time limits for a task by type. Arguments: task_name (str): Name of task to change time limits for. soft (float): New soft time limit (in seconds). hard (float): New hard time limit (in seconds). **kwargs (Any): arguments passed on to :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
time_limit
DTrafford/IDPS
python
def time_limit(self, task_name, soft=None, hard=None, destination=None, **kwargs): 'Tell workers to set time limits for a task by type.\n\n Arguments:\n task_name (str): Name of task to change time limits for.\n soft (float): New soft time limit (in seconds).\n hard (float): New hard time limit (in seconds).\n **kwargs (Any): arguments passed on to :meth:`broadcast`.\n ' return self.broadcast('time_limit', arguments={'task_name': task_name, 'hard': hard, 'soft': soft}, destination=destination, **kwargs)
def enable_events(self, destination=None, **kwargs): 'Tell all (or specific) workers to enable events.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('enable_events', arguments={}, destination=destination, **kwargs)
-6,017,461,550,049,096,000
Tell all (or specific) workers to enable events. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
enable_events
DTrafford/IDPS
python
def enable_events(self, destination=None, **kwargs): 'Tell all (or specific) workers to enable events.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('enable_events', arguments={}, destination=destination, **kwargs)
def disable_events(self, destination=None, **kwargs): 'Tell all (or specific) workers to disable events.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('disable_events', arguments={}, destination=destination, **kwargs)
3,649,242,235,164,667,000
Tell all (or specific) workers to disable events. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
disable_events
DTrafford/IDPS
python
def disable_events(self, destination=None, **kwargs): 'Tell all (or specific) workers to disable events.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('disable_events', arguments={}, destination=destination, **kwargs)
def pool_grow(self, n=1, destination=None, **kwargs): 'Tell all (or specific) workers to grow the pool by ``n``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('pool_grow', arguments={'n': n}, destination=destination, **kwargs)
1,891,126,562,088,961,500
Tell all (or specific) workers to grow the pool by ``n``. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
pool_grow
DTrafford/IDPS
python
def pool_grow(self, n=1, destination=None, **kwargs): 'Tell all (or specific) workers to grow the pool by ``n``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('pool_grow', arguments={'n': n}, destination=destination, **kwargs)
def pool_shrink(self, n=1, destination=None, **kwargs): 'Tell all (or specific) workers to shrink the pool by ``n``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('pool_shrink', arguments={'n': n}, destination=destination, **kwargs)
-3,755,088,330,896,770,600
Tell all (or specific) workers to shrink the pool by ``n``. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
pool_shrink
DTrafford/IDPS
python
def pool_shrink(self, n=1, destination=None, **kwargs): 'Tell all (or specific) workers to shrink the pool by ``n``.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('pool_shrink', arguments={'n': n}, destination=destination, **kwargs)
def autoscale(self, max, min, destination=None, **kwargs): 'Change worker(s) autoscale setting.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('autoscale', arguments={'max': max, 'min': min}, destination=destination, **kwargs)
-7,394,465,980,728,531,000
Change worker(s) autoscale setting. See Also: Supports the same arguments as :meth:`broadcast`.
idps/lib/python3.7/site-packages/celery/app/control.py
autoscale
DTrafford/IDPS
python
def autoscale(self, max, min, destination=None, **kwargs): 'Change worker(s) autoscale setting.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`.\n ' return self.broadcast('autoscale', arguments={'max': max, 'min': min}, destination=destination, **kwargs)
def shutdown(self, destination=None, **kwargs): 'Shutdown worker(s).\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('shutdown', arguments={}, destination=destination, **kwargs)
3,164,932,352,739,790,000
Shutdown worker(s). See Also: Supports the same arguments as :meth:`broadcast`
idps/lib/python3.7/site-packages/celery/app/control.py
shutdown
DTrafford/IDPS
python
def shutdown(self, destination=None, **kwargs): 'Shutdown worker(s).\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('shutdown', arguments={}, destination=destination, **kwargs)
def pool_restart(self, modules=None, reload=False, reloader=None, destination=None, **kwargs): 'Restart the execution pools of all or specific workers.\n\n Keyword Arguments:\n modules (Sequence[str]): List of modules to reload.\n reload (bool): Flag to enable module reloading. Default is False.\n reloader (Any): Function to reload a module.\n destination (Sequence[str]): List of worker names to send this\n command to.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('pool_restart', arguments={'modules': modules, 'reload': reload, 'reloader': reloader}, destination=destination, **kwargs)
-4,415,053,140,935,657,500
Restart the execution pools of all or specific workers. Keyword Arguments: modules (Sequence[str]): List of modules to reload. reload (bool): Flag to enable module reloading. Default is False. reloader (Any): Function to reload a module. destination (Sequence[str]): List of worker names to send this command to. See Also: Supports the same arguments as :meth:`broadcast`
idps/lib/python3.7/site-packages/celery/app/control.py
pool_restart
DTrafford/IDPS
python
def pool_restart(self, modules=None, reload=False, reloader=None, destination=None, **kwargs): 'Restart the execution pools of all or specific workers.\n\n Keyword Arguments:\n modules (Sequence[str]): List of modules to reload.\n reload (bool): Flag to enable module reloading. Default is False.\n reloader (Any): Function to reload a module.\n destination (Sequence[str]): List of worker names to send this\n command to.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('pool_restart', arguments={'modules': modules, 'reload': reload, 'reloader': reloader}, destination=destination, **kwargs)
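A hedged sketch for pool_restart, again assuming a configured Celery app; the module name and worker hostname are placeholders, and the worker is assumed to have pool restarts enabled in its configuration (the worker_pool_restarts setting).

from celery import Celery

app = Celery('proj', broker='redis://localhost:6379/0')  # assumed broker URL

# Ask one (illustrative) worker to restart its execution pool and reload a module.
app.control.pool_restart(
    modules=['proj.tasks'],               # illustrative module name
    reload=True,
    destination=['worker1@example.com'],  # illustrative worker name
)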
def heartbeat(self, destination=None, **kwargs): 'Tell worker(s) to send a heartbeat immediately.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('heartbeat', arguments={}, destination=destination, **kwargs)
-2,943,589,852,549,582,300
Tell worker(s) to send a heartbeat immediately. See Also: Supports the same arguments as :meth:`broadcast`
idps/lib/python3.7/site-packages/celery/app/control.py
heartbeat
DTrafford/IDPS
python
def heartbeat(self, destination=None, **kwargs): 'Tell worker(s) to send a heartbeat immediately.\n\n See Also:\n Supports the same arguments as :meth:`broadcast`\n ' return self.broadcast('heartbeat', arguments={}, destination=destination, **kwargs)
def broadcast(self, command, arguments=None, destination=None, connection=None, reply=False, timeout=1.0, limit=None, callback=None, channel=None, pattern=None, matcher=None, **extra_kwargs): 'Broadcast a control command to the celery workers.\n\n Arguments:\n command (str): Name of command to send.\n arguments (Dict): Keyword arguments for the command.\n destination (List): If set, a list of the hosts to send the\n command to, when empty broadcast to all workers.\n connection (kombu.Connection): Custom broker connection to use,\n if not set, a connection will be acquired from the pool.\n reply (bool): Wait for and return the reply.\n timeout (float): Timeout in seconds to wait for the reply.\n limit (int): Limit number of replies.\n callback (Callable): Callback called immediately for\n each reply received.\n pattern (str): Custom pattern string to match\n matcher (Callable): Custom matcher to run the pattern to match\n ' with self.app.connection_or_acquire(connection) as conn: arguments = dict((arguments or {}), **extra_kwargs) if (pattern and matcher): return self.mailbox(conn)._broadcast(command, arguments, destination, reply, timeout, limit, callback, channel=channel, pattern=pattern, matcher=matcher) else: return self.mailbox(conn)._broadcast(command, arguments, destination, reply, timeout, limit, callback, channel=channel)
-1,976,461,350,039,020,300
Broadcast a control command to the celery workers. Arguments: command (str): Name of command to send. arguments (Dict): Keyword arguments for the command. destination (List): If set, a list of the hosts to send the command to, when empty broadcast to all workers. connection (kombu.Connection): Custom broker connection to use, if not set, a connection will be acquired from the pool. reply (bool): Wait for and return the reply. timeout (float): Timeout in seconds to wait for the reply. limit (int): Limit number of replies. callback (Callable): Callback called immediately for each reply received. pattern (str): Custom pattern string to match matcher (Callable): Custom matcher to run the pattern to match
idps/lib/python3.7/site-packages/celery/app/control.py
broadcast
DTrafford/IDPS
python
def broadcast(self, command, arguments=None, destination=None, connection=None, reply=False, timeout=1.0, limit=None, callback=None, channel=None, pattern=None, matcher=None, **extra_kwargs): 'Broadcast a control command to the celery workers.\n\n Arguments:\n command (str): Name of command to send.\n arguments (Dict): Keyword arguments for the command.\n destination (List): If set, a list of the hosts to send the\n command to, when empty broadcast to all workers.\n connection (kombu.Connection): Custom broker connection to use,\n if not set, a connection will be acquired from the pool.\n reply (bool): Wait for and return the reply.\n timeout (float): Timeout in seconds to wait for the reply.\n limit (int): Limit number of replies.\n callback (Callable): Callback called immediately for\n each reply received.\n pattern (str): Custom pattern string to match\n matcher (Callable): Custom matcher to run the pattern to match\n ' with self.app.connection_or_acquire(connection) as conn: arguments = dict((arguments or {}), **extra_kwargs) if (pattern and matcher): return self.mailbox(conn)._broadcast(command, arguments, destination, reply, timeout, limit, callback, channel=channel, pattern=pattern, matcher=matcher) else: return self.mailbox(conn)._broadcast(command, arguments, destination, reply, timeout, limit, callback, channel=channel)
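Since broadcast is the primitive the helpers above delegate to, a short sketch of calling it directly with the built-in 'ping' remote command; the app setup is the same assumption as before.

from celery import Celery

app = Celery('proj', broker='redis://localhost:6379/0')  # assumed broker URL

# Send 'ping' to all workers, wait up to 2 seconds, accept at most 3 replies.
replies = app.control.broadcast('ping', reply=True, timeout=2.0, limit=3)

# Each element maps a worker hostname to that worker's response.
for reply in replies:
    for hostname, response in reply.items():
        print(hostname, response)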
def _has_restrictions(ts): 'Determine whether the given task has restrictions and whether these\n restrictions are strict.\n ' return ((not ts.loose_restrictions) and (ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions))
3,853,626,670,157,284,000
Determine whether the given task has restrictions and whether these restrictions are strict.
distributed/stealing.py
_has_restrictions
ncclementi/distributed
python
def _has_restrictions(ts): 'Determine whether the given task has restrictions and whether these\n restrictions are strict.\n ' return ((not ts.loose_restrictions) and (ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions))
def _can_steal(thief, ts, victim): 'Determine whether worker ``thief`` can steal task ``ts`` from worker\n ``victim``.\n\n Assumes that `ts` has some restrictions.\n ' if (ts.host_restrictions and (get_address_host(thief.address) not in ts.host_restrictions)): return False elif (ts.worker_restrictions and (thief.address not in ts.worker_restrictions)): return False if (victim.resources is None): return True for (resource, value) in victim.resources.items(): try: supplied = thief.resources[resource] except KeyError: return False else: if (supplied < value): return False return True
-5,987,312,310,506,130,000
Determine whether worker ``thief`` can steal task ``ts`` from worker ``victim``. Assumes that `ts` has some restrictions.
distributed/stealing.py
_can_steal
ncclementi/distributed
python
def _can_steal(thief, ts, victim): 'Determine whether worker ``thief`` can steal task ``ts`` from worker\n ``victim``.\n\n Assumes that `ts` has some restrictions.\n ' if (ts.host_restrictions and (get_address_host(thief.address) not in ts.host_restrictions)): return False elif (ts.worker_restrictions and (thief.address not in ts.worker_restrictions)): return False if (victim.resources is None): return True for (resource, value) in victim.resources.items(): try: supplied = thief.resources[resource] except KeyError: return False else: if (supplied < value): return False return True
async def start(self, scheduler=None): 'Start the background coroutine to balance the tasks on the cluster.\n Idempotent.\n The scheduler argument is ignored. It is merely required to satisify the\n plugin interface. Since this class is simultaneouly an extension, the\n scheudler instance is already registered during initialization\n ' if ('stealing' in self.scheduler.periodic_callbacks): return pc = PeriodicCallback(callback=self.balance, callback_time=(self._callback_time * 1000)) pc.start() self.scheduler.periodic_callbacks['stealing'] = pc self._in_flight_event.set()
8,660,697,406,665,815,000
Start the background coroutine to balance the tasks on the cluster. Idempotent. The scheduler argument is ignored; it is merely required to satisfy the plugin interface. Since this class is simultaneously an extension, the scheduler instance is already registered during initialization.
distributed/stealing.py
start
ncclementi/distributed
python
async def start(self, scheduler=None): 'Start the background coroutine to balance the tasks on the cluster.\n Idempotent.\n The scheduler argument is ignored. It is merely required to satisify the\n plugin interface. Since this class is simultaneouly an extension, the\n scheudler instance is already registered during initialization\n ' if ('stealing' in self.scheduler.periodic_callbacks): return pc = PeriodicCallback(callback=self.balance, callback_time=(self._callback_time * 1000)) pc.start() self.scheduler.periodic_callbacks['stealing'] = pc self._in_flight_event.set()
async def stop(self): 'Stop the background task balancing tasks on the cluster.\n This will block until all currently running stealing requests are\n finished. Idempotent\n ' pc = self.scheduler.periodic_callbacks.pop('stealing', None) if pc: pc.stop() (await self._in_flight_event.wait())
6,955,667,300,840,942,000
Stop the background task balancing tasks on the cluster. This will block until all currently running stealing requests are finished. Idempotent
distributed/stealing.py
stop
ncclementi/distributed
python
async def stop(self): 'Stop the background task balancing tasks on the cluster.\n This will block until all currently running stealing requests are\n finished. Idempotent\n ' pc = self.scheduler.periodic_callbacks.pop('stealing', None) if pc: pc.stop() (await self._in_flight_event.wait())
def _to_dict(self, *, exclude: Container[str]=()) -> dict: '\n A very verbose dictionary representation for debugging purposes.\n Not type stable and not inteded for roundtrips.\n\n Parameters\n ----------\n comm:\n exclude:\n A list of attributes which must not be present in the output.\n\n See also\n --------\n Client.dump_cluster_state\n ' return recursive_to_dict({'stealable_all': self.stealable_all, 'stealable': self.stealable, 'key_stealable': self.key_stealable, 'in_flight': self.in_flight, 'in_flight_occupancy': self.in_flight_occupancy}, exclude=exclude)
3,584,262,214,568,210,000
A very verbose dictionary representation for debugging purposes. Not type stable and not intended for roundtrips. Parameters ---------- exclude: A list of attributes which must not be present in the output. See also -------- Client.dump_cluster_state
distributed/stealing.py
_to_dict
ncclementi/distributed
python
def _to_dict(self, *, exclude: Container[str]=()) -> dict: '\n A very verbose dictionary representation for debugging purposes.\n Not type stable and not inteded for roundtrips.\n\n Parameters\n ----------\n comm:\n exclude:\n A list of attributes which must not be present in the output.\n\n See also\n --------\n Client.dump_cluster_state\n ' return recursive_to_dict({'stealable_all': self.stealable_all, 'stealable': self.stealable, 'key_stealable': self.key_stealable, 'in_flight': self.in_flight, 'in_flight_occupancy': self.in_flight_occupancy}, exclude=exclude)
def steal_time_ratio(self, ts): 'The compute to communication time ratio of a key\n\n Returns\n -------\n cost_multiplier: The increased cost from moving this task as a factor.\n For example a result of zero implies a task without dependencies.\n level: The location within a stealable list to place this value\n ' split = ts.prefix.name if (split in fast_tasks): return (None, None) if (not ts.dependencies): return (0, 0) ws = ts.processing_on compute_time = ws.processing[ts] if (compute_time < 0.005): return (None, None) nbytes = ts.get_nbytes_deps() transfer_time = ((nbytes / self.scheduler.bandwidth) + LATENCY) cost_multiplier = (transfer_time / compute_time) if (cost_multiplier > 100): return (None, None) level = int(round((log2(cost_multiplier) + 6))) if (level < 1): level = 1 return (cost_multiplier, level)
4,686,170,437,953,275,000
The compute to communication time ratio of a key Returns ------- cost_multiplier: The increased cost from moving this task as a factor. For example a result of zero implies a task without dependencies. level: The location within a stealable list to place this value
distributed/stealing.py
steal_time_ratio
ncclementi/distributed
python
def steal_time_ratio(self, ts): 'The compute to communication time ratio of a key\n\n Returns\n -------\n cost_multiplier: The increased cost from moving this task as a factor.\n For example a result of zero implies a task without dependencies.\n level: The location within a stealable list to place this value\n ' split = ts.prefix.name if (split in fast_tasks): return (None, None) if (not ts.dependencies): return (0, 0) ws = ts.processing_on compute_time = ws.processing[ts] if (compute_time < 0.005): return (None, None) nbytes = ts.get_nbytes_deps() transfer_time = ((nbytes / self.scheduler.bandwidth) + LATENCY) cost_multiplier = (transfer_time / compute_time) if (cost_multiplier > 100): return (None, None) level = int(round((log2(cost_multiplier) + 6))) if (level < 1): level = 1 return (cost_multiplier, level)
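To make the ratio above concrete, a standalone re-computation of the cost multiplier and level for assumed numbers; the latency value is an assumption standing in for the module's LATENCY constant.

from math import log2

compute_time = 0.5        # seconds the task is expected to run (assumed)
nbytes = 50_000_000       # bytes of dependencies that would have to move (assumed)
bandwidth = 100e6         # scheduler bandwidth estimate in bytes/second (assumed)
latency = 0.01            # stand-in for the module's LATENCY constant

transfer_time = nbytes / bandwidth + latency             # 0.51 s
cost_multiplier = transfer_time / compute_time            # 1.02: moving costs ~1.02x the compute
level = max(1, int(round(log2(cost_multiplier) + 6)))     # -> 6

print(cost_multiplier, level)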
def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): 'Load tf checkpoints in a pytorch model' try: import re import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(gpt_neo_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: if (('global_step' not in name) and ('adam' not in name)): array = tf.train.load_variable(tf_path, name) array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy() name = name.replace('attn/q', 'attn/attention/q_proj/w') name = name.replace('attn/k', 'attn/attention/k_proj/w') name = name.replace('attn/v', 'attn/attention/v_proj/w') name = name.replace('attn/o', 'attn/attention/out_proj/w') name = name.replace('norm_1', 'ln_1') name = name.replace('norm_2', 'ln_2') name = name.replace('attn/compute_output_bias/o_b', 'attn/attention/out_proj/b') name = name.replace('conv1d_main/c_fc/kernel', 'c_fc/w') name = name.replace('conv1d_main/c_fc/bias', 'c_fc/b') name = name.replace('conv1d_main/c_proj/kernel', 'c_proj/w') name = name.replace('conv1d_main/c_proj/bias', 'c_proj/b') names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name[5:] name = name.split('/') pointer = model.transformer for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): scope_names = re.split('(\\d+)', m_name) else: scope_names = [m_name] if ((scope_names[0] == 'w') or (scope_names[0] == 'g')): pointer = getattr(pointer, 'weight') elif (scope_names[0] == 'b'): pointer = getattr(pointer, 'bias') elif ((scope_names[0] == 'wpe') or (scope_names[0] == 'wte')): pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, scope_names[0]) if (len(scope_names) >= 2): num = int(scope_names[1]) pointer = pointer[num] if ((name[(- 1)] == 'w') and (name[(- 2)] in ['out_proj', 'k_proj', 'q_proj', 'v_proj', 'c_proj', 'c_fc'])): array = array.transpose() if (name == ['wte']): array = array[:config.vocab_size] try: assert (pointer.shape == array.shape), f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}' except AssertionError as e: e.args += (pointer.shape, array.shape) raise print(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) embs = model.transformer.wte.weight lin = LazyLinearAPICompatible(embs.size()[1], embs.size()[0], bias=False) lin.weight = embs model.set_output_embeddings(lin) return model
8,552,888,941,187,628,000
Load TF checkpoints in a PyTorch model
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
load_tf_weights_in_gpt_neo
Snarp/nostalgebraist-autoresponder
python
def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): try: import re import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(gpt_neo_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: if (('global_step' not in name) and ('adam' not in name)): array = tf.train.load_variable(tf_path, name) array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy() name = name.replace('attn/q', 'attn/attention/q_proj/w') name = name.replace('attn/k', 'attn/attention/k_proj/w') name = name.replace('attn/v', 'attn/attention/v_proj/w') name = name.replace('attn/o', 'attn/attention/out_proj/w') name = name.replace('norm_1', 'ln_1') name = name.replace('norm_2', 'ln_2') name = name.replace('attn/compute_output_bias/o_b', 'attn/attention/out_proj/b') name = name.replace('conv1d_main/c_fc/kernel', 'c_fc/w') name = name.replace('conv1d_main/c_fc/bias', 'c_fc/b') name = name.replace('conv1d_main/c_proj/kernel', 'c_proj/w') name = name.replace('conv1d_main/c_proj/bias', 'c_proj/b') names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name[5:] name = name.split('/') pointer = model.transformer for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): scope_names = re.split('(\\d+)', m_name) else: scope_names = [m_name] if ((scope_names[0] == 'w') or (scope_names[0] == 'g')): pointer = getattr(pointer, 'weight') elif (scope_names[0] == 'b'): pointer = getattr(pointer, 'bias') elif ((scope_names[0] == 'wpe') or (scope_names[0] == 'wte')): pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, scope_names[0]) if (len(scope_names) >= 2): num = int(scope_names[1]) pointer = pointer[num] if ((name[(- 1)] == 'w') and (name[(- 2)] in ['out_proj', 'k_proj', 'q_proj', 'v_proj', 'c_proj', 'c_fc'])): array = array.transpose() if (name == ['wte']): array = array[:config.vocab_size] try: assert (pointer.shape == array.shape), f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}' except AssertionError as e: e.args += (pointer.shape, array.shape) raise print(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) embs = model.transformer.wte.weight lin = LazyLinearAPICompatible(embs.size()[1], embs.size()[0], bias=False) lin.weight = embs model.set_output_embeddings(lin) return model
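A hedged conversion sketch for the loader above; the config class, model class name and file paths are illustrative assumptions, not taken from the source.

from transformers import GPTNeoConfig

config = GPTNeoConfig.from_pretrained('EleutherAI/gpt-neo-1.3B')   # illustrative model id
model = GPTNeoForCausalLM(config)          # causal-LM class assumed to be defined in this module
model = load_tf_weights_in_gpt_neo(model, config, '/path/to/tf_checkpoint')   # placeholder path
model.save_pretrained('/path/to/pytorch_model')                               # placeholder path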
def _split_heads(self, tensor, num_heads, attn_head_size): '\n Splits hidden_size dim into attn_head_size and num_heads\n ' new_shape = (tensor.size()[:(- 1)] + (num_heads, attn_head_size)) tensor = tensor.view(*new_shape) if (len(tensor.shape) == 5): return tensor.permute(0, 1, 3, 2, 4) elif (len(tensor.shape) == 4): return tensor.permute(0, 2, 1, 3) else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}')
3,453,971,312,782,584,300
Splits hidden_size dim into attn_head_size and num_heads
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
_split_heads
Snarp/nostalgebraist-autoresponder
python
def _split_heads(self, tensor, num_heads, attn_head_size): '\n \n ' new_shape = (tensor.size()[:(- 1)] + (num_heads, attn_head_size)) tensor = tensor.view(*new_shape) if (len(tensor.shape) == 5): return tensor.permute(0, 1, 3, 2, 4) elif (len(tensor.shape) == 4): return tensor.permute(0, 2, 1, 3) else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}')
def _merge_heads(self, tensor, num_heads, attn_head_size): '\n Merges attn_head_size dim and num_attn_heads dim into hidden_size\n ' if (len(tensor.shape) == 5): tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() elif (len(tensor.shape) == 4): tensor = tensor.permute(0, 2, 1, 3).contiguous() else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}') new_shape = (tensor.size()[:(- 2)] + ((num_heads * attn_head_size),)) return tensor.view(new_shape)
-6,433,798,251,814,849,000
Merges attn_head_size dim and num_attn_heads dim into hidden_size
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
_merge_heads
Snarp/nostalgebraist-autoresponder
python
def _merge_heads(self, tensor, num_heads, attn_head_size): '\n \n ' if (len(tensor.shape) == 5): tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() elif (len(tensor.shape) == 4): tensor = tensor.permute(0, 2, 1, 3).contiguous() else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}') new_shape = (tensor.size()[:(- 2)] + ((num_heads * attn_head_size),)) return tensor.view(new_shape)
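A standalone shape check (plain PyTorch, illustrative sizes) of the rank-4 split/merge round trip that _split_heads and _merge_heads above implement.

import torch

batch, seq_len, num_heads, head_size = 2, 8, 12, 64
hidden = torch.randn(batch, seq_len, num_heads * head_size)

# Split: (batch, seq, hidden) -> (batch, heads, seq, head_size), the rank-4 branch.
split = hidden.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

# Merge: the inverse permute and reshape, as in _merge_heads.
merged = split.permute(0, 2, 1, 3).contiguous().view(batch, seq_len, num_heads * head_size)

assert torch.equal(hidden, merged)   # the round trip recovers the original tensor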
def _init_weights(self, module): 'Initialize the weights.' if isinstance(module, (LazyLinearAPICompatible,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if (module.bias is not None): module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if (module.padding_idx is not None): module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
3,737,366,796,486,095,000
Initialize the weights.
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
_init_weights
Snarp/nostalgebraist-autoresponder
python
def _init_weights(self, module): if isinstance(module, (LazyLinearAPICompatible,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if (module.bias is not None): module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if (module.padding_idx is not None): module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
@add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): '\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n ' return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict) transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if (labels is not None): lm_logits = lm_logits.to(torch.float32) shift_logits = lm_logits[..., :(- 1), :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view((- 1), shift_logits.size((- 1))), shift_labels.view((- 1))) lm_logits = lm_logits.to(hidden_states.dtype) loss = loss.to(hidden_states.dtype) if (not return_dict): output = ((lm_logits,) + transformer_outputs[1:]) return (((loss,) + output) if (loss is not None) else output) return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
5,923,136,324,066,054,000
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
forward
Snarp/nostalgebraist-autoresponder
python
@add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): '\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n ' return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict) transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if (labels is not None): lm_logits = lm_logits.to(torch.float32) shift_logits = lm_logits[..., :(- 1), :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view((- 1), shift_logits.size((- 1))), shift_labels.view((- 1))) lm_logits = lm_logits.to(hidden_states.dtype) loss = loss.to(hidden_states.dtype) if (not return_dict): output = ((lm_logits,) + transformer_outputs[1:]) return (((loss,) + output) if (loss is not None) else output) return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
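The label handling described in the docstring above can be reproduced in isolation; the shapes below are illustrative only.

import torch
from torch.nn import CrossEntropyLoss

vocab_size, seq_len = 100, 6
logits = torch.randn(1, seq_len, vocab_size)            # stand-in for lm_logits
labels = torch.randint(0, vocab_size, (1, seq_len))     # in practice labels = input_ids

# The model shifts internally: the logits at position i are scored against token i+1.
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())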
@staticmethod def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: '\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n ' return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past))
-8,147,205,206,558,354,000
This function is used to re-order the :obj:`past_key_values` cache if :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py
_reorder_cache
Snarp/nostalgebraist-autoresponder
python
@staticmethod def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: '\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n ' return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past))
def is_type(obj): 'Returns True is obj is a reference to a type.\n\n >>> is_type(1)\n False\n\n >>> is_type(object)\n True\n\n >>> class Klass: pass\n >>> is_type(Klass)\n True\n ' return isinstance(obj, class_types)
5,911,129,199,024,651,000
Returns True if obj is a reference to a type. >>> is_type(1) False >>> is_type(object) True >>> class Klass: pass >>> is_type(Klass) True
jsonpickle/util.py
is_type
antoinecollet5/Jsonpickle
python
def is_type(obj): 'Returns True is obj is a reference to a type.\n\n >>> is_type(1)\n False\n\n >>> is_type(object)\n True\n\n >>> class Klass: pass\n >>> is_type(Klass)\n True\n ' return isinstance(obj, class_types)
def is_object(obj): 'Returns True is obj is a reference to an object instance.\n\n >>> is_object(1)\n True\n\n >>> is_object(object())\n True\n\n >>> is_object(lambda x: 1)\n False\n ' return (isinstance(obj, object) and (not isinstance(obj, (type, types.FunctionType, types.BuiltinFunctionType))))
-2,526,840,915,339,155,000
Returns True if obj is a reference to an object instance. >>> is_object(1) True >>> is_object(object()) True >>> is_object(lambda x: 1) False
jsonpickle/util.py
is_object
antoinecollet5/Jsonpickle
python
def is_object(obj): 'Returns True is obj is a reference to an object instance.\n\n >>> is_object(1)\n True\n\n >>> is_object(object())\n True\n\n >>> is_object(lambda x: 1)\n False\n ' return (isinstance(obj, object) and (not isinstance(obj, (type, types.FunctionType, types.BuiltinFunctionType))))
def is_primitive(obj): 'Helper method to see if the object is a basic data type. Unicode strings,\n integers, longs, floats, booleans, and None are considered primitive\n and will return True when passed into *is_primitive()*\n\n >>> is_primitive(3)\n True\n >>> is_primitive([4,4])\n False\n ' return (type(obj) in PRIMITIVES)
7,743,021,598,907,132,000
Helper method to see if the object is a basic data type. Unicode strings, integers, longs, floats, booleans, and None are considered primitive and will return True when passed into *is_primitive()* >>> is_primitive(3) True >>> is_primitive([4,4]) False
jsonpickle/util.py
is_primitive
antoinecollet5/Jsonpickle
python
def is_primitive(obj): 'Helper method to see if the object is a basic data type. Unicode strings,\n integers, longs, floats, booleans, and None are considered primitive\n and will return True when passed into *is_primitive()*\n\n >>> is_primitive(3)\n True\n >>> is_primitive([4,4])\n False\n ' return (type(obj) in PRIMITIVES)
def is_enum(obj): 'Is the object an enum?' return (('enum' in sys.modules) and isinstance(obj, sys.modules['enum'].Enum))
1,340,873,959,223,885,000
Is the object an enum?
jsonpickle/util.py
is_enum
antoinecollet5/Jsonpickle
python
def is_enum(obj): return (('enum' in sys.modules) and isinstance(obj, sys.modules['enum'].Enum))
def is_dictionary(obj): "Helper method for testing if the object is a dictionary.\n\n >>> is_dictionary({'key':'value'})\n True\n\n " return (type(obj) is dict)
2,052,502,357,758,371,300
Helper method for testing if the object is a dictionary. >>> is_dictionary({'key':'value'}) True
jsonpickle/util.py
is_dictionary
antoinecollet5/Jsonpickle
python
def is_dictionary(obj): "Helper method for testing if the object is a dictionary.\n\n >>> is_dictionary({'key':'value'})\n True\n\n " return (type(obj) is dict)
def is_sequence(obj): 'Helper method to see if the object is a sequence (list, set, or tuple).\n\n >>> is_sequence([4])\n True\n\n ' return (type(obj) in SEQUENCES_SET)
2,095,652,466,627,002,400
Helper method to see if the object is a sequence (list, set, or tuple). >>> is_sequence([4]) True
jsonpickle/util.py
is_sequence
antoinecollet5/Jsonpickle
python
def is_sequence(obj): 'Helper method to see if the object is a sequence (list, set, or tuple).\n\n >>> is_sequence([4])\n True\n\n ' return (type(obj) in SEQUENCES_SET)
def is_list(obj): 'Helper method to see if the object is a Python list.\n\n >>> is_list([4])\n True\n ' return (type(obj) is list)
3,207,010,466,554,383,000
Helper method to see if the object is a Python list. >>> is_list([4]) True
jsonpickle/util.py
is_list
antoinecollet5/Jsonpickle
python
def is_list(obj): 'Helper method to see if the object is a Python list.\n\n >>> is_list([4])\n True\n ' return (type(obj) is list)
def is_set(obj): 'Helper method to see if the object is a Python set.\n\n >>> is_set(set())\n True\n ' return (type(obj) is set)
-4,346,913,397,758,760,000
Helper method to see if the object is a Python set. >>> is_set(set()) True
jsonpickle/util.py
is_set
antoinecollet5/Jsonpickle
python
def is_set(obj): 'Helper method to see if the object is a Python set.\n\n >>> is_set(set())\n True\n ' return (type(obj) is set)
def is_bytes(obj): "Helper method to see if the object is a bytestring.\n\n >>> is_bytes(b'foo')\n True\n " return (type(obj) is bytes)
-4,669,482,203,807,382,000
Helper method to see if the object is a bytestring. >>> is_bytes(b'foo') True
jsonpickle/util.py
is_bytes
antoinecollet5/Jsonpickle
python
def is_bytes(obj): "Helper method to see if the object is a bytestring.\n\n >>> is_bytes(b'foo')\n True\n " return (type(obj) is bytes)
def is_unicode(obj): 'Helper method to see if the object is a unicode string' return (type(obj) is compat.ustr)
-5,009,300,965,617,864,000
Helper method to see if the object is a unicode string
jsonpickle/util.py
is_unicode
antoinecollet5/Jsonpickle
python
def is_unicode(obj): return (type(obj) is compat.ustr)
def is_tuple(obj): 'Helper method to see if the object is a Python tuple.\n\n >>> is_tuple((1,))\n True\n ' return (type(obj) is tuple)
5,996,855,661,813,377,000
Helper method to see if the object is a Python tuple. >>> is_tuple((1,)) True
jsonpickle/util.py
is_tuple
antoinecollet5/Jsonpickle
python
def is_tuple(obj): 'Helper method to see if the object is a Python tuple.\n\n >>> is_tuple((1,))\n True\n ' return (type(obj) is tuple)
def is_dictionary_subclass(obj): 'Returns True if *obj* is a subclass of the dict type. *obj* must be\n a subclass and not the actual builtin dict.\n\n >>> class Temp(dict): pass\n >>> is_dictionary_subclass(Temp())\n True\n ' return (hasattr(obj, '__class__') and issubclass(obj.__class__, dict) and (type(obj) is not dict))
-8,526,177,384,186,611,000
Returns True if *obj* is a subclass of the dict type. *obj* must be a subclass and not the actual builtin dict. >>> class Temp(dict): pass >>> is_dictionary_subclass(Temp()) True
jsonpickle/util.py
is_dictionary_subclass
antoinecollet5/Jsonpickle
python
def is_dictionary_subclass(obj): 'Returns True if *obj* is a subclass of the dict type. *obj* must be\n a subclass and not the actual builtin dict.\n\n >>> class Temp(dict): pass\n >>> is_dictionary_subclass(Temp())\n True\n ' return (hasattr(obj, '__class__') and issubclass(obj.__class__, dict) and (type(obj) is not dict))
def is_sequence_subclass(obj): 'Returns True if *obj* is a subclass of list, set or tuple.\n\n *obj* must be a subclass and not the actual builtin, such\n as list, set, tuple, etc..\n\n >>> class Temp(list): pass\n >>> is_sequence_subclass(Temp())\n True\n ' return (hasattr(obj, '__class__') and (issubclass(obj.__class__, SEQUENCES) or is_list_like(obj)) and (not is_sequence(obj)))
6,032,409,357,767,384,000
Returns True if *obj* is a subclass of list, set or tuple. *obj* must be a subclass and not the actual builtin, such as list, set, tuple, etc. >>> class Temp(list): pass >>> is_sequence_subclass(Temp()) True
jsonpickle/util.py
is_sequence_subclass
antoinecollet5/Jsonpickle
python
def is_sequence_subclass(obj): 'Returns True if *obj* is a subclass of list, set or tuple.\n\n *obj* must be a subclass and not the actual builtin, such\n as list, set, tuple, etc..\n\n >>> class Temp(list): pass\n >>> is_sequence_subclass(Temp())\n True\n ' return (hasattr(obj, '__class__') and (issubclass(obj.__class__, SEQUENCES) or is_list_like(obj)) and (not is_sequence(obj)))
def is_noncomplex(obj): 'Returns True if *obj* is a special (weird) class, that is more complex\n than primitive data types, but is not a full object. Including:\n\n * :class:`~time.struct_time`\n ' if (type(obj) is time.struct_time): return True return False
-8,276,497,009,716,233,000
Returns True if *obj* is a special (weird) class, that is more complex than primitive data types, but is not a full object. Including: * :class:`~time.struct_time`
jsonpickle/util.py
is_noncomplex
antoinecollet5/Jsonpickle
python
def is_noncomplex(obj): 'Returns True if *obj* is a special (weird) class, that is more complex\n than primitive data types, but is not a full object. Including:\n\n * :class:`~time.struct_time`\n ' if (type(obj) is time.struct_time): return True return False
def is_function(obj): 'Returns true if passed a function\n\n >>> is_function(lambda x: 1)\n True\n\n >>> is_function(locals)\n True\n\n >>> def method(): pass\n >>> is_function(method)\n True\n\n >>> is_function(1)\n False\n ' function_types = {types.FunctionType, types.MethodType, types.LambdaType, types.BuiltinFunctionType, types.BuiltinMethodType} return (type(obj) in function_types)
7,614,756,007,911,644,000
Returns true if passed a function >>> is_function(lambda x: 1) True >>> is_function(locals) True >>> def method(): pass >>> is_function(method) True >>> is_function(1) False
jsonpickle/util.py
is_function
antoinecollet5/Jsonpickle
python
def is_function(obj): 'Returns true if passed a function\n\n >>> is_function(lambda x: 1)\n True\n\n >>> is_function(locals)\n True\n\n >>> def method(): pass\n >>> is_function(method)\n True\n\n >>> is_function(1)\n False\n ' function_types = {types.FunctionType, types.MethodType, types.LambdaType, types.BuiltinFunctionType, types.BuiltinMethodType} return (type(obj) in function_types)
def is_module_function(obj): 'Return True if `obj` is a module-global function\n\n >>> import os\n >>> is_module_function(os.path.exists)\n True\n\n >>> is_module_function(lambda: None)\n False\n\n ' return (hasattr(obj, '__class__') and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)) and hasattr(obj, '__module__') and hasattr(obj, '__name__') and (obj.__name__ != '<lambda>'))
-5,166,977,073,362,640,000
Return True if `obj` is a module-global function >>> import os >>> is_module_function(os.path.exists) True >>> is_module_function(lambda: None) False
jsonpickle/util.py
is_module_function
antoinecollet5/Jsonpickle
python
def is_module_function(obj): 'Return True if `obj` is a module-global function\n\n >>> import os\n >>> is_module_function(os.path.exists)\n True\n\n >>> is_module_function(lambda: None)\n False\n\n ' return (hasattr(obj, '__class__') and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)) and hasattr(obj, '__module__') and hasattr(obj, '__name__') and (obj.__name__ != '<lambda>'))
def is_module(obj): 'Returns True if passed a module\n\n >>> import os\n >>> is_module(os)\n True\n\n ' return isinstance(obj, types.ModuleType)
-8,892,213,375,293,908,000
Returns True if passed a module >>> import os >>> is_module(os) True
jsonpickle/util.py
is_module
antoinecollet5/Jsonpickle
python
def is_module(obj): 'Returns True if passed a module\n\n >>> import os\n >>> is_module(os)\n True\n\n ' return isinstance(obj, types.ModuleType)
def is_picklable(name, value): "Return True if an object can be pickled\n\n >>> import os\n >>> is_picklable('os', os)\n True\n\n >>> def foo(): pass\n >>> is_picklable('foo', foo)\n True\n\n >>> is_picklable('foo', lambda: None)\n False\n\n " if (name in tags.RESERVED): return False return (is_module_function(value) or (not is_function(value)))
-1,088,638,592,422,786,000
Return True if an object can be pickled >>> import os >>> is_picklable('os', os) True >>> def foo(): pass >>> is_picklable('foo', foo) True >>> is_picklable('foo', lambda: None) False
jsonpickle/util.py
is_picklable
antoinecollet5/Jsonpickle
python
def is_picklable(name, value): "Return True if an object can be pickled\n\n >>> import os\n >>> is_picklable('os', os)\n True\n\n >>> def foo(): pass\n >>> is_picklable('foo', foo)\n True\n\n >>> is_picklable('foo', lambda: None)\n False\n\n " if (name in tags.RESERVED): return False return (is_module_function(value) or (not is_function(value)))
def is_installed(module): "Tests to see if ``module`` is available on the sys.path\n\n >>> is_installed('sys')\n True\n >>> is_installed('hopefullythisisnotarealmodule')\n False\n\n " try: __import__(module) return True except ImportError: return False
-7,763,117,679,556,954,000
Tests to see if ``module`` is available on the sys.path >>> is_installed('sys') True >>> is_installed('hopefullythisisnotarealmodule') False
jsonpickle/util.py
is_installed
antoinecollet5/Jsonpickle
python
def is_installed(module): "Tests to see if ``module`` is available on the sys.path\n\n >>> is_installed('sys')\n True\n >>> is_installed('hopefullythisisnotarealmodule')\n False\n\n " try: __import__(module) return True except ImportError: return False
def is_reducible(obj): '\n Returns false if of a type which have special casing,\n and should not have their __reduce__ methods used\n ' if (is_collections(obj) and (not isinstance(obj, collections.defaultdict))): return True if (type(obj) in NON_REDUCIBLE_TYPES): return False elif (obj is object): return False elif is_list_like(obj): return False elif isinstance(obj, types.ModuleType): return False elif is_dictionary_subclass(obj): return False elif is_reducible_sequence_subclass(obj): return False elif isinstance(getattr(obj, '__slots__', None), iterator_types): return False elif (is_type(obj) and (obj.__module__ == 'datetime')): return False return True
5,660,725,338,134,013,000
Returns False if obj is of a type that has special casing and whose __reduce__ method should therefore not be used
jsonpickle/util.py
is_reducible
antoinecollet5/Jsonpickle
python
def is_reducible(obj): '\n Returns false if of a type which have special casing,\n and should not have their __reduce__ methods used\n ' if (is_collections(obj) and (not isinstance(obj, collections.defaultdict))): return True if (type(obj) in NON_REDUCIBLE_TYPES): return False elif (obj is object): return False elif is_list_like(obj): return False elif isinstance(obj, types.ModuleType): return False elif is_dictionary_subclass(obj): return False elif is_reducible_sequence_subclass(obj): return False elif isinstance(getattr(obj, '__slots__', None), iterator_types): return False elif (is_type(obj) and (obj.__module__ == 'datetime')): return False return True
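A few illustrative calls against the predicate above (assuming jsonpickle is installed); the example class is hypothetical.

from jsonpickle.util import is_reducible

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

print(is_reducible(Point(1, 2)))   # True: ordinary instances may use __reduce__
print(is_reducible([1, 2, 3]))     # False: lists fall under the special-cased types
print(is_reducible(object))        # False: the `object` type itself is excluded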
def in_dict(obj, key, default=False): '\n Returns true if key exists in obj.__dict__; false if not in.\n If obj.__dict__ is absent, return default\n ' return ((key in obj.__dict__) if getattr(obj, '__dict__', None) else default)
2,998,211,634,569,577,500
Returns True if key exists in obj.__dict__; False if it does not. If obj.__dict__ is absent, returns default
jsonpickle/util.py
in_dict
antoinecollet5/Jsonpickle
python
def in_dict(obj, key, default=False): '\n Returns true if key exists in obj.__dict__; false if not in.\n If obj.__dict__ is absent, return default\n ' return ((key in obj.__dict__) if getattr(obj, '__dict__', None) else default)
def in_slots(obj, key, default=False): '\n Returns true if key exists in obj.__slots__; false if not in.\n If obj.__slots__ is absent, return default\n ' return ((key in obj.__slots__) if getattr(obj, '__slots__', None) else default)
-7,808,203,502,761,476,000
Returns True if key exists in obj.__slots__; False if it does not. If obj.__slots__ is absent, returns default
jsonpickle/util.py
in_slots
antoinecollet5/Jsonpickle
python
def in_slots(obj, key, default=False): '\n Returns true if key exists in obj.__slots__; false if not in.\n If obj.__slots__ is absent, return default\n ' return ((key in obj.__slots__) if getattr(obj, '__slots__', None) else default)
def has_reduce(obj): '\n Tests if __reduce__ or __reduce_ex__ exists in the object dict or\n in the class dicts of every class in the MRO *except object*.\n\n Returns a tuple of booleans (has_reduce, has_reduce_ex)\n ' if ((not is_reducible(obj)) or is_type(obj)): return (False, False) if is_noncomplex(obj): return (False, True) has_reduce = False has_reduce_ex = False REDUCE = '__reduce__' REDUCE_EX = '__reduce_ex__' has_reduce = (in_dict(obj, REDUCE) or in_slots(obj, REDUCE)) has_reduce_ex = (in_dict(obj, REDUCE_EX) or in_slots(obj, REDUCE_EX)) for base in type(obj).__mro__: if is_reducible(base): has_reduce = (has_reduce or in_dict(base, REDUCE)) has_reduce_ex = (has_reduce_ex or in_dict(base, REDUCE_EX)) if (has_reduce and has_reduce_ex): return (has_reduce, has_reduce_ex) cls = type(obj) object_reduce = getattr(object, REDUCE) object_reduce_ex = getattr(object, REDUCE_EX) if (not has_reduce): has_reduce_cls = getattr(cls, REDUCE, False) if (has_reduce_cls is not object_reduce): has_reduce = has_reduce_cls if (not has_reduce_ex): has_reduce_ex_cls = getattr(cls, REDUCE_EX, False) if (has_reduce_ex_cls is not object_reduce_ex): has_reduce_ex = has_reduce_ex_cls return (has_reduce, has_reduce_ex)
6,952,400,020,244,919,000
Tests if __reduce__ or __reduce_ex__ exists in the object dict or in the class dicts of every class in the MRO *except object*. Returns a tuple of booleans (has_reduce, has_reduce_ex)
jsonpickle/util.py
has_reduce
antoinecollet5/Jsonpickle
python
def has_reduce(obj): '\n Tests if __reduce__ or __reduce_ex__ exists in the object dict or\n in the class dicts of every class in the MRO *except object*.\n\n Returns a tuple of booleans (has_reduce, has_reduce_ex)\n ' if ((not is_reducible(obj)) or is_type(obj)): return (False, False) if is_noncomplex(obj): return (False, True) has_reduce = False has_reduce_ex = False REDUCE = '__reduce__' REDUCE_EX = '__reduce_ex__' has_reduce = (in_dict(obj, REDUCE) or in_slots(obj, REDUCE)) has_reduce_ex = (in_dict(obj, REDUCE_EX) or in_slots(obj, REDUCE_EX)) for base in type(obj).__mro__: if is_reducible(base): has_reduce = (has_reduce or in_dict(base, REDUCE)) has_reduce_ex = (has_reduce_ex or in_dict(base, REDUCE_EX)) if (has_reduce and has_reduce_ex): return (has_reduce, has_reduce_ex) cls = type(obj) object_reduce = getattr(object, REDUCE) object_reduce_ex = getattr(object, REDUCE_EX) if (not has_reduce): has_reduce_cls = getattr(cls, REDUCE, False) if (has_reduce_cls is not object_reduce): has_reduce = has_reduce_cls if (not has_reduce_ex): has_reduce_ex_cls = getattr(cls, REDUCE_EX, False) if (has_reduce_ex_cls is not object_reduce_ex): has_reduce_ex = has_reduce_ex_cls return (has_reduce, has_reduce_ex)
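A small illustration of the distinction has_reduce draws (assuming jsonpickle is installed); both classes are hypothetical.

from jsonpickle.util import has_reduce

class Plain:
    pass

class WithReduce:
    def __reduce__(self):
        return (WithReduce, ())

print(has_reduce(Plain()))        # (False, False): only object's defaults are present
print(has_reduce(WithReduce()))   # (True, False): __reduce__ is defined on the class itself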
def translate_module_name(module): 'Rename builtin modules to a consistent module name.\n\n Prefer the more modern naming.\n\n This is used so that references to Python\'s `builtins` module can\n be loaded in both Python 2 and 3. We remap to the "__builtin__"\n name and unmap it when importing.\n\n Map the Python2 `exceptions` module to `builtins` because\n `builtins` is a superset and contains everything that is\n available in `exceptions`, which makes the translation simpler.\n\n See untranslate_module_name() for the reverse operation.\n ' lookup = dict(__builtin__='builtins', exceptions='builtins') return lookup.get(module, module)
4,901,060,085,934,409,000
Rename builtin modules to a consistent module name. Prefer the more modern naming. This is used so that references to Python's `builtins` module can be loaded in both Python 2 and 3. We remap to the "__builtin__" name and unmap it when importing. Map the Python2 `exceptions` module to `builtins` because `builtins` is a superset and contains everything that is available in `exceptions`, which makes the translation simpler. See untranslate_module_name() for the reverse operation.
jsonpickle/util.py
translate_module_name
antoinecollet5/Jsonpickle
python
def translate_module_name(module): 'Rename builtin modules to a consistent module name.\n\n Prefer the more modern naming.\n\n This is used so that references to Python\'s `builtins` module can\n be loaded in both Python 2 and 3. We remap to the "__builtin__"\n name and unmap it when importing.\n\n Map the Python2 `exceptions` module to `builtins` because\n `builtins` is a superset and contains everything that is\n available in `exceptions`, which makes the translation simpler.\n\n See untranslate_module_name() for the reverse operation.\n ' lookup = dict(__builtin__='builtins', exceptions='builtins') return lookup.get(module, module)
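An illustrative round trip through the two translation helpers (assuming jsonpickle is installed).

from jsonpickle.util import translate_module_name, untranslate_module_name

print(translate_module_name('__builtin__'))   # 'builtins'
print(translate_module_name('exceptions'))    # 'builtins'
print(translate_module_name('os.path'))       # unchanged: 'os.path'
print(untranslate_module_name('builtins'))    # 'builtins' on Python 3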
def untranslate_module_name(module): 'Rename module names mention in JSON to names that we can import\n\n This reverses the translation applied by translate_module_name() to\n a module name available to the current version of Python.\n\n ' module = _0_9_6_compat_untranslate(module) lookup = (dict(builtins='__builtin__') if PY2 else {}) return lookup.get(module, module)
-6,730,269,842,887,094,000
Rename module names mentioned in JSON to names that we can import. This reverses the translation applied by translate_module_name() to a module name available to the current version of Python.
jsonpickle/util.py
untranslate_module_name
antoinecollet5/Jsonpickle
python
def untranslate_module_name(module): 'Rename module names mention in JSON to names that we can import\n\n This reverses the translation applied by translate_module_name() to\n a module name available to the current version of Python.\n\n ' module = _0_9_6_compat_untranslate(module) lookup = (dict(builtins='__builtin__') if PY2 else {}) return lookup.get(module, module)
def _0_9_6_compat_untranslate(module): 'Provide compatibility for pickles created with jsonpickle 0.9.6 and\n earlier, remapping `exceptions` and `__builtin__` to `builtins`.\n ' lookup = dict(__builtin__='builtins', exceptions='builtins') return lookup.get(module, module)
329,968,985,575,753,200
Provide compatibility for pickles created with jsonpickle 0.9.6 and earlier, remapping `exceptions` and `__builtin__` to `builtins`.
jsonpickle/util.py
_0_9_6_compat_untranslate
antoinecollet5/Jsonpickle
python
def _0_9_6_compat_untranslate(module): 'Provide compatibility for pickles created with jsonpickle 0.9.6 and\n earlier, remapping `exceptions` and `__builtin__` to `builtins`.\n ' lookup = dict(__builtin__='builtins', exceptions='builtins') return lookup.get(module, module)
def importable_name(cls): "\n >>> class Example(object):\n ... pass\n\n >>> ex = Example()\n >>> importable_name(ex.__class__) == 'jsonpickle.util.Example'\n True\n >>> importable_name(type(25)) == 'builtins.int'\n True\n >>> importable_name(None.__class__) == 'builtins.NoneType'\n True\n >>> importable_name(False.__class__) == 'builtins.bool'\n True\n >>> importable_name(AttributeError) == 'builtins.AttributeError'\n True\n\n " name = getattr(cls, '__qualname__', cls.__name__) module = translate_module_name(cls.__module__) return '{}.{}'.format(module, name)
5,324,702,748,341,663,000
>>> class Example(object): ... pass >>> ex = Example() >>> importable_name(ex.__class__) == 'jsonpickle.util.Example' True >>> importable_name(type(25)) == 'builtins.int' True >>> importable_name(None.__class__) == 'builtins.NoneType' True >>> importable_name(False.__class__) == 'builtins.bool' True >>> importable_name(AttributeError) == 'builtins.AttributeError' True
jsonpickle/util.py
importable_name
antoinecollet5/Jsonpickle
python
def importable_name(cls): "\n >>> class Example(object):\n ... pass\n\n >>> ex = Example()\n >>> importable_name(ex.__class__) == 'jsonpickle.util.Example'\n True\n >>> importable_name(type(25)) == 'builtins.int'\n True\n >>> importable_name(None.__class__) == 'builtins.NoneType'\n True\n >>> importable_name(False.__class__) == 'builtins.bool'\n True\n >>> importable_name(AttributeError) == 'builtins.AttributeError'\n True\n\n " name = getattr(cls, '__qualname__', cls.__name__) module = translate_module_name(cls.__module__) return '{}.{}'.format(module, name)
def b64encode(data): '\n Encode binary data to ascii text in base64. Data must be bytes.\n ' return base64.b64encode(data).decode('ascii')
5,550,798,368,815,420,000
Encode binary data to ascii text in base64. Data must be bytes.
jsonpickle/util.py
b64encode
antoinecollet5/Jsonpickle
python
def b64encode(data): '\n \n ' return base64.b64encode(data).decode('ascii')
def b64decode(payload): '\n Decode payload - must be ascii text.\n ' return base64.b64decode(payload)
3,385,305,434,666,332,000
Decode payload - must be ascii text.
jsonpickle/util.py
b64decode
antoinecollet5/Jsonpickle
python
def b64decode(payload): '\n \n ' return base64.b64decode(payload)
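A round-trip sketch for the two base64 helpers above (the encoded value is standard base64):

payload = b64encode(b'\x00\xffdata')          # -> 'AP9kYXRh', an ascii str
assert b64decode(payload) == b'\x00\xffdata'  # decoding restores the original bytes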
def b85encode(data): '\n Encode binary data to ascii text in base85. Data must be bytes.\n ' if PY2: raise NotImplementedError("Python 2 can't encode data in base85.") return base64.b85encode(data).decode('ascii')
8,403,727,459,595,849,000
Encode binary data to ascii text in base85. Data must be bytes.
jsonpickle/util.py
b85encode
antoinecollet5/Jsonpickle
python
def b85encode(data): '\n \n ' if PY2: raise NotImplementedError("Python 2 can't encode data in base85.") return base64.b85encode(data).decode('ascii')
def b85decode(payload): '\n Decode payload - must be ascii text.\n ' if PY2: raise NotImplementedError("Python 2 can't decode base85-encoded data.") return base64.b85decode(payload)
-8,605,121,480,186,929,000
Decode payload - must be ascii text.
jsonpickle/util.py
b85decode
antoinecollet5/Jsonpickle
python
def b85decode(payload): '\n \n ' if PY2: raise NotImplementedError("Python 2 can't decode base85-encoded data.") return base64.b85decode(payload)
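The base85 pair works the same way but, as the guards above show, is unavailable under Python 2; a sketch:

if not PY2:
    payload = b85encode(b'binary payload')          # ascii str
    assert b85decode(payload) == b'binary payload'  # round trip back to bytes
else:
    pass  # both helpers raise NotImplementedError on Python 2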
def items(obj): 'Iterate over dicts in a deterministic order\n\n Python2 does not guarantee dict ordering, so this function\n papers over the difference in behavior. Python3 does guarantee\n dict order, without use of OrderedDict, so no sorting is needed there.\n\n ' if PY3_ORDERED_DICT: for (k, v) in obj.items(): (yield (k, v)) else: for (k, v) in sorted(obj.items(), key=itemgetter): (yield (k, v))
-1,443,848,681,206,376,000
Iterate over dicts in a deterministic order Python2 does not guarantee dict ordering, so this function papers over the difference in behavior. Python3 does guarantee dict order, without use of OrderedDict, so no sorting is needed there.
jsonpickle/util.py
items
antoinecollet5/Jsonpickle
python
def items(obj): 'Iterate over dicts in a deterministic order\n\n Python2 does not guarantee dict ordering, so this function\n papers over the difference in behavior. Python3 does guarantee\n dict order, without use of OrderedDict, so no sorting is needed there.\n\n ' if PY3_ORDERED_DICT: for (k, v) in obj.items(): (yield (k, v)) else: for (k, v) in sorted(obj.items(), key=itemgetter): (yield (k, v))
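A small illustration of items(); the first comment assumes Python 3's insertion-ordered dicts (PY3_ORDERED_DICT true), the second assumes the module's itemgetter helper sorts pairs by key:

d = {'b': 2, 'a': 1}
list(items(d))    # Python 3: [('b', 2), ('a', 1)], insertion order
                  # Python 2: [('a', 1), ('b', 2)], sorted for determinism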
def train_args(): "\n Retrieves and parses the 3 command line arguments provided by the user when\n they run the program from a terminal window. This function uses Python's\n argparse module to created and defined these 3 command line arguments. If\n the user fails to provide some or all of the 3 arguments, then the default\n values are used for the missing arguments.\n\n :return: results: data structure that stores the command line arguments object\n " parser = argparse.ArgumentParser() parser.add_argument('--data_dir', action='store', dest='data_dir', default='flowers', help='Path the directory of the dataset, should contain sub-directories /train, /test, /valid') parser.add_argument('--save_dir', action='store', dest='save_dir', default='checkpoint.pth', help='Set directory to save checkpoints') parser.add_argument('--arch', action='store', dest='arch', default='vgg16', help='Choose architecture. Default: vgg16') parser.add_argument('--learning_rate', action='store', dest='learning_rate', default=0.003, help='Set the learning rate', type=float) parser.add_argument('--hidden_units', action='store', dest='hidden_units', default=256, help='Add the hidden units', type=int) parser.add_argument('--epochs', action='store', dest='epochs', default=30, help='Add number of epoch cycles', type=int) parser.add_argument('--gpu', action='store_true', dest='gpu', help='Activate GPU') results = parser.parse_args() return results
1,974,434,042,351,910,100
Retrieves and parses the 3 command line arguments provided by the user when they run the program from a terminal window. This function uses Python's argparse module to create and define these 3 command line arguments. If the user fails to provide some or all of the 3 arguments, then the default values are used for the missing arguments. :return: results: data structure that stores the command line arguments object
train_args.py
train_args
victoray/ImageClasssifier
python
def train_args(): "\n Retrieves and parses the 3 command line arguments provided by the user when\n they run the program from a terminal window. This function uses Python's\n argparse module to created and defined these 3 command line arguments. If\n the user fails to provide some or all of the 3 arguments, then the default\n values are used for the missing arguments.\n\n :return: results: data structure that stores the command line arguments object\n " parser = argparse.ArgumentParser() parser.add_argument('--data_dir', action='store', dest='data_dir', default='flowers', help='Path the directory of the dataset, should contain sub-directories /train, /test, /valid') parser.add_argument('--save_dir', action='store', dest='save_dir', default='checkpoint.pth', help='Set directory to save checkpoints') parser.add_argument('--arch', action='store', dest='arch', default='vgg16', help='Choose architecture. Default: vgg16') parser.add_argument('--learning_rate', action='store', dest='learning_rate', default=0.003, help='Set the learning rate', type=float) parser.add_argument('--hidden_units', action='store', dest='hidden_units', default=256, help='Add the hidden units', type=int) parser.add_argument('--epochs', action='store', dest='epochs', default=30, help='Add number of epoch cycles', type=int) parser.add_argument('--gpu', action='store_true', dest='gpu', help='Activate GPU') results = parser.parse_args() return results
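A sketch of how the parser above behaves when called from a hypothetical training script (the script name is illustrative; only --epochs and --gpu are passed, so the other options fall back to their defaults):

# invoked as: python train.py --epochs 10 --gpu
args = train_args()
args.data_dir         # 'flowers'
args.arch             # 'vgg16'
args.learning_rate    # 0.003
args.epochs           # 10
args.gpu              # True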
def get_app(self): ' Overwrites method of AsyncHTTPTestCase.\n Returns:\n an instance of tornado application\n ' distributed_taskqueue = None return appscale_taskqueue.prepare_taskqueue_application(task_queue=distributed_taskqueue)
4,512,339,946,284,686,300
Overwrites method of AsyncHTTPTestCase. Returns: an instance of tornado application
AppTaskQueue/test/unit/test_service_stats.py
get_app
HafeezRai/appscale
python
def get_app(self): ' Overwrites method of AsyncHTTPTestCase.\n Returns:\n an instance of tornado application\n ' distributed_taskqueue = None return appscale_taskqueue.prepare_taskqueue_application(task_queue=distributed_taskqueue)
def setUp(self): ' Patches handlers of Taskqueue application in order\n to prevent real calls to Cassandra and Datastore because only\n service statistics matters for this test.\n ' super(TestServiceStatistics, self).setUp() handlers = [rest_api.RESTQueue, rest_api.RESTTask, rest_api.RESTLease, rest_api.RESTTasks] self.patchers = [] self.get_http_status_mock = mock.MagicMock() for handler in handlers: patcher = patch.object(handler, 'get_status', self.get_http_status_mock) patcher.start() self.patchers.append(patcher) for method in ['get', 'post', 'put', 'delete', 'patch']: def method_impl(*args, **kwargs): return None patcher = patch.object(handler, method, method_impl) patcher.start() self.patchers.append(patcher) remote_request_patcher = patch.object(appscale_taskqueue.ProtobufferHandler, 'remote_request') self.pb_remote_request_mock = remote_request_patcher.start() self.patchers.append(remote_request_patcher) time_patcher = patch.object(stats_manager.time, 'time') self.time_mock = time_patcher.start() self.patchers.append(time_patcher)
-598,522,386,944,669,300
Patches handlers of the Taskqueue application in order to prevent real calls to Cassandra and Datastore, because only service statistics matter for this test.
AppTaskQueue/test/unit/test_service_stats.py
setUp
HafeezRai/appscale
python
def setUp(self): ' Patches handlers of Taskqueue application in order\n to prevent real calls to Cassandra and Datastore because only\n service statistics matters for this test.\n ' super(TestServiceStatistics, self).setUp() handlers = [rest_api.RESTQueue, rest_api.RESTTask, rest_api.RESTLease, rest_api.RESTTasks] self.patchers = [] self.get_http_status_mock = mock.MagicMock() for handler in handlers: patcher = patch.object(handler, 'get_status', self.get_http_status_mock) patcher.start() self.patchers.append(patcher) for method in ['get', 'post', 'put', 'delete', 'patch']: def method_impl(*args, **kwargs): return None patcher = patch.object(handler, method, method_impl) patcher.start() self.patchers.append(patcher) remote_request_patcher = patch.object(appscale_taskqueue.ProtobufferHandler, 'remote_request') self.pb_remote_request_mock = remote_request_patcher.start() self.patchers.append(remote_request_patcher) time_patcher = patch.object(stats_manager.time, 'time') self.time_mock = time_patcher.start() self.patchers.append(time_patcher)
def generate_extension(ext_def): 'Generate extension constructors.' assert ('name' in ext_def), 'invalid extension name' ext_path = (ext_def['name'].replace('.', os.path.sep) + '.pyx') ext_root = os.path.dirname(ext_path) ext_def['sources'] = [ext_path] if ('extra_objects' in ext_def): if (not sys.platform.startswith('linux')): static_libs = [os.path.split(lib) for lib in ext_def['extra_objects']] (lib_dirs, lib_names) = zip(*static_libs) lib_names = [os.path.splitext(name)[0] for name in lib_names] ext_def.setdefault('libraries', []).extend(lib_names) ext_def.setdefault('library_dirs', []).extend(list(set(lib_dirs))) del ext_def['extra_objects'] arguments = ('include_dirs', 'library_dirs', 'runtime_library_dirs', 'extra_objects') for argument in arguments: try: ext_def[argument] = [os.path.join(ext_root, path) for path in ext_def[argument]] except KeyError: pass return Extension(**ext_def)
-7,423,803,714,519,988,000
Generate extension constructors.
setup.py
generate_extension
liuyenting/olive-camera-dcamapi
python
def generate_extension(ext_def): assert ('name' in ext_def), 'invalid extension name' ext_path = (ext_def['name'].replace('.', os.path.sep) + '.pyx') ext_root = os.path.dirname(ext_path) ext_def['sources'] = [ext_path] if ('extra_objects' in ext_def): if (not sys.platform.startswith('linux')): static_libs = [os.path.split(lib) for lib in ext_def['extra_objects']] (lib_dirs, lib_names) = zip(*static_libs) lib_names = [os.path.splitext(name)[0] for name in lib_names] ext_def.setdefault('libraries', []).extend(lib_names) ext_def.setdefault('library_dirs', []).extend(list(set(lib_dirs))) del ext_def['extra_objects'] arguments = ('include_dirs', 'library_dirs', 'runtime_library_dirs', 'extra_objects') for argument in arguments: try: ext_def[argument] = [os.path.join(ext_root, path) for path in ext_def[argument]] except KeyError: pass return Extension(**ext_def)
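A hedged example of the kind of ext_def dict this helper expects; the extension name and file paths are hypothetical, and the trailing comments describe the non-Linux branch:

ext = generate_extension({
    'name': 'olive.camera.dcamapi',        # hypothetical dotted extension name
    'include_dirs': ['include'],           # rebased onto the extension root: 'olive/camera/include'
    'extra_objects': ['lib/dcamapi.lib'],  # on non-Linux platforms rewritten as
                                           # libraries=['dcamapi'] plus library_dirs=['olive/camera/lib']
})
# 'sources' is always derived from the dotted name: ['olive/camera/dcamapi.pyx']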
def test_simple_iter_clauses(self): 'Test basic expression iter_clauses functionality.' b = BooleanExpression('A or B or C or D') self.assertTrue(b.is_cnf) self.assertTrue(b.is_dnf) clauses = b.iter_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A or B or C or D">') with self.assertRaises(StopIteration): next(clauses) b = BooleanExpression('(A and B and C) or (D and E and F)') self.assertFalse(b.is_cnf) self.assertTrue(b.is_dnf) clauses = b.iter_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A and B and C">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "D and E and F">') with self.assertRaises(StopIteration): next(clauses)
-6,528,639,060,011,016,000
Test basic expression iter_clauses functionality.
tt/tests/unit/expressions/test_bexpr_iter_clauses.py
test_simple_iter_clauses
fkromer/tt
python
def test_simple_iter_clauses(self): b = BooleanExpression('A or B or C or D') self.assertTrue(b.is_cnf) self.assertTrue(b.is_dnf) clauses = b.iter_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A or B or C or D">') with self.assertRaises(StopIteration): next(clauses) b = BooleanExpression('(A and B and C) or (D and E and F)') self.assertFalse(b.is_cnf) self.assertTrue(b.is_dnf) clauses = b.iter_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A and B and C">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "D and E and F">') with self.assertRaises(StopIteration): next(clauses)
def test_simple_iter_cnf(self): 'Test basic expression iter_cnf_clauses functionality.' b = BooleanExpression('(A or B) and (C or D) and (E or F)') self.assertTrue(b.is_cnf) self.assertFalse(b.is_dnf) clauses = b.iter_cnf_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A or B">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "C or D">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "E or F">') with self.assertRaises(StopIteration): next(clauses)
-743,596,002,143,586,800
Test basic expression iter_cnf_clauses functionality.
tt/tests/unit/expressions/test_bexpr_iter_clauses.py
test_simple_iter_cnf
fkromer/tt
python
def test_simple_iter_cnf(self): b = BooleanExpression('(A or B) and (C or D) and (E or F)') self.assertTrue(b.is_cnf) self.assertFalse(b.is_dnf) clauses = b.iter_cnf_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A or B">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "C or D">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "E or F">') with self.assertRaises(StopIteration): next(clauses)
def test_simple_iter_dnf(self): 'Test basic expression iter_dnf_clauses functionality.' b = BooleanExpression('(A and B) or (C and D) or (E and F)') self.assertTrue(b.is_dnf) self.assertFalse(b.is_cnf) clauses = b.iter_dnf_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A and B">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "C and D">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "E and F">') with self.assertRaises(StopIteration): next(clauses)
5,606,984,770,953,174,000
Test basic expression iter_dnf_clauses functionality.
tt/tests/unit/expressions/test_bexpr_iter_clauses.py
test_simple_iter_dnf
fkromer/tt
python
def test_simple_iter_dnf(self): b = BooleanExpression('(A and B) or (C and D) or (E and F)') self.assertTrue(b.is_dnf) self.assertFalse(b.is_cnf) clauses = b.iter_dnf_clauses() self.assertEqual(repr(next(clauses)), '<BooleanExpression "A and B">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "C and D">') self.assertEqual(repr(next(clauses)), '<BooleanExpression "E and F">') with self.assertRaises(StopIteration): next(clauses)
def init(self, permits): '\n Try to initialize this Semaphore instance with the given permit count.\n\n :param permits: (int), the given permit count.\n :return: (bool), ``true`` if initialization success.\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_init_codec, permits=permits)
-4,474,863,822,055,226,400
Try to initialize this Semaphore instance with the given permit count. :param permits: (int), the given permit count. :return: (bool), ``true`` if initialization succeeds.
hazelcast/proxy/semaphore.py
init
Kilo59/hazelcast-python-client
python
def init(self, permits): '\n Try to initialize this Semaphore instance with the given permit count.\n\n :param permits: (int), the given permit count.\n :return: (bool), ``true`` if initialization success.\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_init_codec, permits=permits)
def acquire(self, permits=1): '\n Acquires one or specified amount of permits if available, and returns immediately, reducing the number of\n available permits by one or given amount.\n\n If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes\n and lies dormant until one of following happens:\n\n * some other thread invokes one of the release methods for this semaphore, the current thread is next to be\n assigned permits and the number of available permits satisfies this request,\n * this Semaphore instance is destroyed, or\n * some other thread interrupts the current thread.\n\n :param permits: (int), the number of permits to acquire (optional).\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_acquire_codec, permits=permits)
-3,143,300,385,678,641,700
Acquires one or the specified number of permits if available, and returns immediately, reducing the number of available permits by one or the given amount. If insufficient permits are available, the current thread becomes disabled for thread scheduling purposes and lies dormant until one of the following happens: * some other thread invokes one of the release methods for this semaphore, the current thread is next to be assigned permits and the number of available permits satisfies this request, * this Semaphore instance is destroyed, or * some other thread interrupts the current thread. :param permits: (int), the number of permits to acquire (optional).
hazelcast/proxy/semaphore.py
acquire
Kilo59/hazelcast-python-client
python
def acquire(self, permits=1): '\n Acquires one or specified amount of permits if available, and returns immediately, reducing the number of\n available permits by one or given amount.\n\n If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes\n and lies dormant until one of following happens:\n\n * some other thread invokes one of the release methods for this semaphore, the current thread is next to be\n assigned permits and the number of available permits satisfies this request,\n * this Semaphore instance is destroyed, or\n * some other thread interrupts the current thread.\n\n :param permits: (int), the number of permits to acquire (optional).\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_acquire_codec, permits=permits)
def available_permits(self): '\n Returns the current number of permits currently available in this semaphore.\n\n * This method is typically used for debugging and testing purposes.\n :return: (int), the number of available permits in this semaphore.\n ' return self._encode_invoke(semaphore_available_permits_codec)
2,166,608,214,720,584,700
Returns the number of permits currently available in this semaphore. * This method is typically used for debugging and testing purposes. :return: (int), the number of available permits in this semaphore.
hazelcast/proxy/semaphore.py
available_permits
Kilo59/hazelcast-python-client
python
def available_permits(self): '\n Returns the current number of permits currently available in this semaphore.\n\n * This method is typically used for debugging and testing purposes.\n :return: (int), the number of available permits in this semaphore.\n ' return self._encode_invoke(semaphore_available_permits_codec)
def drain_permits(self): '\n Acquires and returns all permits that are immediately available.\n\n :return: (int), the number of permits drained.\n ' return self._encode_invoke(semaphore_drain_permits_codec)
294,431,509,549,184,200
Acquires and returns all permits that are immediately available. :return: (int), the number of permits drained.
hazelcast/proxy/semaphore.py
drain_permits
Kilo59/hazelcast-python-client
python
def drain_permits(self): '\n Acquires and returns all permits that are immediately available.\n\n :return: (int), the number of permits drained.\n ' return self._encode_invoke(semaphore_drain_permits_codec)
def reduce_permits(self, reduction): '\n Shrinks the number of available permits by the indicated reduction. This method differs from acquire in that it\n does not block waiting for permits to become available.\n\n :param reduction: (int), the number of permits to remove.\n ' check_not_negative(reduction, 'Reduction cannot be negative!') return self._encode_invoke(semaphore_reduce_permits_codec, reduction=reduction)
-908,642,113,753,615,700
Shrinks the number of available permits by the indicated reduction. This method differs from acquire in that it does not block waiting for permits to become available. :param reduction: (int), the number of permits to remove.
hazelcast/proxy/semaphore.py
reduce_permits
Kilo59/hazelcast-python-client
python
def reduce_permits(self, reduction): '\n Shrinks the number of available permits by the indicated reduction. This method differs from acquire in that it\n does not block waiting for permits to become available.\n\n :param reduction: (int), the number of permits to remove.\n ' check_not_negative(reduction, 'Reduction cannot be negative!') return self._encode_invoke(semaphore_reduce_permits_codec, reduction=reduction)
def release(self, permits=1): '\n Releases one or given number of permits, increasing the number of available permits by one or that amount.\n\n There is no requirement that a thread that releases a permit must have acquired that permit by calling one of\n the acquire methods. Correct usage of a semaphore is established by programming convention in the application.\n\n :param permits: (int), the number of permits to release (optional).\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_release_codec, permits=permits)
-1,641,383,373,656,564,200
Releases one or given number of permits, increasing the number of available permits by one or that amount. There is no requirement that a thread that releases a permit must have acquired that permit by calling one of the acquire methods. Correct usage of a semaphore is established by programming convention in the application. :param permits: (int), the number of permits to release (optional).
hazelcast/proxy/semaphore.py
release
Kilo59/hazelcast-python-client
python
def release(self, permits=1): '\n Releases one or given number of permits, increasing the number of available permits by one or that amount.\n\n There is no requirement that a thread that releases a permit must have acquired that permit by calling one of\n the acquire methods. Correct usage of a semaphore is established by programming convention in the application.\n\n :param permits: (int), the number of permits to release (optional).\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_release_codec, permits=permits)
def try_acquire(self, permits=1, timeout=0): '\n Tries to acquire one or the given number of permits, if they are available, and returns immediately, with the\n value ``true``, reducing the number of available permits by the given amount.\n\n If there are insufficient permits and a timeout is provided, the current thread becomes disabled for thread\n scheduling purposes and lies dormant until one of following happens:\n * some other thread invokes the release() method for this semaphore and the current thread is next to be\n assigned a permit, or\n * some other thread interrupts the current thread, or\n * the specified waiting time elapses.\n\n If there are insufficient permits and no timeout is provided, this method will return immediately with the value\n ``false`` and the number of available permits is unchanged.\n\n :param permits: (int), the number of permits to acquire (optional).\n :param timeout: (long), the maximum time in seconds to wait for the permit(s) (optional).\n :return: (bool), ``true`` if desired amount of permits was acquired, ``false`` otherwise.\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_try_acquire_codec, permits=permits, timeout=to_millis(timeout))
-9,216,870,520,673,875,000
Tries to acquire one or the given number of permits, if they are available, and returns immediately, with the value ``true``, reducing the number of available permits by the given amount. If there are insufficient permits and a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies dormant until one of the following happens: * some other thread invokes the release() method for this semaphore and the current thread is next to be assigned a permit, or * some other thread interrupts the current thread, or * the specified waiting time elapses. If there are insufficient permits and no timeout is provided, this method will return immediately with the value ``false`` and the number of available permits is unchanged. :param permits: (int), the number of permits to acquire (optional). :param timeout: (long), the maximum time in seconds to wait for the permit(s) (optional). :return: (bool), ``true`` if the desired number of permits was acquired, ``false`` otherwise.
hazelcast/proxy/semaphore.py
try_acquire
Kilo59/hazelcast-python-client
python
def try_acquire(self, permits=1, timeout=0): '\n Tries to acquire one or the given number of permits, if they are available, and returns immediately, with the\n value ``true``, reducing the number of available permits by the given amount.\n\n If there are insufficient permits and a timeout is provided, the current thread becomes disabled for thread\n scheduling purposes and lies dormant until one of following happens:\n * some other thread invokes the release() method for this semaphore and the current thread is next to be\n assigned a permit, or\n * some other thread interrupts the current thread, or\n * the specified waiting time elapses.\n\n If there are insufficient permits and no timeout is provided, this method will return immediately with the value\n ``false`` and the number of available permits is unchanged.\n\n :param permits: (int), the number of permits to acquire (optional).\n :param timeout: (long), the maximum time in seconds to wait for the permit(s) (optional).\n :return: (bool), ``true`` if desired amount of permits was acquired, ``false`` otherwise.\n ' check_not_negative(permits, 'Permits cannot be negative!') return self._encode_invoke(semaphore_try_acquire_codec, permits=permits, timeout=to_millis(timeout))
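A usage sketch tying the semaphore proxy methods above together; it assumes a connected client object and that the proxy is obtained through client.get_semaphore() (the accessor used by the 3.x client), and depending on the client version these calls may return futures that need .result():

pool = client.get_semaphore('worker-pool')   # 'client' and the proxy name are assumed
pool.init(3)                                 # at most 3 permits available
if pool.try_acquire(permits=1, timeout=5):
    try:
        pass  # work guarded by the acquired permit goes here
    finally:
        pool.release(permits=1)
print(pool.available_permits())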
def __init__(self, node='', rpcuser=None, rpcpassword=None, debug=False, data_refresh_time_seconds=900, **kwargs): 'Init crea\n\n :param str node: Node to connect to *(optional)*\n :param str rpcuser: RPC user *(optional)*\n :param str rpcpassword: RPC password *(optional)*\n :param bool nobroadcast: Do **not** broadcast a transaction!\n *(optional)*\n :param bool unsigned: Do **not** sign a transaction! *(optional)*\n :param bool debug: Enable Debugging *(optional)*\n :param array,dict,string keys: Predefine the wif keys to shortcut the\n wallet database *(optional)*\n :param array,dict,string wif: Predefine the wif keys to shortcut the\n wallet database *(optional)*\n :param bool offline: Boolean to prevent connecting to network (defaults\n to ``False``) *(optional)*\n :param int expiration: Delay in seconds until transactions are supposed\n to expire *(optional)* (default is 30)\n :param str blocking: Wait for broadcast transactions to be included\n in a block and return full transaction (can be "head" or\n "irreversible")\n :param bool bundle: Do not broadcast transactions right away, but allow\n to bundle operations *(optional)*\n :param bool use_condenser: Use the old condenser_api rpc protocol on nodes with version\n 0.19.4 or higher. The settings has no effect on nodes with version of 0.19.3 or lower.\n :param int num_retries: Set the maximum number of reconnects to the nodes before\n NumRetriesReached is raised. Disabled for -1. (default is -1)\n :param int num_retries_call: Repeat num_retries_call times a rpc call on node error (default is 5)\n :param int timeout: Timeout setting for https nodes (default is 60)\n :param bool use_sc2: When True, a creaconnect object is created. Can be used for broadcast\n posting op or creating hot_links (default is False)\n :param CreaConnect creaconnect: A CreaConnect object can be set manually, set use_sc2 to True\n\n ' self.rpc = None self.debug = debug self.offline = bool(kwargs.get('offline', False)) self.nobroadcast = bool(kwargs.get('nobroadcast', False)) self.unsigned = bool(kwargs.get('unsigned', False)) self.expiration = int(kwargs.get('expiration', 30)) self.bundle = bool(kwargs.get('bundle', False)) self.creaconnect = kwargs.get('creaconnect', None) self.use_sc2 = bool(kwargs.get('use_sc2', False)) self.blocking = kwargs.get('blocking', False) self.custom_chains = kwargs.get('custom_chains', {}) self.config = config if (not self.offline): self.connect(node=node, rpcuser=rpcuser, rpcpassword=rpcpassword, **kwargs) self.data = {'last_refresh': None, 'last_node': None, 'dynamic_global_properties': None, 'feed_history': None, 'get_feed_history': None, 'hardfork_properties': None, 'network': None, 'witness_schedule': None, 'config': None, 'reward_funds': None} self.data_refresh_time_seconds = data_refresh_time_seconds self.clear() self.wallet = Wallet(crea_instance=self, **kwargs) if ((self.creaconnect is not None) and (not isinstance(self.creaconnect, CreaConnect))): raise ValueError('creaconnect musst be CreaConnect object') if ((self.creaconnect is None) and self.use_sc2): self.creaconnect = CreaConnect(crea_instance=self, **kwargs) elif ((self.creaconnect is not None) and (not self.use_sc2)): self.use_sc2 = True
8,843,852,646,283,711,000
Init crea :param str node: Node to connect to *(optional)* :param str rpcuser: RPC user *(optional)* :param str rpcpassword: RPC password *(optional)* :param bool nobroadcast: Do **not** broadcast a transaction! *(optional)* :param bool unsigned: Do **not** sign a transaction! *(optional)* :param bool debug: Enable Debugging *(optional)* :param array,dict,string keys: Predefine the wif keys to shortcut the wallet database *(optional)* :param array,dict,string wif: Predefine the wif keys to shortcut the wallet database *(optional)* :param bool offline: Boolean to prevent connecting to network (defaults to ``False``) *(optional)* :param int expiration: Delay in seconds until transactions are supposed to expire *(optional)* (default is 30) :param str blocking: Wait for broadcast transactions to be included in a block and return full transaction (can be "head" or "irreversible") :param bool bundle: Do not broadcast transactions right away, but allow to bundle operations *(optional)* :param bool use_condenser: Use the old condenser_api rpc protocol on nodes with version 0.19.4 or higher. The setting has no effect on nodes with version 0.19.3 or lower. :param int num_retries: Set the maximum number of reconnects to the nodes before NumRetriesReached is raised. Disabled for -1. (default is -1) :param int num_retries_call: Repeat an rpc call num_retries_call times on node error (default is 5) :param int timeout: Timeout setting for https nodes (default is 60) :param bool use_sc2: When True, a creaconnect object is created. Can be used for broadcast posting op or creating hot_links (default is False) :param CreaConnect creaconnect: A CreaConnect object can be set manually, set use_sc2 to True
crea/crea.py
__init__
creativechain/crea-python-lib
python
def __init__(self, node='', rpcuser=None, rpcpassword=None, debug=False, data_refresh_time_seconds=900, **kwargs): 'Init crea\n\n :param str node: Node to connect to *(optional)*\n :param str rpcuser: RPC user *(optional)*\n :param str rpcpassword: RPC password *(optional)*\n :param bool nobroadcast: Do **not** broadcast a transaction!\n *(optional)*\n :param bool unsigned: Do **not** sign a transaction! *(optional)*\n :param bool debug: Enable Debugging *(optional)*\n :param array,dict,string keys: Predefine the wif keys to shortcut the\n wallet database *(optional)*\n :param array,dict,string wif: Predefine the wif keys to shortcut the\n wallet database *(optional)*\n :param bool offline: Boolean to prevent connecting to network (defaults\n to ``False``) *(optional)*\n :param int expiration: Delay in seconds until transactions are supposed\n to expire *(optional)* (default is 30)\n :param str blocking: Wait for broadcast transactions to be included\n in a block and return full transaction (can be "head" or\n "irreversible")\n :param bool bundle: Do not broadcast transactions right away, but allow\n to bundle operations *(optional)*\n :param bool use_condenser: Use the old condenser_api rpc protocol on nodes with version\n 0.19.4 or higher. The settings has no effect on nodes with version of 0.19.3 or lower.\n :param int num_retries: Set the maximum number of reconnects to the nodes before\n NumRetriesReached is raised. Disabled for -1. (default is -1)\n :param int num_retries_call: Repeat num_retries_call times a rpc call on node error (default is 5)\n :param int timeout: Timeout setting for https nodes (default is 60)\n :param bool use_sc2: When True, a creaconnect object is created. Can be used for broadcast\n posting op or creating hot_links (default is False)\n :param CreaConnect creaconnect: A CreaConnect object can be set manually, set use_sc2 to True\n\n ' self.rpc = None self.debug = debug self.offline = bool(kwargs.get('offline', False)) self.nobroadcast = bool(kwargs.get('nobroadcast', False)) self.unsigned = bool(kwargs.get('unsigned', False)) self.expiration = int(kwargs.get('expiration', 30)) self.bundle = bool(kwargs.get('bundle', False)) self.creaconnect = kwargs.get('creaconnect', None) self.use_sc2 = bool(kwargs.get('use_sc2', False)) self.blocking = kwargs.get('blocking', False) self.custom_chains = kwargs.get('custom_chains', {}) self.config = config if (not self.offline): self.connect(node=node, rpcuser=rpcuser, rpcpassword=rpcpassword, **kwargs) self.data = {'last_refresh': None, 'last_node': None, 'dynamic_global_properties': None, 'feed_history': None, 'get_feed_history': None, 'hardfork_properties': None, 'network': None, 'witness_schedule': None, 'config': None, 'reward_funds': None} self.data_refresh_time_seconds = data_refresh_time_seconds self.clear() self.wallet = Wallet(crea_instance=self, **kwargs) if ((self.creaconnect is not None) and (not isinstance(self.creaconnect, CreaConnect))): raise ValueError('creaconnect musst be CreaConnect object') if ((self.creaconnect is None) and self.use_sc2): self.creaconnect = CreaConnect(crea_instance=self, **kwargs) elif ((self.creaconnect is not None) and (not self.use_sc2)): self.use_sc2 = True
def connect(self, node='', rpcuser='', rpcpassword='', **kwargs): ' Connect to Crea network (internal use only)\n ' if (not node): node = self.get_default_nodes() if (not bool(node)): raise ValueError('A Crea node needs to be provided!') if ((not rpcuser) and ('rpcuser' in config)): rpcuser = config['rpcuser'] if ((not rpcpassword) and ('rpcpassword' in config)): rpcpassword = config['rpcpassword'] self.rpc = CreaNodeRPC(node, rpcuser, rpcpassword, **kwargs)
3,630,909,131,526,738,400
Connect to Crea network (internal use only)
crea/crea.py
connect
creativechain/crea-python-lib
python
def connect(self, node='', rpcuser='', rpcpassword='', **kwargs): ' \n ' if (not node): node = self.get_default_nodes() if (not bool(node)): raise ValueError('A Crea node needs to be provided!') if ((not rpcuser) and ('rpcuser' in config)): rpcuser = config['rpcuser'] if ((not rpcpassword) and ('rpcpassword' in config)): rpcpassword = config['rpcpassword'] self.rpc = CreaNodeRPC(node, rpcuser, rpcpassword, **kwargs)
def is_connected(self): 'Returns if rpc is connected' return (self.rpc is not None)
-5,560,205,291,248,361,000
Returns if rpc is connected
crea/crea.py
is_connected
creativechain/crea-python-lib
python
def is_connected(self): return (self.rpc is not None)
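A hedged construction sketch for the class whose methods appear above; the import path and node URL are assumptions, and nobroadcast=True prevents any transaction from actually being broadcast:

from crea import Crea   # import path assumed

crea = Crea(node='https://nodes.creary.net', nobroadcast=True)   # node URL is illustrative
if crea.is_connected():
    print(crea.rpc)      # the underlying CreaNodeRPC instance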