Dataset columns:
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- metadata: dict
- negatives: sequence (length 30)
- negative_scores: sequence (length 30)
- document_score: string (lengths 4 to 10)
- document_rank: string (2 classes)
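A minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face datasets library; the repository path below is a placeholder, not the actual dataset name:

from datasets import load_dataset

# Placeholder repository path -- substitute the real dataset name.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language query, e.g. "Test case for ..."
print(row["document"])          # the positive code snippet
print(len(row["negatives"]))    # 30 hard negatives per row
print(row["document_score"], row["document_rank"])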
Test case for remove_trusted_project3
def test_remove_trusted_project3(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_remove_project(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_remove(self):\n pass", "def test_replace_project(self):\n pass", "def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects", "def test_networking_project_network_delete(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)", "def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()", "def test_remove_user(self):\n pass", "def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)" ]
[ "0.9278655", "0.9246258", "0.9157412", "0.9134749", "0.91201967", "0.91039425", "0.9036883", "0.77507776", "0.7569032", "0.7497728", "0.7471733", "0.73709375", "0.7319566", "0.72512436", "0.724262", "0.720237", "0.69951224", "0.67084503", "0.6693837", "0.6693837", "0.6288508", "0.60433125", "0.59939176", "0.59599125", "0.5884533", "0.5877229", "0.5867639", "0.5832234", "0.5799917", "0.5791616" ]
0.9546932
0
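Each row's metadata declares a triplet objective over (query, document, negatives). A minimal sketch of expanding one row into (anchor, positive, negative) training triplets, assuming that reading of the objective field:

def row_to_triplets(row):
    # Assumed semantics of objective.triplet = [["query", "document", "negatives"]]:
    # anchor = query, positive = document, one triplet per hard negative.
    return [(row["query"], row["document"], neg) for neg in row["negatives"]]

# Applied to the row above, this yields 30 triplets, one per negative.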
Test case for remove_trusted_project4
def test_remove_trusted_project4(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_remove_project(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_remove(self):\n pass", "def test_remove_user(self):\n pass", "def test_replace_project(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)", "def test_delete_deployment_run(self):\n pass", "def test_verify_remove(self):\n self._verify([self.applied_commands['remove']])", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def test_networking_project_network_delete(self):\n pass", "def test_remove_projects_from_group(client):\n group = client.remove_projects_from_group(\n TEAM_ID,\n GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"]\n )\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" not in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" not in group.projects" ]
[ "0.93665326", "0.9304083", "0.9278743", "0.9268719", "0.92684174", "0.91731745", "0.91634226", "0.76994747", "0.75663847", "0.7544348", "0.7483998", "0.7455191", "0.74047047", "0.7276416", "0.7209261", "0.7208959", "0.70610076", "0.6924763", "0.6703152", "0.6703152", "0.63288295", "0.6272401", "0.6078622", "0.605246", "0.60258996", "0.5959381", "0.59168875", "0.58945554", "0.589323", "0.58403134" ]
0.9543273
0
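The score fields look like retrieval similarities: negative_scores is sorted in descending order, and a document_rank of 0 coincides with document_score exceeding every negative score. A small consistency check under that assumed interpretation (the field semantics are not documented here):

def rank_consistent(row):
    # Assumption: document_rank 0 means the positive outscores all negatives.
    pos = float(row["document_score"])
    top_negative = max(float(s) for s in row["negative_scores"])
    return (pos > top_negative) == (int(row["document_rank"]) == 0)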
Test case for remove_trusted_project5
def test_remove_trusted_project5(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_remove_project(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_remove(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_remove_user(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)", "def test_replace_project(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_config_remove(self):\n test_name = sys._getframe().f_code.co_name\n self.env.config.set('project', 'name', 'Test project')\n rv, output = self._execute('config remove project name')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)\n self.assertEqual('My Project', self.env.config.get('project', 'name'))", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def test_verify_remove(self):\n self._verify([self.applied_commands['remove']])", "def test_unassign_managing_team(self):\n pass" ]
[ "0.9409422", "0.9379898", "0.93444204", "0.9290754", "0.9269168", "0.9133459", "0.9105442", "0.77310395", "0.76480186", "0.75851285", "0.7489856", "0.7451729", "0.74309206", "0.7246543", "0.7209722", "0.7162757", "0.7043448", "0.6806125", "0.64220196", "0.64220196", "0.6223145", "0.6198417", "0.60641617", "0.5939832", "0.58838886", "0.58602417", "0.584046", "0.58105505", "0.58072495", "0.5767448" ]
0.95118666
0
Test case for remove_trusted_project6
def test_remove_trusted_project6(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_trusted_project7(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_remove_project(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_remove(self):\n pass", "def test_remove_user(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)", "def test_replace_project(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_verify_remove(self):\n self._verify([self.applied_commands['remove']])", "def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))", "def test_teams_remove_user_from_team_v2(self):\n pass", "def test_ticket_type_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type remove task')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)" ]
[ "0.939581", "0.9384906", "0.93368316", "0.93177885", "0.9211896", "0.9105562", "0.9033085", "0.77461016", "0.7496969", "0.749646", "0.7483712", "0.7463693", "0.7402155", "0.7266633", "0.7123273", "0.71190715", "0.69215924", "0.68984663", "0.6431791", "0.6431791", "0.6286845", "0.62804323", "0.618965", "0.61681336", "0.6015552", "0.5909519", "0.5886068", "0.58738875", "0.5868697", "0.58367646" ]
0.95322406
0
Test case for remove_trusted_project7
def test_remove_trusted_project7(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_trusted_project6(self):\n pass", "def test_remove_trusted_project4(self):\n pass", "def test_remove_trusted_project3(self):\n pass", "def test_remove_trusted_project5(self):\n pass", "def test_remove_trusted_project1(self):\n pass", "def test_remove_trusted_project(self):\n pass", "def test_remove_trusted_project2(self):\n pass", "def test_add_trusted_project6(self):\n pass", "def test_add_trusted_project7(self):\n pass", "def test_remove_project(self):\n pass", "def test_add_trusted_project4(self):\n pass", "def test_add_trusted_project5(self):\n pass", "def test_add_trusted_project3(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_add_trusted_project1(self):\n pass", "def test_add_trusted_project2(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_add_trusted_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_remove(self):\n pass", "def test_remove_role_from_project_member(self):\n pass", "def test_remove_user(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)", "def test_replace_project(self):\n pass", "def test_verify_remove(self):\n self._verify([self.applied_commands['remove']])", "def test_delete_deployment_run(self):\n pass", "def test_ticket_type_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type remove task')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)" ]
[ "0.9398824", "0.9376049", "0.93200684", "0.9272902", "0.92145514", "0.91305655", "0.9100401", "0.76087683", "0.75870156", "0.75179636", "0.74906707", "0.74203193", "0.7412438", "0.7254135", "0.71347374", "0.71240234", "0.7028174", "0.7005196", "0.6525979", "0.6525979", "0.62860274", "0.6280639", "0.61013794", "0.60980105", "0.59754425", "0.5959469", "0.5958455", "0.58785707", "0.5838588", "0.58384806" ]
0.94799876
0
Test case for remove_virt_realm
def test_remove_virt_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deallocate_virt_realm(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_delete_namespaced_role(self):\n pass", "def test_delete_hyperflex_sys_config_policy(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_delete_hyperflex_local_credential_policy(self):\n pass", "def test_you_must_be_realm_admin(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n\n other_realm = do_create_realm(string_id=\"other\", name=\"other\")\n stream = self.make_stream(\"other_realm_stream\", realm=other_realm)\n\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")\n\n # Even becoming a realm admin doesn't help us for an out-of-realm\n # stream.\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")", "def test_vault_delete_vault_section(self):\n pass", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_delete_hyperflex_ucsm_config_policy(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_delete_namespaced_role_binding(self):\n pass", "def test_delete_hyperflex_app_catalog(self):\n pass", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_delete_hyperflex_cluster_profile(self):\n pass", "def test_delete_hyperflex_vcenter_config_policy(self):\n pass", "def test_ipam_vrfs_delete(self):\n pass", "def test_delete_hyperflex_proxy_setting_policy(self):\n pass", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n 
target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def test_delete_admin_from_org(self):\n pass" ]
[ "0.8415829", "0.76790386", "0.6977621", "0.6968231", "0.6649399", "0.6488709", "0.6478052", "0.64243895", "0.63384175", "0.63369054", "0.60204786", "0.596914", "0.5958203", "0.5943352", "0.5852323", "0.58489937", "0.58290774", "0.58160895", "0.5814261", "0.5807215", "0.5796356", "0.5795389", "0.5791214", "0.5782589", "0.57681966", "0.57617724", "0.5752949", "0.5739832", "0.5733877", "0.57292193" ]
0.95876104
0
Test case for replace_software_asset_for_software_component
def test_replace_software_asset_for_software_component(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_itar_restrict_software_asset(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_create_software_bundle_from_system_module(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_get_software(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def test_itar_restrict_asset(self):\n pass", "def test_delete_system_asset(self):\n pass", "def test_set_asset_license_connected(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n story.save()\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def test_itar_restrict_test_asset(self):\n pass" ]
[ "0.81310123", "0.8055666", "0.804227", "0.7302746", "0.72951823", "0.7087468", "0.69891983", "0.69518346", "0.6941114", "0.69226784", "0.6725317", "0.66894275", "0.6558504", "0.65035266", "0.6500949", "0.6385505", "0.6357382", "0.63551706", "0.6257513", "0.6034613", "0.6026534", "0.60089993", "0.59803915", "0.59367764", "0.5833394", "0.58222324", "0.5805537", "0.57651585", "0.56549853", "0.55936086" ]
0.94514245
0
Test case for request_project_invitation
def test_request_project_invitation(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_meeting_invitation(self):\n pass", "def test_new_invitation(self):\n (approval_user_id,\n joining_user_id,\n conversation_id,\n _) = self.setup_invites()\n uri = '/status/{}/{}'.format(conversation_id, approval_user_id)\n self.set_user_cookie(approval_user_id, conversation_id)\n self.set_session_cookie(approval_user_id, conversation_id)\n resp = self.client.post(\n uri, data={'public_key':'', 'last_message_seen_id': None})\n resp_json = json.loads(resp.data)\n\n invitations = resp_json['invitations']\n self.assertEqual(len(invitations), 1)\n self.assertEqual(invitations[0]['user_id'], joining_user_id)", "def test_create(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 1\n )\n invite = ProjectInvite.objects.first()\n self.assertEqual(invite.email, INVITE_USER_EMAIL)\n self.assertEqual(invite.role, self.role_contributor)\n self.assertEqual(invite.issuer, self.user)\n self.assertEqual(invite.message, INVITE_MESSAGE)\n\n expected = {\n 'email': INVITE_USER_EMAIL,\n 'project': str(self.project.sodar_uuid),\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'issuer': self.get_serialized_user(self.user),\n 'date_created': self.get_drf_datetime(invite.date_created),\n 'date_expire': self.get_drf_datetime(invite.date_expire),\n 'message': invite.message,\n 'sodar_uuid': str(invite.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)\n self.assertEqual(len(mail.outbox), 1)", "def test_request_membership_form_with_an_invalid_project_id(self):\n pass", "def test_invite_user_to_project_email_not_username(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"invite_user_to_project\")\n self.assertEqual(mail.outbox[0].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(fake_clients.identity_cache[\"new_users\"][0].name, \"new_user\")", "def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": 
project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_create_project_request(self):\n pass", "def test_new_user_no_project(self):\n setup_identity_cache()\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": \"test_project_id\",\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_ack_invitation(self):\n (approver_user_id,\n joining_user_id,\n _,\n invite_id) = self.setup_invites()\n uri = '/invite_ack/{}/{}'.format(approver_user_id, joining_user_id)\n rsp = self.client.post(uri, data={'approves': True})\n rsp_json = json.loads(rsp.data)\n\n invite = model.Invitation.query.get(invite_id)\n self.assertEqual(rsp_json['success'], True)\n self.assertEqual(rsp.status_code, 200)\n self.assertEqual(invite.invite_id, invite_id)", "def test_create_owner(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_OWNER,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def test_new_user_not_my_project(self):\n setup_identity_cache()\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": \"test_project_id\",\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_teams_invite_member(self):\n pass", "def test_form_when_a_user_has_a_request_awaiting_authorisation(self):\n self.approve_project(self.project)\n\n # Create a project user membership.\n ProjectUserMembership.objects.create(\n project=self.project,\n user=self.project_applicant,\n status=ProjectUserMembership.AWAITING_AUTHORISATION,\n date_joined=datetime.datetime.now(),\n date_left=datetime.datetime.now() + datetime.timedelta(days=10),\n )\n\n # Ensure the project user membership status is currently set to awaiting authorisation.\n membership = ProjectUserMembership.objects.get(\n user=self.project_applicant,\n project=self.project,\n )\n 
self.assertTrue(membership.is_awaiting_authorisation())\n\n # A request to create a project user membership should be rejected.\n form = ProjectUserMembershipCreationForm(\n initial={\n 'user': self.project_applicant,\n },\n data={\n 'project_code': self.project_code,\n },\n )\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['project_code'],\n ['A membership request for this project already exists.'],\n )", "def test_create_invitation(session):\n invitation = factory_invitation_model(session=session, status='PENDING')\n session.add(invitation)\n session.commit()\n assert invitation.id is not None", "def test_post_request_for_team(self):\n\n usual_user = UserFactory(\n username='Usual User',\n email='[email protected]',\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'team': self.team.id}\n response = self.client.post(reverse('api:user-team-requests-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n notification = UserNotification.objects.last()\n notification_message = UserNotification.get_notification_text(\n UserNotification.TEAM_REQUEST_WAS_SENT_WITH_DEACTIVATED_EMAIL, username=usual_user.username\n )\n self.assertEqual(notification.message, notification_message)", "def test_create_invalid_email(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': 'NOT_AN_EMAIL!',\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def test_get_invitations(session, auth_mock): # pylint:disable=unused-argument\n with patch.object(InvitationService, 'send_invitation', return_value=None):\n user = factory_user_model()\n org = OrgService.create_org(TestOrgInfo.org1, user.id)\n\n invitation_info = factory_invitation(org.as_dict()['id'])\n\n invitation = InvitationService.create_invitation(invitation_info, UserService(user), {}, '')\n\n response = org.get_invitations()\n assert response\n assert len(response['invitations']) == 1\n assert response['invitations'][0]['recipientEmail'] == invitation.as_dict()['recipientEmail']", "def test_email_additional_addresses(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignments = [\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n ),\n fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"project_admin\",\n user={\"id\": user.id},\n ),\n ]\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=assignments\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"[email protected]\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", 
headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(set(mail.outbox[0].to), set([\"[email protected]\"]))\n self.assertEqual(mail.outbox[0].subject, \"invite_user_to_project_additional\")\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[1].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})", "def test_find_pending_invitations_by_org(session): # pylint:disable=unused-argument\n invitation = factory_invitation_model(session=session, status='PENDING')\n session.add(invitation)\n session.commit()\n\n retrieved_invitation = InvitationModel.find_pending_invitations_by_org(invitation.membership[0].org_id)\n assert len(retrieved_invitation) == 1\n assert retrieved_invitation[0].recipient_email == invitation.recipient_email", "def test_registration_request_tag(self): \n content = \"register here: <registration> {% registration %} </registration>\"\n \n registrationpage = create_page_in_admin(self.testproject,\"registrationpage\",content)\n \n # when you don't have to be approved, just following the link rendered by registration should do\n # register you\n self.testproject.require_participant_review = False\n self.testproject.save()\n \n response = self._test_page_can_be_viewed(self.signedup_user,registrationpage)\n self.assertTextBetweenTags(response.content,\"registration\",\"Participate in\",\"registering without review\")\n \n \n # when participant review is on, all admins will receive an email of a \n # new participant request, which they can approve or reject. 
\n self.testproject.require_participant_review = True\n self.testproject.save()\n \n # have a user request registration \n response = self._test_page_can_be_viewed(self.signedup_user,registrationpage)\n self.assertTextBetweenTags(response.content,\n \"registration\",\n \"Request to participate in\",\n \"registering with participation review\")\n \n registration_anchor = find_text_between('<registration>','</registration>',response.content)\n registration_link = extract_href_from_anchor(registration_anchor)\n \n response = self._test_url_can_be_viewed(self.signedup_user,registration_link)\n \n # user should see some useful info after requestion registration \n self.assertText(response.content,\n \"A participation request has been sent\",\n \"Checking message after user has requested participation\")\n # and admins should receive an email \n \n request_mail = mail.outbox[-1]\n \n admins = User.objects.filter(groups__name=self.testproject.admin_group_name())\n \n self.assertEmail(request_mail,{\"to\":admins[0].email,\n \"subject\":\"New participation request\",\n \"body\":\"has just requested to participate\" \n })\n \n # link in this email should lead to admin overview of requests\n link_in_email = find_text_between('href=\"','\">here',self.get_mail_html_part(request_mail))\n #TODO: create a function to check all links in the email.\n \n reg_request = RegistrationRequest.objects.filter(project=self.testproject)\n self.assertTrue(reg_request != [],\n \"User {0} clicked registration link, but no registrationRequest\\\n object seems to have been created for project '{1}'\".format(self.signedup_user,\n self.testproject))\n \n \n factory = RequestFactory()\n request = factory.get(\"/\") #just fake a request, we only need to add user\n request.user = self.testproject.get_admins()[0]\n \n self.apply_standard_middleware(request)\n \n modeladmin = RegistrationRequestAdmin(RegistrationRequest,admin.site)\n modeladmin.accept(request,reg_request)\n \n \n # request.status = RegistrationRequest.ACCEPTED\n # request.save() \n # after acceptance, user should receive notification email\n acceptance_mail = mail.outbox[-1]\n \n self.assertEmail(acceptance_mail,{\"to\":self.signedup_user.email,\n \"subject\":\"participation request accepted\",\n \"body\":\"has just accepted your request\"\n })\n \n # after acceptance, user should be able to access restricted pages.\n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n \n self._test_page_can_be_viewed(self.signedup_user,registeredonlypage)\n \n # just to test, a random user should not be able to see this page\n self._test_page_can_not_be_viewed(self._create_random_user(\"not_registered\"),registeredonlypage)\n \n # check if admin can load the view to add a registration requests\n admin_url = reverse('admin:comicmodels_registrationrequest_add')\n \n self._test_url_can_be_viewed(self.projectadmin,admin_url)\n #test whether the participationrequest is actually in the list in the admin\n projectadmin_list = reverse('admin:comicmodels_registrationrequest_changelist',\n current_app=self.testproject.get_project_admin_instance_name())\n \n # check an admin can see the registration request that was just made by signup_user.\n result = self._test_url_can_be_viewed(self.projectadmin,projectadmin_list)\n name = self.signedup_user.username\n \n self.assertTrue(name in result.rendered_content,\"An admin user in projectadmin should be able to see the participation\"\n \"request that was just made, but 
could not find the requesting users name '{}' anywhere in the content of\"\n \" page {}\". format(name,projectadmin_list)) \n \n #self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)", "def test_get_open_requests_by_team(self):\n pass", "def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_additional_emails_role_no_email(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"[email protected]\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[0].to[0], \"[email protected]\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True, \"password\": \"1234\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_existing_user(self):\n user = self.make_user('new_user')\n user.email = INVITE_USER_EMAIL\n user.save()\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = 
{\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def test_invite(self):\n self.client.invite(\"foo\", \"#bar\")\n self.assertEqual(self.client.lines, [\"INVITE foo #bar\"])", "def test_create_delegate(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_DELEGATE,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 1\n )\n invite = ProjectInvite.objects.first()\n self.assertEqual(invite.role, self.role_delegate)\n self.assertEqual(len(mail.outbox), 1)", "def test_inviteToEvent(self):\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = '2015-08-21T00:00:00.000Z'\n ))\n # Create sample itinerary for naina for the event day\n self.json_post('/createItinerary/naina', dict(\n name = 'New Day1',\n date = '2015-08-21T00:00:00.000Z'\n ))\n # Create sample itinerary for bugi for the event day\n self.json_post('/createItinerary/bugi', dict(\n name = 'New Day',\n date = '2015-08-21T00:00:00.000Z'\n ))\n # Create sample itinerary for amy for the event day\n self.json_post('/createItinerary/amy', dict(\n name = 'New Day',\n date = '2015-08-21T00:00:00.000Z'\n ))\n\n event = dict(start = '2015-08-21T11:23:00.000Z',\n end = '2015-08-21T11:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n rv = self.json_post('/createEvent/alex', event)\n uid = str('alex_' + event['start'] + event['end'])\n assert uid in str(rv.data)\n\n rv = self.json_post('/inviteToEvent/bbbb', event)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = \"invalidid\",\n invited = 'naina'\n ))\n print(rv.data)\n assert \"Event not found\" in str(rv.data)\n\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'bbbbb'\n ))\n assert \"Shared user does not exist\" in str(rv.data)\n\n # Share event with naina\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'naina'\n ))\n assert uid in str(rv.data)\n\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'naina'\n ))\n assert \"Already sent invitation\" in str(rv.data)\n\n rv = self.json_post('/createEvent/naina', dict(\n uid = uid\n ))\n assert uid in str(rv.data)\n\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'naina'\n ))\n assert \"Already shared with user\" in str(rv.data)\n\n # Share event with amy\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'amy'\n ))\n assert uid in str(rv.data)\n\n # Share event with amy\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'bugi'\n ))\n assert uid in str(rv.data)\n\n # Share event with amy\n rv = self.json_post('/inviteToEvent/alex', dict(\n uid = uid,\n invited = 'amy'\n ))\n assert \"Already sent invitation\" in str(rv.data)\n\n rv = 
self.json_post('/createEvent/amy', dict(\n uid = uid\n ))\n print(rv.data)\n assert uid in str(rv.data)", "def test_find_invitations_by_org(session): # pylint:disable=unused-argument\n invitation = factory_invitation_model(session=session, status='PENDING')\n session.add(invitation)\n session.commit()\n\n found_invitations = InvitationModel.find_invitations_by_org(invitation.membership[0].org_id)\n assert found_invitations\n assert len(found_invitations) == 1\n assert found_invitations[0].membership[0].org_id == invitation.membership[0].org_id\n assert invitation.invitation_status_code == 'PENDING'", "def request_item(request, locale=None):\n user = request.user\n\n # Request projects to be enabled for team\n if locale:\n slug_list = request.POST.getlist(\"projects[]\")\n locale = get_object_or_404(Locale, code=locale)\n\n # Validate projects\n project_list = (\n Project.objects.visible()\n .visible_for(user)\n .filter(slug__in=slug_list, can_be_requested=True)\n )\n if not project_list:\n return HttpResponseBadRequest(\n \"Bad Request: Non-existent projects specified\"\n )\n\n projects = \"\".join(f\"- {p.name} ({p.slug})\\n\" for p in project_list)\n\n mail_subject = \"Project request for {locale} ({code})\".format(\n locale=locale.name, code=locale.code\n )\n\n payload = {\n \"locale\": locale.name,\n \"code\": locale.code,\n \"projects\": projects,\n \"user\": user.display_name_and_email,\n \"user_role\": user.locale_role(locale),\n \"user_url\": request.build_absolute_uri(user.profile_url),\n }\n\n # Request new teams to be enabled\n else:\n form = LocaleRequestForm(request.POST)\n if not form.is_valid():\n if form.has_error(\"code\", \"unique\"):\n return HttpResponse(\"This team already exists.\", status=409)\n return HttpResponseBadRequest(form.errors.as_json())\n\n code = form.cleaned_data[\"code\"]\n name = form.cleaned_data[\"name\"]\n\n mail_subject = \"New team request: {locale} ({code})\".format(\n locale=name, code=code\n )\n\n payload = {\n \"locale\": name,\n \"code\": code,\n \"user\": user.display_name_and_email,\n \"user_role\": user.role(),\n \"user_url\": request.build_absolute_uri(user.profile_url),\n }\n\n if settings.PROJECT_MANAGERS[0] != \"\":\n template = get_template(\"teams/email_request_item.jinja\")\n mail_body = template.render(payload)\n cc = {user.contact_email}\n if locale:\n cc.update(\n set(locale.managers_group.user_set.values_list(\"email\", flat=True))\n )\n\n EmailMessage(\n subject=mail_subject,\n body=mail_body,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=settings.PROJECT_MANAGERS,\n cc=cc,\n reply_to=[user.contact_email],\n ).send()\n else:\n raise ImproperlyConfigured(\n \"PROJECT_MANAGERS not defined in settings. Email recipient unknown.\"\n )\n\n return HttpResponse(\"ok\")" ]
[ "0.72965556", "0.697636", "0.6641643", "0.6631371", "0.6587696", "0.6527494", "0.65196896", "0.65032", "0.64932173", "0.649059", "0.6410881", "0.63772416", "0.63487726", "0.6341639", "0.6314764", "0.6272159", "0.62623215", "0.6262167", "0.62577415", "0.61963934", "0.6161806", "0.6161134", "0.61553997", "0.613516", "0.613481", "0.6131618", "0.6120872", "0.6098145", "0.6096003", "0.60931563" ]
0.9356486
0
Test case for retest_deployment_run
def test_retest_deployment_run(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_relaunch_deployment_run(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_redeploy(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_update_deployment(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_get_deployment_runs(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_create_deployment(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_download_deployment_run_test_report(self):\n pass", "def test_redeploy_same_app():\n\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", [{\"name\": \"d1\"}, {\"name\": \"d2\"}])\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n # Deploy the same app with different deployments\n unused_deployments = app_state_manager.deploy_application(\n \"test_app\", [{\"name\": \"d2\"}, {\"name\": \"d3\"}]\n )\n assert unused_deployments == [\"d1\"]\n\n app_state_manager.deployment_state_manager.add_deployment_status(\n DeploymentStatusInfo(\"d3\", DeploymentStatus.UPDATING)\n )\n assert app_state_manager._application_states[\"test_app\"].deployments_to_delete == {\n \"d1\"\n }\n\n # After updating, the deployment should be deleted successfully, and\n # deployments_to_delete should be empty\n app_state_manager.deployment_state_manager.delete_deployment(\"d1\")\n app_state_manager.update()\n assert (\n app_state_manager._application_states[\"test_app\"].deployments_to_delete == set()\n )", "def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n 
deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)", "def test_redeploy_edges(self):\n pass", "def test_clone_deployment(self):\n pass", "def test_config_deploy_app(fail_deploy):\n signal = SignalActor.remote()\n\n @ray.remote\n def task():\n ray.get(signal.wait.remote())\n if fail_deploy:\n raise Exception(\"fail!\")\n\n object_ref = task.remote()\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", object_ref)\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n signal.send.remote()\n time.sleep(2)\n if fail_deploy:\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n else:\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING", "def test_get_deployment_resources(self):\n pass", "def test_create_namespaced_deployment_config_rollback(self):\n pass", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass" ]
[ "0.88667893", "0.81965613", "0.8175889", "0.8149607", "0.8062848", "0.80277836", "0.7990167", "0.79841083", "0.7932639", "0.79243463", "0.76463956", "0.731711", "0.73113394", "0.7272583", "0.7163515", "0.7129374", "0.7129374", "0.71230775", "0.7115689", "0.6957425", "0.6743325", "0.66710806", "0.66006213", "0.6580712", "0.6557025", "0.65299773", "0.6528198", "0.64785814", "0.64785814", "0.6448306" ]
0.94023836
0
Test case for retrieve_system_asset
def test_retrieve_system_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_system_asset(self):\n pass", "def test_get_test_asset(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_list_system_assets(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_delete_system_asset(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_get_test_assets(self):\n pass", "def _get(self) -> json_api.generic.Metadata:\n api_endpoint = ApiEndpoints.assets.fields\n return api_endpoint.perform_request(http=self.auth.http, asset_type=self.parent.ASSET_TYPE)", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_get_container_assets(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def test_update_software_asset_content(self):\n pass", "def test_import_test_asset(self):\n pass", "def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)", "def test_update_asset(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_file_asset(self):\n proto = struct_pb2.Struct()\n \n # pylint: disable=no-member\n subproto = proto.get_or_create_struct(\"asset\")\n subproto[rpc._special_sig_key] = rpc._special_asset_sig\n subproto[\"path\"] = \"foo.txt\"\n deserialized = rpc.deserialize_resource_props(proto)\n self.assertIsInstance(deserialized[\"asset\"], FileAsset)\n self.assertEqual(\"foo.txt\", deserialized[\"asset\"].path)", "def get_game_asset(collection_name, return_type=flask.Response):\n\n model = importlib.import_module('app.models.%s' % collection_name)\n A = model.Assets()\n\n if return_type == dict:\n return A.assets\n elif return_type == object:\n return A\n\n return A.request_response()", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def get_asset(location, filename):\r\n return contentstore().find(Transcript.asset_location(location, filename))", "def test_aws_service_api_image_get(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_unlocked_asset(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_unlocked)\r\n self.assertEqual(resp.status_code, 200) # pylint: disable=E1103\r", "def api_asset_get():\n names = request.args.getlist(\"name\")\n\n result = []\n for name in names:\n asset = app.bank.get(name)\n if asset:\n result.append(asset)\n\n return jsonify(sorted(result)), 200", "def test_aws_service_api_volume_get(self):\n pass", "def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)" ]
[ "0.77519137", "0.7707771", "0.7535579", "0.7114247", "0.70973", "0.6749351", "0.6716273", "0.6668374", "0.6478854", "0.64206934", "0.63568413", "0.6275273", "0.6256317", "0.6254281", "0.6108569", "0.597755", "0.5956065", "0.5931337", "0.5883109", "0.58797544", "0.58766145", "0.5876146", "0.5870578", "0.5869728", "0.5837417", "0.57991517", "0.5731148", "0.5690435", "0.5677885", "0.5667539" ]
0.9224318
0
Test case for retrieve_team
def test_retrieve_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_team_v1(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_basketballteams_get(self):\n pass", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_teams_get_teams_v1(self):\n pass", "def get_teams():", "def test_teams_read(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_teams_list(self):\n pass", "def test_teams_get_users_teams_v2(self):\n pass", "def test_get_teams(self):\n owner2 = AnotherUserFactory(email_confirmed=True)\n owner3 = AnotherUserFactory(username='team owner 3', email='[email protected]', email_confirmed=True,)\n TeamFactory(owner=owner2, name='second team')\n TeamFactory(owner=owner3, name='third team')\n\n usual_user = UserFactory(\n username='usualuser',\n email='[email protected]',\n email_confirmed=True,\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n response = self.client.get(reverse('api:teams-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 3)", "def get_people(team):", "def test_basketballteams_id_get(self):\n pass", "def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_get_for_team(self):\n user, team_dict = self.test_create_team_creates_survey()\n response = self.testapp.get(\n '/api/teams/{}/survey'.format(team_dict['uid']),\n headers=self.login_headers(user),\n )\n survey_dict = json.loads(response.body)\n self.assertTrue(survey_dict['uid'].startswith('Survey'))", "def test_create_team(self):\n pass", "def test_get_team_history(self):\n pass", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def test_get_one_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='[email protected]',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], teammate.uid)", "def test_user_get_team_page():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = 
client.get('/team/2')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_teams_get_users_teams_v1(self):\n pass", "def test_success(self, data_flow_api_client):\n response = data_flow_api_client.get(self.view_url)\n\n assert response.status_code == status.HTTP_200_OK\n\n response_team = response.json()['results'][0]\n team = Team.objects.get(id=response_team['id'])\n\n assert response_team == get_expected_data_from_team(team)", "def test_update_team(self):\n pass", "def test_assign_managing_team(self):\n pass", "def test_get_open_requests_by_team(self):\n pass", "def test_teams_create(self):\n pass", "def test_team_view(self):\n with self.app.app_context():\n u = user(save=True)\n t = team(users=[u], save=True)\n\n response = self.client.get('/team/%s' % t.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/team/not-a-real-team')\n eq_(response.status_code, 404)" ]
[ "0.8301532", "0.8301532", "0.81889266", "0.8013086", "0.79963243", "0.79211015", "0.7728024", "0.7666184", "0.7615079", "0.75721085", "0.7569918", "0.7486092", "0.74240667", "0.7385046", "0.73560196", "0.7339386", "0.7335217", "0.73301536", "0.7326118", "0.72554547", "0.7216644", "0.7214794", "0.7128264", "0.7054458", "0.69882953", "0.695955", "0.69098467", "0.6905515", "0.68889505", "0.68867636" ]
0.92021567
0
Test case for retrieve_template_registration
def test_retrieve_template_registration(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_template_registrations(self):\n pass", "def test_update_template_registration(self):\n pass", "def test_register_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_get_device_template(self):\n pass", "def test_get_template_success(self):\n template_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_get_subscription_templates(self):\n pass", "def test_create_template_subsciption(self):\n pass", "def test_unregister_template(self):\n pass", "def test_registration_get(self):\n\n resp = self.client.get(reverse(self.SIGNUP_URL))\n self.assertEqual(200, resp.status_code)\n self.assertTemplateUsed(resp, self.DEFAULT_SIGNUP_PAGE)\n self.failUnless(isinstance(resp.context['user_form'], UserForm))", "def test_get_device_templates(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def test_registration_view_get(self):\n response = self.client.get(reverse('rdef_web:user_register'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response,\n 'rdef_web/registration.html')\n self.failUnless(isinstance(response.context['user_form'],\n forms.UserForm))", "def test_get_activity_template(self):\n pass", "def test_register_page_is_rendered(self):\n url = \"/regiter/\"\n response = self.client.get('/register/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"register_user.html\")", "def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')", "def test_template_feedback(self):\r\n pass", "def test_create_subscription_template(self):\n pass", "def test_registration_complete_view_get(self):\n response = self.client.post(reverse('rdef_web:user_register'),\n data={'username': 'alice',\n 'password': '123123',\n 'password_confirm': '123123',\n 'email': '[email protected]'})\n self.assertEqual(response.context['msg'], 'SUCCESS')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'rdef_web/reg_complited.html')", "def test_create_device_template(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_get_tosca_template(self):\n pass", "def test_get_activity_templates(self):\n pass", "def test_list_unregistered_templates(self):\n pass", "def test_template(self):\n\t\tself.assertTemplateUsed(self.resp, 'inicio.html')", "def test_template(self):\n self.assertTemplateUsed(self.response, 'formularios.html')", "def testTemplateGet(self):\n self.assertRaises(NotImplementedError, getattr,\n self.tempfile, 'template')", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)" ]
[ "0.80948377", "0.7862938", "0.7847954", "0.7625288", "0.75809413", "0.7120092", "0.6964303", "0.68536776", "0.68428296", "0.68417454", "0.68230385", "0.6750946", "0.67507726", "0.6680093", "0.66776", "0.66391975", "0.6622713", "0.65810066", "0.6524386", "0.6482459", "0.6446946", "0.64095175", "0.6401725", "0.6373364", "0.63462496", "0.6328138", "0.6295015", "0.62870866", "0.6269332", "0.61917144" ]
0.94213057
0
Test case for set_category_parent
def test_set_category_parent(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_parents_category(self):\n self.assertEqual(self.category.get_parents(), [])", "def set_CategoryParent(self, value):\n super(GetCategoriesInputSet, self)._set_input('CategoryParent', value)", "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def testSetParent(self):\n for child in self.color_corrections + self.color_decisions:\n self.assertEqual(\n None,\n child.parent\n )\n\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )\n child.parent = 'banana'\n self.assertEqual(\n 'banana',\n child.parent\n )\n\n self.node.set_parentage()\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )", "def test_change_parent_location(self):\n pass", "def test_lacking_parent(self):\n pass", "def test_get_parents_project(self):\n self.assertEqual(list(self.project.get_parents()), [self.category])", "def testSetParentage(self):\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )\n\n self.cc.parent = 'bob'\n self.media_ref.parent = 'joe'\n\n self.cd.set_parentage()\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )", "def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_replace_parent_to_self(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n with pytest.raises(Exception):\n groupa.replace_parent(groupb, groupa)", "def clean_parent(self):\r\n data = self.cleaned_data['parent']\r\n if data == self.instance:\r\n raise forms.ValidationError(\r\n _('A category cannot be parent of itself.'))\r\n return data", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", 
"def test_add_self_as_parent(self):\n groupa = Group('groupa')\n with pytest.raises(Exception):\n groupa.add_parent(groupa)", "def test_parent(self):\n self.assertEqual( self.xs_const.ancestor, None )\n self.assertEqual( self.xs_const.rootAncestor, self.xs_const )\n self.assertEqual( self.xs_const.ancestor, None )\n self.xs_const.setAncestor( 'fred' )\n self.assertEqual( self.xs_const.ancestor, 'fred' )", "def test_create_category(self):\n pass", "def test_create_parented_item(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='GreekHero', branch='draft'),\r\n 'chapter', block_id='chapter2'\r\n )\r\n original = modulestore().get_item(locator)\r\n\r\n locator = BlockUsageLocator(\r\n CourseLocator(org='testx', offering='wonderful', branch='draft'), 'course', 'head23456'\r\n )\r\n premod_course = modulestore().get_course(locator.course_key)\r\n category = 'chapter'\r\n new_module = modulestore().create_item(\r\n locator, category, 'user123',\r\n fields={'display_name': 'new chapter'},\r\n definition_locator=original.definition_locator\r\n )\r\n # check that course version changed and course's previous is the other one\r\n self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)\r\n parent = modulestore().get_item(locator)\r\n self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))\r\n self.assertEqual(new_module.definition_locator.definition_id, original.definition_locator.definition_id)", "def test_parent_link(self):\n def compare_func(obj, node):\n # no need to test for parents on root level of the tree\n if obj == self.document.root:\n return\n\n # parent-child link. obj must be in obj.parent.children\n self.assertTrue(obj in obj.parent.children)\n\n self.recursively_compare_tree_against_html(compare_func)", "def test_CategoriesMoveRight(self):\n # Check is move_children_right properly moved children's category one level up.\n self.cat1_1 = Category.objects.create(user=self.user, name=\"Category 1.1\", parent=self.cat1)\n self.cat1_2 = Category.objects.create(user=self.user, name=\"Category 1.2\", parent=self.cat1)\n self.cat1.move_children_right()\n self.assertEqual(self.cat1.get_children().count(), 0)\n\n # Check if disabled function properly moved children's category one level up.\n self.cat1_1.parent = self.cat1\n self.cat1_1.save()\n self.cat1_2.parent = self.cat1\n self.cat1_2.save()\n self.cat1.disable()\n self.assertEqual(self.cat1.get_children().count(), 0)", "def test_add_parent(self):\n _, _, groupa, groupb = create_objects()\n groupa.add_parent(groupb)\n assert groupb in groupa.parents\n assert groupa in groupb.children\n return (groupa, groupb)", "def test_parent_assigment():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(2, move=True)\n builder.add_child(13)\n builder.move_to_parent()\n builder.add_child(7)\n\n t = builder.build()\n\n assert t.parent is None\n assert t[0].parent == t\n assert t[1].parent == t\n assert t[0][0].parent == t[0]", "def test_set_parent_when_provided():\n\n # GIVEN a valid parent\n father: str = Pedigree.FATHER\n\n # WHEN running \"set_parent_if_missing\"\n validated_father: str = set_parent_if_missing(father)\n\n # THEN the returned string should not have been altered\n assert validated_father == father", "def save(self, *args, **kwargs):\n\n if self.parent:\n self.level = self.parent.level + 1\n self.blog_category = self.parent.blog_category\n\n super(Category, self).save(*args, **kwargs)", "def test_add_child_category(self):\n 
self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')" ]
[ "0.75247896", "0.72576463", "0.7246697", "0.7068522", "0.69167084", "0.6779439", "0.6769781", "0.6745938", "0.6740783", "0.6740783", "0.6687409", "0.6687409", "0.6642051", "0.6603109", "0.6516998", "0.65052515", "0.6504711", "0.65035063", "0.6463029", "0.6449206", "0.63634056", "0.6325728", "0.6289883", "0.62486887", "0.62403446", "0.6229187", "0.6176128", "0.6175809", "0.6169472", "0.61655676" ]
0.9430129
0
Test case for set_deployment_run_lock
def test_set_deployment_run_lock(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_lock_unlock(self):\n my_thing_id = str(uuid.uuid4())\n dweepy.lock(my_thing_id, test_lock, test_key)\n dweepy.unlock(my_thing_id, test_key)", "def test_remove_lock_locked(self):\n my_thing_id = str(uuid.uuid4())\n dweepy.lock(my_thing_id, test_lock, test_key)\n self.assertEqual(dweepy.remove_lock(test_lock, test_key), test_lock)", "def test_runs_with_lock(self):\n self.assertTrue(ResultRegistry.stop.synchronized)\n self.assertTrue(ResultRegistry.register.synchronized)", "def test_relaunch_deployment_run(self):\n pass", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_remove_lock_unlocked(self):\n try:\n dweepy.remove_lock(test_lock, test_key)\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'this lock is not in use')", "def testLock(self):\n self.assertTrue(self._lock.LockIfNotInUse())\n self.assertTrue(os.path.isfile(self._lock_path))\n self._lock.Unlock()\n\n self.assertTrue(self._lock.LockIfNotInUse(timeout_secs=0))\n self._lock.SetInUse(True)\n self._lock.Unlock()\n\n self.assertFalse(self._lock.LockIfNotInUse())\n\n self.assertTrue(self._lock.Lock())\n self._lock.SetInUse(False)\n self._lock.Unlock()\n\n self.assertTrue(self._lock.Lock(timeout_secs=0))\n self._lock.Unlock()", "async def test_lock(hass, config_entry, zha_gateway):\n from zigpy.zcl.clusters.closures import DoorLock\n from zigpy.zcl.clusters.general import Basic\n\n # create zigpy device\n zigpy_device = await async_init_zigpy_device(\n hass, [DoorLock.cluster_id, Basic.cluster_id], [], None, zha_gateway)\n\n # load up lock domain\n await hass.config_entries.async_forward_entry_setup(\n config_entry, DOMAIN)\n await hass.async_block_till_done()\n\n cluster = zigpy_device.endpoints.get(1).door_lock\n entity_id = make_entity_id(DOMAIN, zigpy_device, cluster)\n zha_device = zha_gateway.get_device(zigpy_device.ieee)\n\n # test that the lock was created and that it is unavailable\n assert hass.states.get(entity_id).state == STATE_UNAVAILABLE\n\n # allow traffic to flow through the gateway and device\n await async_enable_traffic(hass, zha_gateway, [zha_device])\n\n # test that the state has changed from unavailable to unlocked\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # set state to locked\n attr = make_attribute(0, 1)\n cluster.handle_message(False, 1, 0x0a, [[attr]])\n await hass.async_block_till_done()\n assert hass.states.get(entity_id).state == STATE_LOCKED\n\n # set state to unlocked\n attr.value.value = 2\n cluster.handle_message(False, 0, 0x0a, [[attr]])\n await hass.async_block_till_done()\n assert hass.states.get(entity_id).state == STATE_UNLOCKED\n\n # lock from HA\n await async_lock(hass, cluster, entity_id)\n\n # unlock from HA\n await async_unlock(hass, cluster, entity_id)", "def test_waits_on_existing_lockfile(self):\n self.lock.__enter__()\n self.assertTrue(os.path.exists(self.lock.lockfile_path))\n\n def exit_first_lock():\n time.sleep(0.1)\n self.lock.__exit__(None, None, None)\n thread = threading.Thread(target=exit_first_lock)\n thread.start()\n\n new_lock = disk.DiskDatasetLock(self.dataset, timeout_sec=1)\n new_lock.__enter__()\n\n thread.join()", "async def test_locking(hass: HomeAssistant) -> None:\n client_mock = await init_integration(hass)\n\n await hass.services.async_call(\n LOCK_DOMAIN,\n SERVICE_LOCK,\n {ATTR_ENTITY_ID: \"lock.my_mazda3_lock\"},\n blocking=True,\n )\n await hass.async_block_till_done()\n\n client_mock.lock_doors.assert_called_once()", "def test_lock(self):\n lock = self.tx_client.Lock(\"xyzzy\", 
identifier=\"iddqd\")\n self.assertIdentical(lock._reactor, self.reactor)\n self.assertIdentical(lock._pool, self.pool)\n\n self.assertEqual(lock.path, \"xyzzy\")\n self.assertEqual(lock.identifier, \"iddqd\")", "def test_creates_lockfile_on_enter(self):\n self.lock.__enter__()\n self.assertTrue(os.path.exists(self.lock.lockfile_path))", "def test_update_deployment_state(self):\n pass", "def test_lock_instance(self, instance, instances_steps):\n instances_steps.lock_instance(instance.name)\n instances_steps.unlock_instance(instance.name)", "async def test_lock_setup(hass: HomeAssistant) -> None:\n await init_integration(hass)\n\n entity_registry = er.async_get(hass)\n entry = entity_registry.async_get(\"lock.my_mazda3_lock\")\n assert entry\n assert entry.unique_id == \"JM000000000000000\"\n\n state = hass.states.get(\"lock.my_mazda3_lock\")\n assert state\n assert state.attributes.get(ATTR_FRIENDLY_NAME) == \"My Mazda3 Lock\"\n\n assert state.state == STATE_LOCKED", "def test_retest_deployment_run(self):\n pass", "def testLock(t, env):\n c = env.c1\n c.init_connection()\n # Create a file and partially lock it\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n # Create and replay LOCK ops\n ops = c.use_obj(fh)\n lock_owner = exist_lock_owner4(res.lockid, 1)\n locker = locker4(FALSE, lock_owner=lock_owner)\n ops += [c.lock_op(WRITE_LT, FALSE, 0, 10, locker)]\n _replay(c, ops)", "def test_get_deployment_run(self):\n pass", "def test_disable_locking(tmpdir):\n lock_path = str(tmpdir.join(\"lockfile\"))\n\n old_value = spack.config.get(\"config:locks\")\n\n with spack.config.override(\"config:locks\", False):\n lock = lk.Lock(lock_path)\n\n lock.acquire_read()\n assert not os.path.exists(lock_path)\n\n lock.acquire_write()\n assert not os.path.exists(lock_path)\n\n lock.release_write()\n assert not os.path.exists(lock_path)\n\n lock.release_read()\n assert not os.path.exists(lock_path)\n\n assert old_value == spack.config.get(\"config:locks\")", "def test_release_deployment_run(self):\n pass", "def test_upgrade_apply_user_lock(setup, platform, kubectl, skuba):\n\n setup_kubernetes_version(skuba, PREVIOUS_VERSION)\n\n # lock kured\n kubectl.run_kubectl(\"-n kube-system annotate ds kured weave.works/kured-node-lock='{\\\"nodeID\\\":\\\"manual\\\"}'\")\n\n outs = {}\n for (r, n) in [(\"master\", 0), (\"worker\", 0)]:\n node = \"my-{}-{}\".format(r, n)\n # disable skuba-update.timer\n platform.ssh_run(r, n, \"sudo systemctl disable --now skuba-update.timer\")\n outs[node] = skuba.node_upgrade(\"apply\", r, n)\n assert platform.ssh_run(r, n, \"sudo systemctl is-enabled skuba-update.timer || :\").find(\"disabled\") != -1\n\n assert kubectl.run_kubectl(\"-n kube-system get ds/kured -o jsonpath='{.metadata.annotations.weave\\.works/kured-node-lock}'\").find(\"manual\") != -1\n\n master = outs[\"my-master-0\"]\n assert master.find(\"successfully upgraded\") != -1\n\n worker = outs[\"my-worker-0\"]\n assert worker.find(\"successfully upgraded\") != -1", "def try_aquire_lock():\n minor_print(\"Försöker skaffa låset ...\")\n\n lock_path = GITTED_FOLDER / \"lock\"\n\n try:\n lock_path.touch()\n except FileExistsError:\n return False\n\n shell_command([\"git\", \"add\", lock_path.as_posix()])\n shell_command([\"git\", \"commit\", \"-m\", f\"Aquired lock for {WORLD_NAME}\"])\n return True", "def set_lock_time():\n\n pass", "async def test_one_lock_operation(hass):\n lock_one = await _mock_lock_from_fixture(\n hass, 
\"get_lock.online_with_doorsense.json\"\n )\n lock_details = [lock_one]\n await _create_august_with_devices(hass, lock_details)\n\n lock_abc_name = hass.states.get(\"lock.abc_name\")\n\n assert lock_abc_name.state == STATE_LOCKED\n\n assert lock_abc_name.attributes.get(\"battery_level\") == 92\n assert lock_abc_name.attributes.get(\"friendly_name\") == \"ABC Name\"\n\n data = {}\n data[ATTR_ENTITY_ID] = \"lock.abc_name\"\n assert await hass.services.async_call(\n LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True\n )\n\n lock_abc_name = hass.states.get(\"lock.abc_name\")\n assert lock_abc_name.state == STATE_UNLOCKED\n\n assert lock_abc_name.attributes.get(\"battery_level\") == 92\n assert lock_abc_name.attributes.get(\"friendly_name\") == \"ABC Name\"\n\n assert await hass.services.async_call(\n LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True\n )\n\n lock_abc_name = hass.states.get(\"lock.abc_name\")\n assert lock_abc_name.state == STATE_LOCKED", "def test_update_deployment(self):\n pass", "def lock(*args):", "def test_locking(self):\r\n def verify_asset_locked_state(locked):\r\n \"\"\" Helper method to verify lock state in the contentstore \"\"\"\r\n asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')\r\n content = contentstore().find(asset_location)\r\n self.assertEqual(content.locked, locked)\r\n\r\n def post_asset_update(lock, course):\r\n \"\"\" Helper method for posting asset update. \"\"\"\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)\r\n\r\n # Load the toy course.\r\n module_store = modulestore('direct')\r\n _, course_items = import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['toy'],\r\n static_content_store=contentstore(),\r\n verbose=True\r\n )\r\n course = course_items[0]\r\n verify_asset_locked_state(False)\r\n\r\n # Lock the asset\r\n resp_asset = post_asset_update(True, course)\r\n self.assertTrue(resp_asset['locked'])\r\n verify_asset_locked_state(True)\r\n\r\n # Unlock the asset\r\n resp_asset = post_asset_update(False, course)\r\n self.assertFalse(resp_asset['locked'])\r\n verify_asset_locked_state(False)", "def lock(project, env_spec_name):\n return _update_and_lock(project, env_spec_name, update=False)", "def test_execute_deployment(self):\n pass", "def test_launch_deployment(self):\n pass" ]
[ "0.65398216", "0.65217364", "0.6492108", "0.6301887", "0.62380147", "0.6139231", "0.6107046", "0.60549664", "0.60303235", "0.5951123", "0.5924364", "0.59068596", "0.5851768", "0.5823863", "0.58238256", "0.5823738", "0.5815837", "0.5814343", "0.5811986", "0.57598376", "0.57338905", "0.5725269", "0.5723509", "0.5709862", "0.5708022", "0.5702889", "0.56964624", "0.5679774", "0.5667704", "0.5655753" ]
0.9452139
0
Test case for set_power_schedule_for_deployment_run
def test_set_power_schedule_for_deployment_run(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_project_default_power_schedule(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def test_get_deployment_runs(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_set_deployment_run_lock(self):\n pass", "def test_retest_deployment_run(self):\n pass", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_execute_monitoring_schedule_vendor_v3(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def schedule_deploy():\n\n logger.info(\"Scheduling deploy\")\n scheduler.schedule_job(\"op_deploy\", {}, \"#general\", 60)", "def test_publish_deployment_run(self):\n pass", "def test_execute_monitoring_schedule_manufacturer_v3(self):\n pass", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 
1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_release_deployment_run(self):\n pass", "def test_multiple_sleep_windows(self):\n self.mock_clock.now.return_value = datetime.datetime(2016, 5, 24, 4)\n sleep_windows = [(16, 20), (2, 8)]\n pump_sched = pump_scheduler.PumpScheduler(self.mock_clock,\n sleep_windows)\n self.assertFalse(pump_sched.is_running_pump_allowed())", "async def test_update(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n\n await asyncio.sleep(1)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n # Update 'updated' schedule interval\n interval_schedule.name = 'updated'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=5) # Set time interval to 5 sec\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n await asyncio.sleep(6)\n\n # Assert: only 1 task is running\n tasks = await scheduler.get_running_tasks() # list of current running tasks\n assert len(tasks) == 1\n\n interval_schedule.exclusive = False\n await scheduler.save_schedule(interval_schedule)\n\n # Check able to get same schedule after restart\n # Check fields have been modified\n await self.stop_scheduler(scheduler)\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n schedule = await scheduler.get_schedule(interval_schedule.schedule_id)\n\n # Make sure that the values used by schedule are as expected\n assert schedule.process_name == 'sleep1'\n assert schedule.name == 'updated'\n assert schedule.repeat.seconds == 5\n assert not schedule.exclusive\n\n await self.stop_scheduler(scheduler)", "def test_autoscaling_schedules_unset(self) -> None:\n if self.prod_env:\n schedules = self.autoscaling.describe_scheduled_actions(AutoScalingGroupName='saints-xctf-server-prod-asg')\n self.assertTrue(len(schedules.get('ScheduledUpdateGroupActions')) == 0)\n else:\n self.assertTrue(all([\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-morning',\n recurrence='30 11 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-morning',\n recurrence='30 13 * * 1-5', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekday-afternoon',\n recurrence='30 22 * * 1-5', max_size=1, min_size=1, desired_size=1),\n self.validate_autoscaling_schedule('saints-xctf-server-offline-weekday-night',\n recurrence='30 3 * * 2-6', max_size=0, min_size=0, desired_size=0),\n self.validate_autoscaling_schedule('saints-xctf-server-online-weekend', recurrence='30 11 * * 0,6',\n max_size=1, min_size=1, desired_size=1),\n 
self.validate_autoscaling_schedule('saints-xctf-server-offline-weekend', recurrence='30 3 * * 0,1',\n max_size=0, min_size=0, desired_size=0)\n ]))", "def test_put_monitoring_schedule_vendor_v3(self):\n pass", "def _run_scheduled_weekly_tasks():\n worker.add_task(weekly.run)", "def test_update_deployment(self):\n pass", "def test_add_recurring_schedule(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_get_deployment_run_reports(self):\n pass", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test_put_monitoring_schedule_manufacturer_v3(self):\n pass" ]
[ "0.7180433", "0.65632725", "0.64348245", "0.6323346", "0.6287594", "0.62824196", "0.61665934", "0.6153059", "0.61351377", "0.610851", "0.60932255", "0.609056", "0.6041572", "0.6008032", "0.59638983", "0.592223", "0.5913363", "0.5907483", "0.5869108", "0.58620614", "0.58414155", "0.58030295", "0.57990146", "0.57507706", "0.5676826", "0.56442493", "0.5635822", "0.56189275", "0.5611121", "0.55775887" ]
0.96088576
0
Test case for set_project_default_power_schedule
def test_set_project_default_power_schedule(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_power_schedule_for_deployment_run(self):\n pass", "def _use_default_schedule(self):\n def gen_day():\n dl = []\n ll = [-1, '', -1, '', '']\n for i in range(8):\n dl.append(ll[:])\n rl = []\n for i in range(4):\n rl.append(dl[:])\n return rl\n\n self.schedule = {\n 'current_week': [1, date.today().isocalendar()[1]],\n 'lessons_time': [\n ['8:00', '9:35'],\n ['9:45', '11:20'],\n ['11:40', '13:15'],\n ['13:25', '15:00'],\n ['15:20', '16:55'],\n ['17:05', '18:40'],\n ['18:45', '20:20'],\n ['20:25', '22:00']\n ],\n 'schedule': {\n 'Monday': gen_day(),\n 'Tuesday': gen_day(),\n 'Wednesday': gen_day(),\n 'Thursday': gen_day(),\n 'Friday': gen_day(),\n 'Saturday': gen_day()\n },\n 'subgroup': 0\n }", "def __init__(self,\n schedule_timesteps,\n final_p,\n framework,\n initial_p=1.0,\n power=2.0):\n super().__init__(framework=framework)\n assert schedule_timesteps > 0\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p\n self.power = power", "def test_change_default_dt_static(self):\n ct.set_defaults('control', default_dt=0)\n assert ct.tf(1, 1).dt is None\n assert ct.ss([], [], [], 1).dt is None", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def test_request_wmts_default_time_ppng(self):\n ref_hash = '6df00a0095f3df8586572988f6cb7e84'\n req_url = r'http://localhost/reproject/test/wmts/wmts.cgi?layer=test_daily_png&tilematrixset=GoogleMapsCompatible_Level3&Service=WMTS&Request=GetTile&Version=1.0.0&Format=image%2Fpng&TileMatrix=0&TileCol=0&TileRow=0&TIME=default'\n if DEBUG:\n print('\\nTesting: Request current (time=default) PPNG tile via WMTS')\n print('URL: ' + req_url)\n check_result = check_tile_request(req_url, ref_hash)\n self.assertTrue(check_result, 'Current (TIME=default) WMTS PPNG Tile Request does not match what\\'s expected. 
URL: ' + req_url)", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def test_update_instances_schedule_state(self):\n pass", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=\"rabi\") as default_schedule:\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "def test_change_default_dt(self, dt):\n ct.set_defaults('control', default_dt=dt)\n assert ct.ss(1, 0, 0, 1).dt == dt\n assert ct.tf(1, [1, 1]).dt == dt\n nlsys = ct.NonlinearIOSystem(\n lambda t, x, u: u * x * x,\n lambda t, x, u: x, inputs=1, outputs=1)\n assert nlsys.dt == dt", "def test_get_schedule_with_unbound_parameter(self):\n param1 = Parameter(\"param1\")\n param2 = Parameter(\"param2\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param2=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def simulate(self):\n\t\tcounter = 1\n\t\tweek = 1\n\t\twhile counter != 0:\n\t\t\tself.oneWeek(week)\n\t\t\tweek += 1\n\t\t\tcounter = len(self.teams[0].schedule)", "def default():\n return DefaultWindPower.default()", "def test_binding_unassigned_parameters(self):\n param = Parameter(\"param\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n with self.assertRaises(PulseError):\n inst_map.get(\"target_sched\", (0,), P0=0)", "def test_datetime_default(self):\r\n default = datetime.now()\r\n prop = DateTime(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def _default_gate_schedule(self, backend: Optional[Backend] = None):\n\n if self.experiment_options.frequency_shift is None:\n try:\n anharm, _ = backend.properties().qubit_property(self.physical_qubits[0])[\n \"anharmonicity\"\n ]\n self.set_experiment_options(frequency_shift=anharm)\n except 
KeyError as key_err:\n raise CalibrationError(\n f\"The backend {backend} does not provide an anharmonicity for qubit \"\n f\"{self.physical_qubits[0]}. Use EFRabi.set_experiment_options(frequency_shift=\"\n f\"anharmonicity) to manually set the correct frequency for the 1-2 transition.\"\n ) from key_err\n except AttributeError as att_err:\n raise CalibrationError(\n \"When creating the default schedule without passing a backend, the frequency needs \"\n \"to be set manually through EFRabi.set_experiment_options(frequency_shift=..).\"\n ) from att_err\n\n amp = Parameter(\"amp\")\n with pulse.build(backend=backend, name=self.__rabi_gate_name__) as default_schedule:\n with pulse.frequency_offset(\n self.experiment_options.frequency_shift,\n pulse.DriveChannel(self.physical_qubits[0]),\n ):\n pulse.play(\n pulse.Gaussian(\n duration=self.experiment_options.duration,\n amp=amp,\n sigma=self.experiment_options.sigma,\n ),\n pulse.DriveChannel(self.physical_qubits[0]),\n )\n\n return default_schedule", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def __init__(self, schedule_timesteps, final_p, initial_p=1.0):\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p", "def __init__(self, schedule_timesteps, final_p, initial_p=1.0):\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p", "def schedule_task(self, Tau):\n return random.choice(self.tasks)", "def svn_info_t_schedule_set(svn_info_t_self, svn_wc_schedule_t_schedule): # real signature unknown; restored from __doc__\n pass", "def test_execute_monitoring_schedule_vendor_v3(self):\n pass", "def test_add_recurring_schedule(self):\n pass", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)", "def _create_schedules(self):\n\n ''''''", "def test_request_twms_default_ppng(self):\n ref_hash = '6df00a0095f3df8586572988f6cb7e84'\n req_url = 
r'http://localhost/reproject/test/twms/twms.cgi?request=GetMap&layers=test_daily_png&srs=EPSG:3857&format=image%2Fpng&styles=&width=256&height=256&bbox=-20037508.34278925,-20037508.34278925,20037508.34278925,20037508.34278925&time=default'\n if DEBUG:\n print('\\nTesting: Request current (default TIME) PPNG tile via TWMS')\n print('URL: ' + req_url)\n check_result = check_tile_request(req_url, ref_hash)\n self.assertTrue(check_result, 'TWMS current PPNG request does not match what\\'s expected. URL: ' + req_url)", "def test_default_calendar(config):\n if config['default']['default_calendar'] is None:\n pass\n elif config['default']['default_calendar'] not in config['calendars']:\n logger.fatal(\n \"in section [default] {} is not valid for 'default_calendar', \"\n \"must be one of {}\".format(config['default']['default_calendar'],\n config['calendars'].keys())\n )\n raise InvalidSettingsError()\n elif config['calendars'][config['default']['default_calendar']]['readonly']:\n logger.fatal('default_calendar may not be read_only!')\n raise InvalidSettingsError()", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )" ]
[ "0.77880347", "0.6669773", "0.6062775", "0.5974395", "0.5895277", "0.58823246", "0.5771874", "0.5720453", "0.5702882", "0.56914073", "0.5675972", "0.5662644", "0.5597221", "0.55601764", "0.5552833", "0.5542259", "0.5505154", "0.548294", "0.5466301", "0.54607344", "0.54607344", "0.5411417", "0.53896385", "0.53772193", "0.53639007", "0.5361151", "0.53460205", "0.5332444", "0.5328677", "0.5325262" ]
0.963989
0
Test case for set_project_default_virtualization_realm
def test_set_project_default_virtualization_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_virtualization_realm_active(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_get_project_virt_realms(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_get_team_owned_or_managed_virtualization_realms(self):\n pass", "def default_virtual_machine(self, default_virtual_machine):\n\n self._default_virtual_machine = default_virtual_machine", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_create_virtual_account(self):\n pass", "def test_specify_non_default_tenant():\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_create_hyperflex_vcenter_config_policy(self):\n pass", "def test_switch_vlan_pool_default(self):\n self.assertEqual(self.options.vlan_pool, {})", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_entities__NoFieldCustomization__default_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n assert u'Time zone' == nfc.default_value(\n IAddressBook['time_zone'], 'label')", "def test_set_project_default_power_schedule(self):\n pass", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_patch_hyperflex_vcenter_config_policy(self):\n pass", "def testNeedProjectSetup(self, mock_ans):\n # Test need project setup.\n self.gcp_env_runner.project = \"\"\n self.gcp_env_runner.zone = \"\"\n self.assertTrue(self.gcp_env_runner._NeedProjectSetup())\n # Test no need project setup and get user's answer.\n self.gcp_env_runner.project = \"test_project\"\n self.gcp_env_runner.zone = \"test_zone\"\n self.gcp_env_runner._NeedProjectSetup()\n mock_ans.assert_called_once()" ]
[ "0.79450065", "0.7679045", "0.75553143", "0.74943614", "0.7327702", "0.6558012", "0.6478745", "0.6472373", "0.6440592", "0.6253826", "0.6249005", "0.6163103", "0.61597", "0.60134804", "0.58656627", "0.5804564", "0.5803319", "0.5653255", "0.56303865", "0.56241554", "0.55969036", "0.55758786", "0.5552903", "0.5547683", "0.5527157", "0.55216086", "0.5495751", "0.54937446", "0.5467127", "0.5422874" ]
0.9590921
0
Test case for set_project_itar_information
def test_set_project_itar_information(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_get_project(self):\n pass", "def test_patch_project(self):\n pass", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_add_project(self):\n pass", "def setUp(self):\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"", "def test_create_project_request(self):\n pass", "def update_project_info(data):\n\tif 'pk' in data:\n\t\tif data['pk'] is not None:\n\t\t\tproject = get_or_none(ProjectInfo, pk=data['pk'])\n\t\t\tif project:\n\t\t\t\tproject.name = data['name']\n\t\t\t\tproject.description = data['description']\n\t\t\t\tproject.start_date = data['start_date']\n\t\t\t\tproject.end_date = data['end_date']\n\t\t\t\tproject.save()\n\t\t\t\tprint ('Updated')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False\n\n\telse:\n\t\tprint (\"please provide pk for updating\")\n\t\treturn False", "def test_list_project(self):\n pass", "def test_get_projects(self):\n pass", "def test_fields_on_new_project(new_project) -> None:\n\n # assert isinstance(new_project.id, int) # not created yet, hasnt been committed\n assert isinstance(new_project.token, uuid.UUID)\n assert new_project.title == \"Lord of the Rings\"\n assert new_project.email == \"J. R. R. Tolkien\"\n assert new_project.phone == \"5558675309\"\n assert new_project.verification is None", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def create_project_info(data):\n\t\n\tproject = ProjectInfo()\n\tproject.name = data['name']\n\tproject.description = data['description']\n\tproject.start_date = data['start_date']\n\tproject.end_date = data['end_date']\n\tproject.save()\n\tprint ('Inserted')\n\treturn True", "def set_project_values(project, data):\n project.hashtag = data['hashtag']\n if 'name' in data and len(data['name']) > 0:\n project.name = data['name']\n else:\n project.name = project.hashtag.replace('-', ' ')\n if 'summary' in data and len(data['summary']) > 0:\n project.summary = data['summary']\n has_longtext = 'longtext' in data and len(data['longtext']) > 0\n if has_longtext:\n project.longtext = data['longtext']\n if 'autotext_url' in data and data['autotext_url'].startswith('http'):\n project.autotext_url = data['autotext_url']\n if not project.source_url or project.source_url == '':\n project.source_url = data['autotext_url']\n # MAX progress\n if 'levelup' in data and 0 < project.progress + data['levelup'] * 10 < 50:\n project.progress = project.progress + data['levelup'] * 10\n # return jsonify(data=data)\n if project.autotext_url is not None and not has_longtext:\n # Now try to autosync\n project = AddProjectData(project)\n return project", "def setInfo(*args):", "def test_change_title(self):\n test_title = 'Some Title'\n support.create_project(self, 'igor')\n cd.project.title = test_title\n self.assertEqual(cd.project.title, test_title)", "def test_project_detail(self):\n rv = self.app.get(\"/Assignment0\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"2015-02-04 21:57:12.156363\", rv.data)\n self.assertIn(\"221\", rv.data)\n self.assertIn(\"commit assignment0\", rv.data)\n\n self.assertIn(\"Assignment0/Procfile\", rv.data)\n self.assertIn(\"Assignment0/README.md\", rv.data)", "def test_set_project(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n 
ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), 'value')\n tl_event = ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n self.assertIsNotNone(tl_event)\n self.assertEqual(tl_event.classified, True)\n self.assertEqual(tl_event.extra_data, {'value': 'value'})", "def test_save_project_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n inventory_[\"key\"] = \"value\"\n inventory_[\"key2\"] = \"value2\"\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n project = io.find_one({\"type\": \"project\", \"name\": PROJECT_NAME})\n assert_equals(project[\"data\"][\"key\"], \"value\")\n assert_equals(project[\"data\"][\"key2\"], \"value2\")", "def testWriteProjectInformation(self):\n project_information = resources.VSProjectInformation()\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteProjectInformation(project_information)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'<VisualStudioProject\\r\\n'\n b'\\tProjectType=\"Visual C++\"\\r\\n'\n b'\\tVersion=\"9,00\"\\r\\n'\n b'\\tName=\"\"\\r\\n'\n b'\\tProjectGUID=\"{}\"\\r\\n'\n b'\\tRootNamespace=\"\"\\r\\n'\n b'\\tTargetFrameworkVersion=\"131072\"\\r\\n'\n b'\\t>\\r\\n'\n b'\\t<Platforms>\\r\\n'\n b'\\t\\t<Platform\\r\\n'\n b'\\t\\t\\tName=\"Win32\"\\r\\n'\n b'\\t\\t/>\\r\\n'\n b'\\t</Platforms>\\r\\n'\n b'\\t<ToolFiles>\\r\\n'\n b'\\t</ToolFiles>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def test_put_project(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.project.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': self.category.pk,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.project.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'roles': {\n 
str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': True,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n },\n str(self.project.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner),\n 'inherited': False,\n 'sodar_uuid': str(self.project.get_owner().sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def _parse_project_params(self, response_tree, data):\n # if self.project == 'TWC':\n # data['lithium'] = response_tree.find('lithiumresults').text\n\n if self.project == 'ILAR':\n data['ilar_title'] = response_tree.find('ilartitle').text\n data['ilar_rank'] = int(response_tree.find('ilarrank').text)\n\n if response_tree.find('ilarresponse').text is None:\n data['ilar_response'] = None\n else:\n data['ilar_response'] = response_tree.find(\n 'ilarresponse').text.encode('ascii', 'ignore')", "def test_set_metadata(self, bibliographic_data):\n # Here's a provider that is not associated with any particular\n # Collection.\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n assert None == provider.collection\n\n # It can't set circulation data, because it's not a\n # CollectionCoverageProvider.\n assert not hasattr(provider, 'set_metadata_and_circulationdata')\n\n # But it can set metadata.\n identifier = self._identifier(\n identifier_type=Identifier.OVERDRIVE_ID,\n foreign_id=bibliographic_data.primary_identifier.identifier,\n )\n assert [] == identifier.primarily_identifies\n result = provider.set_metadata(identifier, bibliographic_data)\n\n # Here's the proof.\n edition = provider.edition(identifier)\n assert \"A Girl Named Disaster\" == edition.title\n\n # If no metadata is passed in, a CoverageFailure results.\n result = provider.set_metadata(identifier, None)\n assert isinstance(result, CoverageFailure)\n assert \"Did not receive metadata from input source\" == result.exception\n\n # If there's an exception setting the metadata, a\n # CoverageFailure results. This call raises a ValueError\n # because the primary identifier & the edition's primary\n # identifier don't match.\n bibliographic_data.primary_identifier = IdentifierData(\n type=Identifier.OVERDRIVE_ID, identifier=\"abcd\"\n )\n result = provider.set_metadata(identifier, bibliographic_data)\n assert isinstance(result, CoverageFailure)\n assert \"ValueError\" in result.exception", "def testWriteProjectInformation(self):\n project_information = resources.VSProjectInformation()\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteProjectInformation(project_information)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b' <PropertyGroup Label=\"Globals\">\\r\\n'\n b' <ProjectGuid>{}</ProjectGuid>\\r\\n'\n b' <RootNamespace></RootNamespace>\\r\\n'\n b' </PropertyGroup>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def test_list_project_request(self):\n pass" ]
[ "0.65574634", "0.65574634", "0.64233893", "0.64083505", "0.6405177", "0.6405177", "0.62742543", "0.6071113", "0.5999955", "0.5993673", "0.5983071", "0.5961538", "0.5895411", "0.58645904", "0.585192", "0.585192", "0.585192", "0.5847521", "0.5784457", "0.57828516", "0.5772661", "0.5741757", "0.57362646", "0.5716583", "0.5704325", "0.56790555", "0.5646753", "0.56378025", "0.5623378", "0.56168354" ]
0.9294886
0
Test case for set_project_limits
def test_set_project_limits(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def setPTLimits(*args):\n args[0].Limit.PTLimit.pt_limit = args[1]", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_creation_incorrect_change_softbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, softbounds=[0, 10])\n int_a.softbounds = [0, 10, 20]", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def test_creation_incorrect_softbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, softbounds=[0, 10, 20])", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def test_set_invalid_scope_project(self):\n setting_name = 'project_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n 
throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_creation_incorrect_change_hardbounds():\n with pytest.raises(ValueError) as __:\n value = 1\n int_a = param.Integer(value=value, hardbounds=[0, 10])\n int_a.hardbounds = [0, 10, 20]", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def _process_threadpool_limits_initializier():\n import numpy # required for loky's autodetection\n from threadpoolctl import threadpool_limits\n\n threadpool_limits(limits=1)", "def create_limit(self, test_suite_name, test_instance, test_pins, test_modes, test_numbers, usl_list, lsl_list, test_limits_dict=None):\n if not test_limits_dict:\n test_limits_dict = self.test_limits_dict\n if not isinstance(test_modes, list):\n test_modes = [test_modes]\n if not isinstance(test_pins, list):\n test_pins = [test_pins]\n if not isinstance(test_numbers, list):\n test_numbers = [test_numbers]\n if not isinstance(usl_list, list):\n usl_list = [usl_list]\n if not isinstance(lsl_list, list):\n lsl_list = [lsl_list]\n pass", "def SetLimit(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetLimit(self, *args)", "def test_returns_limit_projects(self):\n # Arrange\n # Create and arrange test projects\n self.arrange_projects()\n # Act\n response = self.client.get(\n f\"{self.url}?limit=1\", headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)", "def test_set_invalid_scope_project_user(self):\n setting_name = 'project_user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_creation_incorrect_hardbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, hardbounds=[0, 10, 20])", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def _collection_sample_limits(limits):\n build = lambda gran, coll : {'granule': gran, 'collection': coll}\n bound = lambda lower, value, upper : min(max(lower, value), upper)\n\n default_granule_limit = 10\n default_collection_limit = 20\n\n if limits is None:\n limit_obj = build(default_granule_limit, default_collection_limit)\n elif isinstance(limits, int):\n # assume the value is the granule sample limit\n limit_obj = build(limits, default_collection_limit)\n elif isinstance(limits, dict):\n limit_obj = limits\n elif isinstance(limits, list):\n # user may have sent in 0 or more values, first 2 are significant\n if len(limits)>1:\n # at least 2 exist\n limit_obj = build(limits[0], limits[1])\n elif len(limits)>0:\n # at least one exists\n limit_obj = 
build(limits[0], default_collection_limit)\n else:\n # assume none exist\n limit_obj = build(default_granule_limit, default_collection_limit)\n else:\n limit_obj = build(default_granule_limit, default_collection_limit)\n\n if limit_obj.get(\"collection\") is None:\n limit_obj[\"collection\"] = default_collection_limit\n\n if limit_obj.get(\"granule\") is None:\n limit_obj[\"granule\"] = default_granule_limit\n\n limit_obj[\"collection\"] = bound(1, limit_obj[\"collection\"], 200)\n limit_obj[\"granule\"] = bound(1, limit_obj[\"granule\"], 100)\n\n return limit_obj", "def services_limit(self, project_id, limit):\n self.storage_controller.set_service_limit(\n project_id=project_id,\n project_limit=limit)", "def check_resource_limit(self, selection_count, population_count):\n p = self.ctx.policy\n max_resource_limits = MaxResourceLimit(p, selection_count, population_count)\n return max_resource_limits.check_resource_limits()", "def test_get_limit_no_dependants(self):\n self.assertEqual(\n gross_income.get_limit(),\n gross_income.BASE_LIMIT\n )", "def set_glidein_config_limits(self, limits_data):\n self.glidein_config_limits = limits_data", "def test_operate_resource_cap_max(self, on):\n\n if on is False:\n override = {}\n else:\n override = {\"techs.test_supply_plus.constraints.resource_cap_max\": 1e6}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n if on is False:\n assert check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert np.isinf(\n m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item()\n )\n elif on is True:\n assert not check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item() == 1e6", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum" ]
[ "0.64873147", "0.64516187", "0.6388095", "0.6365517", "0.61912036", "0.6153099", "0.61436015", "0.6053346", "0.6042491", "0.601646", "0.5969472", "0.59587514", "0.5958157", "0.5952138", "0.59172004", "0.5898083", "0.5892134", "0.5890774", "0.5870673", "0.5806265", "0.5777135", "0.57598054", "0.57560825", "0.5732409", "0.5703185", "0.56937575", "0.56446207", "0.5644295", "0.5643928", "0.5634766" ]
0.95085496
0
Test case for set_virtualization_realm_active
def test_set_virtualization_realm_active(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_virtualization_realm(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_set_project_default_virtualization_realm(self):\n pass", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_get_team_owned_or_managed_virtualization_realms(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_api_object_active_property(self, api_object):\n api_object.status = 'ACTIVE'\n assert api_object.active\n assert not api_object.creating", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_modify_virtual_service(self):\n pass", "def test_powerup(self):\n self.assertIdentical(self.realm, IRealm(self.store))", "def test_create_virtual_account(self):\n pass", "def test_update_hyperflex_auto_support_policy(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_get_virtual_accounts(self):\n pass", "def test_update_hyperflex_proxy_setting_policy(self):\n pass", "def test_get_project_virt_realms(self):\n pass", "def test_list_virt_realms_in_cloud(self):\n pass" ]
[ "0.80295473", "0.78152186", "0.7771694", "0.77008396", "0.72018975", "0.70542246", "0.6944395", "0.6893473", "0.6855249", "0.68041956", "0.65922964", "0.6319137", "0.6224216", "0.61792886", "0.6141512", "0.5971205", "0.58212847", "0.5810792", "0.57516867", "0.57303715", "0.5580054", "0.5529325", "0.55263376", "0.55077356", "0.55008644", "0.5466995", "0.54366827", "0.54050034", "0.53905195", "0.5388034" ]
0.9532621
0
Test case for share_template_registration
def test_share_template_registration(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unshare_template_registration(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_register_template(self):\n pass", "def test_update_template_registration(self):\n pass", "def test_create_template_subsciption(self):\n pass", "def test_list_template_registrations(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_unregister_template(self):\n pass", "def test_create_subscription_template(self):\n pass", "def test_create_namespaced_template(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_template_feedback(self):\r\n pass", "def test_template_home(self):\n self.assertTemplateUsed(self.response, 'index.html')", "def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')", "def test_create_namespaced_processed_template(self):\n pass", "def test_correct_template(self):\n self.assertCorrectTemplateUsed('common/home.html')", "async def test_setup_duplicate_resource_template(hass: HomeAssistant) -> None:\n respx.get(\"http://localhost\") % HTTPStatus.OK\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"resource_template\": \"http://localhost\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0", "def test_create_activity_template(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_template(self):\n self.assertTemplateUsed(self.response, 'formularios.html')", "def _assertPageTemplatesUsed(self, response):\n self.assertGSoCTemplatesUsed(response)\n self.assertTemplateUsed(response, 'modules/gsoc/participants/base.html')\n self.assertTemplateUsed(\n response, 'modules/gsoc/participants/_mentors_list.html')\n self.assertTemplateUsed(response, 'soc/list/lists.html')\n self.assertTemplateUsed(response, 'soc/list/list.html')", "def test_search_template(self):\n self.assertTemplateUsed(self.response, 'rango/search.html', f\"{FAILURE_HEADER}Your search() view does not use the expected template.{FAILURE_FOOTER}\")", "def test_team_template_folders_post(self):\n pass", "def test_create_device_template(self):\n pass", "def test_create_template_for_all_namespaces(self):\n pass", "def test_template(self):\n # Setup\n # Test\n response = self.client.get(self.WIZARD_URL)\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"publish/feed_form.html\")", "def test_template(self):\n # Setup\n # Test\n response = self.client.get(self.WIZARD_URL)\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"publish/feed_form.html\")", "def test_get_subscription_templates(self):\n pass", "def test_dashboards_v2_share(self):\n pass", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")" ]
[ "0.81283075", "0.79852206", "0.784368", "0.78152233", "0.70685333", "0.70141304", "0.6897447", "0.6868497", "0.6802155", "0.66807735", "0.66322416", "0.65081495", "0.64922345", "0.64888346", "0.64753515", "0.6442192", "0.6432612", "0.6390854", "0.63531464", "0.63472337", "0.63016856", "0.6249711", "0.62309533", "0.6218897", "0.6214042", "0.6210229", "0.6210229", "0.6202925", "0.618293", "0.61456853" ]
0.94731945
0
Test case for submit_asset_to_submission_service
def test_submit_asset_to_submission_service(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_submission_service(self):\n pass", "def test_submit_report(self):\n from .test.encrypted_blob_generator import ReportEncryptor\n url = submit_report_ep;\n test_post_sample_submission = copy.copy(self.post_sample_submission)\n test_post_sample_submission['encrypted_blob']=ReportEncryptor('reportivist_rest/test/server_encryption_key').encrypt_report(json.dumps(self.sample_report))\n resp = self.client.post(url, test_post_sample_submission)\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_add_submission_service_to_project(self):\n pass", "def submit(self, script, **kwargs):\n raise NotImplementedError()", "def test_submit_attachment(self):\n from .test.encrypted_blob_generator import ReportEncryptor\n url = submit_attachment_ep\n\n #now we need to turn the attachment data to binary to simulate real\n #app condition\n # binary_submission = bytearray(unhexlify(hex(self.sample_attachment['attachment_id'])[2:-1])) + bytearray(unhexlify(hex(self.sample_attachment['client_id'])[2:-1])) + bytearray(unhexlify(hex(self.sample_attachment['report_id'])[2:-1])) + bytearray(self.sample_attachment['time-stamp']) + bytearray(self.sample_attachment['attachment_type'] + chr(0)*(8-len(self.sample_attachment['attachment_type'])))\n\n #first we need to decode the base64 as it is just facilitating the submission.\n # binary_submission += bytearray(base64.decodestring(self.sample_attachment['attachment_data']))\n\n #encrypting the attechement\n from Crypto import Random\n from Crypto.Cipher import AES\n from Crypto.Util import Counter\n import binascii\n\n iv = Random.new().read(16)\n enc_key = Random.new().read(32)\n test_sample_attachment = copy.copy(self.sample_attachment)\n test_sample_attachment['encryption_key'] = base64.encodestring(enc_key)\n test_sample_attachment['encryption_iv'] = base64.encodestring(iv)\n\n #we have all meta data ready now\n test_post_sample_submission = copy.copy(self.post_sample_submission)\n test_post_sample_submission['encrypted_blob'] = ReportEncryptor('reportivist_rest/test/server_encryption_key').encrypt_report(json.dumps(test_sample_attachment))\n\n encryptor = AES.new(enc_key, AES.MODE_CTR, counter=Counter.new(128, initial_value=int(binascii.hexlify(iv), 16)))\n\n with open(self.sample_attachment_file, \"rb\") as data_file:\n with open(self.sample_attachment_file + \".enc\", \"w\") as encrypted_file:\n encrypted_file.write(encryptor.encrypt(data_file.read()))\n \n with open(self.sample_attachment_file + \".enc\") as encrypted_data:\n test_post_sample_submission['attachment_data'] = encrypted_data \n\n resp = self.client.post(url, test_post_sample_submission)\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)", "def test_submit_for_endorsement(self):", "def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def test_get_submission(self):\n # creating a submission\n sub_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n # getting it from the service\n get_response = get_submissions(self, self.token)\n response_data = json.loads(get_response.data.decode())\n self.assertTrue(response_data['data'][0]['text_count']==2)\n self.assertTrue(isinstance(response_data['data'][0]['texts'], list))", "def 
test_update_asset(self):\n pass", "def test_file_submission(self):\n response = self.call_file_submission()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.headers.get(\"Content-Type\"), \"application/json\")\n\n json = response.json\n self.assertIn(\"test1.csv\", json[\"appropriations_key\"])\n self.assertIn(\"test2.csv\", json[\"award_financial_key\"])\n self.assertIn(CONFIG_BROKER[\"award_file_name\"], json[\"award_key\"])\n self.assertIn(\"test4.csv\", json[\"program_activity_key\"])\n self.assertIn(\"credentials\", json)\n\n credentials = json[\"credentials\"]\n for requiredField in (\"AccessKeyId\", \"SecretAccessKey\", \"SessionToken\", \"SessionToken\"):\n self.assertIn(requiredField, credentials)\n self.assertTrue(len(credentials[requiredField]))\n\n self.assertIn(\"bucket_name\", json)\n self.assertTrue(len(json[\"bucket_name\"]))\n\n file_results = self.upload_file_by_url(\"/\" + json[\"appropriations_key\"], \"test1.csv\")\n self.assertGreater(file_results['bytesWritten'], 0)\n\n # Test that job ids are returned\n response_dict = json\n file_keys = [\"program_activity\", \"award_financial\", \"appropriations\"]\n with create_app().app_context():\n sess = GlobalDB.db().session\n for key in file_keys:\n id_key = '{}_id'.format(key)\n self.assertIn(id_key, response_dict)\n job_id = response_dict[id_key]\n self.assertIsInstance(job_id, int)\n # Check that original filenames were stored in DB\n original_filename = sess.query(Job).filter(Job.job_id == job_id).one().original_filename\n self.assertEquals(original_filename, self.filenames[key])\n # check that submission got mapped to the correct user\n submission_id = response_dict[\"submission_id\"]\n self.file_submission_id = submission_id\n submission = sess.query(Submission).filter(Submission.submission_id == submission_id).one()\n self.assertEqual(submission.user_id, self.submission_user_id)\n # Check that new submission is unpublished\n self.assertEqual(submission.publish_status_id, PUBLISH_STATUS_DICT['unpublished'])\n\n # Call upload complete route\n finalize_response = self.check_upload_complete(response_dict[\"appropriations_id\"])\n self.assertEqual(finalize_response.status_code, 200)", "def submit(id, host):", "def test_update_test_asset(self):\n pass", "def _submitInstance( self, imageName, workDir ):\n return S_OK()", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def test_submissions(self):\r\n # Basic case, things go well.\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"submitted\")\r\n\r\n # We post, but Software Secure doesn't like what we send for some reason\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_error):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")\r\n\r\n # We try to post, but run into an error (in this case a newtork connection error)\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_unavailable):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")", "def test_add_asset_share_feed(self):\n pass", "def submit(self, content):\n pass", "def test_update_submission(self):\n self.call_file_submission()\n # note: this is a quarterly test submission, so\n # updated dates must still reflect a quarter\n if CONFIG_BROKER[\"use_aws\"]:\n update_json = {\"existing_submission_id\": self.updateSubmissionId,\n 
\"award_financial\": \"updated.csv\",\n \"reporting_period_start_date\": \"04/2016\",\n \"reporting_period_end_date\": \"06/2016\"}\n else:\n # If local must use full destination path\n file_path = CONFIG_BROKER[\"broker_files\"]\n update_json = {\"existing_submission_id\": self.updateSubmissionId,\n \"award_financial\": os.path.join(file_path, \"updated.csv\"),\n \"reporting_period_start_date\": \"04/2016\",\n \"reporting_period_end_date\": \"06/2016\"}\n # Mark submission as published\n with create_app().app_context():\n sess = GlobalDB.db().session\n update_submission = sess.query(Submission).filter(Submission.submission_id == self.updateSubmissionId).one()\n update_submission.publish_status_id = PUBLISH_STATUS_DICT['published']\n sess.commit()\n update_response = self.app.post_json(\"/v1/submit_files/\", update_json,\n headers={\"x-session-id\": self.session_id})\n self.assertEqual(update_response.status_code, 200)\n self.assertEqual(update_response.headers.get(\"Content-Type\"), \"application/json\")\n\n json = update_response.json\n self.assertIn(\"updated.csv\", json[\"award_financial_key\"])\n submission_id = json[\"submission_id\"]\n submission = sess.query(Submission).filter(Submission.submission_id == submission_id).one()\n self.assertEqual(submission.cgac_code, \"SYS\") # Should not have changed agency name\n self.assertEqual(submission.reporting_start_date.strftime(\"%m/%Y\"), \"04/2016\")\n self.assertEqual(submission.reporting_end_date.strftime(\"%m/%Y\"), \"06/2016\")\n self.assertEqual(submission.publish_status_id, PUBLISH_STATUS_DICT['updated'])", "def test_import_test_asset(self):\n pass", "def test_get_test_asset(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_create_system_asset(self):\n pass", "def submit(self):\n raise NotImplementedError()", "def test_import_software_asset(self):\n pass", "async def transfer_asset(request):\n \n required_fields = ['label', 'source', 'target' , 'amount' ,'resource' ]\n common.validate_fields(required_fields, request.json)\n\n transfer = _create_transfer_dict(request)\n sender = _create_transfer_participant(request.json, transfer)\n signer = await common.get_signer(request)\n\n # print(\"transfer =======> \", transfer)\n # print(\"sender =========> \", sender)\n\n batches, batch_id = transaction_creation.transfer_asset(\n txn_key = signer,\n batch_key = request.app.config.SIGNER,\n identifier = transfer['id'],\n label = transfer.get('label'),\n sender = sender,\n amount = transfer['amount'])\n\n # print(\"batches =========> \", batches)\n\n await messaging.send(\n request.app.config.VAL_CONN,\n request.app.config.TIMEOUT,\n batches)\n\n await messaging.check_batch_status(request.app.config.VAL_CONN, batch_id)\n\n return response.json({\"transfer\" : \"asad\"})", "def test_update_test_asset_content(self):\n pass", "def upload_asset(ctx, asset, release):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Uploading {} to release {}...'\n .format(os.path.basename(asset), release), break_line=False)\n asset_url = gh.upload_asset(asset=asset, release=release)\n log.checkmark()\n log.echo('Uploaded asset: {}'.format(asset_url))\n return asset_url\n except BaseException as _:\n log.xmark()\n raise", "def test_upload_job_description_file_post(self):\n pass", "def _call_submit(submit_cmd, script, pretend):\n submit_cmd_string = \" \".join(submit_cmd)\n if pretend:\n print(f\"# Submit command: {submit_cmd_string}\")\n print(script)\n print()\n else:\n with tempfile.NamedTemporaryFile() as tmp_submit_script:\n 
tmp_submit_script.write(str(script).encode(\"utf-8\"))\n tmp_submit_script.flush()\n submit_cmd.append(tmp_submit_script.name)\n try:\n subprocess.check_output(submit_cmd, stderr=subprocess.STDOUT, text=True)\n except subprocess.CalledProcessError as error:\n raise SubmitError(\n f\"Error when calling submission command {submit_cmd_string}:\\n{error.output}\"\n )\n\n return True", "def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)" ]
[ "0.6690172", "0.66271394", "0.6500486", "0.64511997", "0.63678914", "0.61843795", "0.6177509", "0.6070321", "0.60634094", "0.6015269", "0.6006148", "0.597104", "0.59475404", "0.59474057", "0.59460247", "0.5938888", "0.5933266", "0.59007806", "0.58934855", "0.5891681", "0.5877461", "0.58623946", "0.58580023", "0.58267134", "0.582569", "0.5823176", "0.5818983", "0.58030623", "0.57913417", "0.57567376" ]
0.9587426
0
Test case for unassign_managing_team
def test_unassign_managing_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_team_manager_from_team(self):\n pass", "def test_handle_unassign_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project unassign 1\",\r\n user)\r\n assert (resp, code) == (\"Project successfully unassigned!\", 200)", "def test_assign_managing_team(self):\n pass", "def test_teams_remove_user_from_team_v2(self):\n pass", "def test_delete_team(self):\n pass", "def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_delete_team_member(self):\n pass", "def test_teams_remove_user_from_team_v1(self):\n pass", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def test_update_team(self):\n pass", "def test_handle_unassign_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project unassign 1\",\r\n user)\r\n assert (resp, code) == (\"Project successfully unassigned!\", 200)", "def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n 
user.put()\n captain.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def test_teams_delete_team_v1(self):\n pass", "def test_update_team_state(self):\n pass", "def test_handle_unassign_team_lookup_error(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n else:\r\n raise LookupError(\"team lookup error\")\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(self.testcommand.handle(\"project unassign ID\",\r\n user),\r\n (\"team lookup error\", 200))", "def check_existing_teams(user, teams_from_lms):\n teams = user.teams.all()\n for team in teams:\n if team not in teams_from_lms:\n user.teams.remove(team)", "def test_teacher_reset_result_with_arg_positive():\n expected = []\n Teacher.reset_results(oop_hw)\n assert Teacher.homework_done[oop_hw] == expected", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_remove_learner_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def test_remove_learner_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def test_retrieve_team(self):\n pass", "def test_remove_project_member(self):\n pass", "def test_handle_unassign_permission_error(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n return Team(\"GTID\", \"team-name\", \"display-name\")\r\n else:\r\n return User(user)\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project unassign 1\",\r\n user),\r\n (self.testcommand.permission_error, 200))", "def test_kyc_delete_legal_board_member(self):\n pass", "def on_unassign(self):", "def test_handle_remove(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user,\n test_user, other_user]\n 
self.db.query.return_value = [team]\n team_attach = [team.get_attachment()]\n with self.app.app_context():\n self.testcommand.handle(\"team add brs ID\", user)\n resp, code = self.testcommand.handle(\"team remove brs ID\", user)\n expect = {'attachments': team_attach,\n 'text': 'Removed ' 'User from brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n self.gh.remove_team_member.assert_called_once_with(\"myuser\",\n \"githubid\")", "def test_dropping_nochange(self):\r\n self.dropping_setup()\r\n self.dropping_homework_stage1()\r\n self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})\r\n\r\n self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])\r\n self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])\r\n self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])\r\n self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0]) # Order matters\r\n self.check_grade_percent(0.75)", "def test_teams_remove_customer_from_workgroup_v1(self):\n pass", "def test_remove_supervisor_and_projects(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n supervisor = game.supervisors[0]\n projects = supervisor.projects\n\n game._remove_player(supervisor, \"supervisors\")\n assert supervisor not in game.supervisors\n assert all(project not in game.projects for project in projects)" ]
[ "0.74560046", "0.7446941", "0.71779084", "0.6980405", "0.6945563", "0.683937", "0.68131936", "0.680543", "0.666881", "0.65452445", "0.65035367", "0.64305186", "0.63904566", "0.6305872", "0.6301154", "0.6292395", "0.6246176", "0.6204388", "0.6099144", "0.6093116", "0.6054029", "0.6026529", "0.5996483", "0.5960738", "0.5953592", "0.5942005", "0.58858514", "0.58823264", "0.5853902", "0.5846066" ]
0.9542753
0
Test case for unregister_template
def test_unregister_template(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unshare_template_registration(self):\n pass", "def test_list_unregistered_templates(self):\n pass", "def test_delete_template_subscription(self):\n pass", "def test_delete_namespaced_template(self):\n pass", "def test_register_template(self):\n pass", "def test_update_template_registration(self):\n pass", "def test_delete_subscription_template(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_delete_device_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def delete_custom_template(self, name, filename, context):\n pass", "def test_list_template_registrations(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_hook_unregister(self):\n self.assertEqual(list(self.registry), [])\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.extension.shutdown()\n self.assertEqual(list(self.registry), [])", "def test_get_template_subscription(self):\n pass", "def pre_service_template_delete(self, resource_id):\n pass", "def test_replace_namespaced_template(self):\n pass", "def test_unsubscribe_instructions(self):\n w = watch(save=True)\n template = Template('{% load unsubscribe_instructions %}'\n '{% unsubscribe_instructions watch %}')\n assert w.unsubscribe_url() in template.render(Context({'watch': w}))", "def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)", "def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)", "def unregister(self):\r\n self._unregister()", "def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def unregister(target: str) -> bool:\n ...", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_patch_namespaced_template(self):\n pass", "def test_delete_activity_template(self):\n pass", "def deregister_specialization(self, t):\n t = self.canon(t)\n self.cython_ctypes.pop(t, None)\n self.cython_cytypes.pop(t, None)\n self.cython_pytypes.pop(t, None)\n self.cython_cimports.pop(t, None)\n self.cython_cyimports.pop(t, None)\n self.cython_pyimports.pop(t, None)\n self.clearmemo()", "def delete_template():\n posted_json = request.get_json(force=True)\n try:\n name = posted_json['template_name']\n except KeyError:\n print(\"Not all required keys are present!\")\n r = jsonify(message=\"Not all required keys for add template are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n if bootstrapper_utils.delete_template(name):\n return jsonify(success=True, message='Deleted Template Successfully', status_code=200)\n else:\n r = jsonify(success=False, message='Could not delete template', status_code=500)\n r.status_code = 500\n return r", "def test_list_template_subscriptions(self):\n pass" ]
[ "0.8266894", "0.75216526", "0.72108436", "0.70623195", "0.7020946", "0.689085", "0.6745585", "0.6614777", "0.6574184", "0.652842", "0.6404528", "0.62667716", "0.6230103", "0.61180145", "0.60583645", "0.60513514", "0.60434884", "0.60297287", "0.59873724", "0.59873724", "0.59415257", "0.5922125", "0.5898897", "0.58974147", "0.5897171", "0.5859955", "0.58572435", "0.58230716", "0.5732441", "0.5711032" ]
0.9313844
0
Test case for unshare_template_registration
def test_unshare_template_registration(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unregister_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_list_unregistered_templates(self):\n pass", "def test_delete_template_subscription(self):\n pass", "def test_update_template_registration(self):\n pass", "def test_delete_namespaced_template(self):\n pass", "def test_delete_subscription_template(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_register_template(self):\n pass", "def test_delete_device_template(self):\n pass", "def test_list_template_registrations(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_delete_activity_template(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_unshare(self):\n\n # In the actual test, we'll want to confirm that a shared IP\n # address can be unshared from a group\n\n # Try to unshare from the group--fails for now (operation not\n # implemented in nova); note: change '10.0.0.1' to IP\n dtutil.assert_raises(novaclient.OpenStackException,\n self.server.unshare_ip, '10.0.0.1')", "def test_unsubscribe_instructions(self):\n w = watch(save=True)\n template = Template('{% load unsubscribe_instructions %}'\n '{% unsubscribe_instructions watch %}')\n assert w.unsubscribe_url() in template.render(Context({'watch': w}))", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def test_dashboards_v2_delete_share(self):\n pass", "def test_destroy_nas_share(self):\n pass", "def test_template_feedback(self):\r\n pass", "def test_replace_namespaced_template(self):\n pass", "def test_destroy_nas_share_by_pool(self):\n pass", "def test_list_template_subscriptions(self):\n pass", "def test_destroy_nas_share_by_nas(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_get_subscription_templates(self):\n pass", "def test_update_template_profile_for_system_module(self):\n pass", "def test_delete_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.delete(self.fixture.global_template, request=mock_request)", "def test_delete_collection_namespaced_template(self):\n pass", "def test_create_subscription_template(self):\n pass" ]
[ "0.8519667", "0.79101807", "0.73495406", "0.70867145", "0.69696724", "0.68918645", "0.68361384", "0.68302506", "0.6418135", "0.6403143", "0.62631977", "0.6220697", "0.62183076", "0.620692", "0.6064587", "0.60095567", "0.5959378", "0.5951477", "0.59085053", "0.58501303", "0.5821308", "0.5780138", "0.57609105", "0.575093", "0.57344514", "0.5730691", "0.57289773", "0.5724285", "0.5712186", "0.5698126" ]
0.9629609
0
Test case for update_asset
def test_update_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_test_asset(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_test_asset_impact_level(self):\n pass", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_update_software_asset_install_script(self):\n pass", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def update_asset(cls, id, asset_data):\n\n return ph_base._update_record('asset', id, asset_data)", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_update_scenario(self):\n pass", "def _update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 
'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)", "def test_update_software_asset_impact_level(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_delete_asset(self):\n pass", "def test_update_case(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_get_test_asset(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_inventory(self):\n pass", "def test_add_new_asset(self):\n self.assertEqual(self.all_assets.count(), 1)\n new_asset = Asset(asset_code=\"IC002\",\n serial_number=\"SN0045\",\n model_number=self.test_assetmodel,\n assigned_to=self.user)\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 2)", "def test_update_activity(self):\n pass" ]
[ "0.9143298", "0.8754931", "0.85765153", "0.84248626", "0.8293034", "0.82076514", "0.79654044", "0.7586861", "0.75134826", "0.74644655", "0.73476195", "0.7059753", "0.70486987", "0.69465137", "0.6925012", "0.6866125", "0.6836165", "0.6777377", "0.67356104", "0.6722325", "0.6722325", "0.6722325", "0.6715625", "0.6690457", "0.6636734", "0.6590392", "0.65171814", "0.6511909", "0.6496429", "0.6444699" ]
0.93774635
0
Test case for update_asset_content
def test_update_asset_content(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_test_asset_content(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_software_asset_bundle(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def _update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)", "def updateContent(content, **kwargs):", "def test_update_software_asset_install_script(self):\n pass", "def test_update_content_no_file(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The 
contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 1)\n content = Content.objects.first()\n last_uploaded_time = content.last_uploaded_time\n updated_data = {\n 'name': 'Updated Content Name',\n 'description': 'New description'\n }\n url = reverse('content-detail', args=[content.pk])\n response = self.client.patch(url, updated_data, format='json')\n content = Content.objects.first()\n self.assertEqual(last_uploaded_time, content.last_uploaded_time)", "def test_update_test_asset_impact_level(self):\n pass", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def test_get_test_asset(self):\n pass", "def update_asset(cls, id, asset_data):\n\n return ph_base._update_record('asset', id, asset_data)", "def test_delete_asset(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_submit_asset_to_submission_service(self):\n pass", "def test_locking(self):\r\n def verify_asset_locked_state(locked):\r\n \"\"\" Helper method to verify lock state in the contentstore \"\"\"\r\n asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')\r\n content = contentstore().find(asset_location)\r\n self.assertEqual(content.locked, locked)\r\n\r\n def post_asset_update(lock, course):\r\n \"\"\" Helper method for posting asset update. 
\"\"\"\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)\r\n\r\n # Load the toy course.\r\n module_store = modulestore('direct')\r\n _, course_items = import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['toy'],\r\n static_content_store=contentstore(),\r\n verbose=True\r\n )\r\n course = course_items[0]\r\n verify_asset_locked_state(False)\r\n\r\n # Lock the asset\r\n resp_asset = post_asset_update(True, course)\r\n self.assertTrue(resp_asset['locked'])\r\n verify_asset_locked_state(True)\r\n\r\n # Unlock the asset\r\n resp_asset = post_asset_update(False, course)\r\n self.assertFalse(resp_asset['locked'])\r\n verify_asset_locked_state(False)", "def test_import_test_asset(self):\n pass", "def test_auto_add_assets_to_story(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_update_metadata(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)" ]
[ "0.9216964", "0.8613375", "0.85233027", "0.8422238", "0.77063775", "0.7631445", "0.7557946", "0.7261228", "0.71722364", "0.7138799", "0.70724344", "0.6819154", "0.666328", "0.6620676", "0.65834427", "0.6485968", "0.647259", "0.6469849", "0.64016527", "0.6347597", "0.633786", "0.63143396", "0.62497246", "0.6186101", "0.6152522", "0.6126889", "0.60902333", "0.60521495", "0.60269403", "0.60159445" ]
0.94159484
0
Test case for update_asset_state
def test_update_asset_state(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state3(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_team_state(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_scenario(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_ensure_state_change_if_needed(self, setState, commit):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('NEW_FILES')\n setState.assert_called()", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_case(self):\n pass", "def test_update_inventory(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_activity(self):\n pass", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)" ]
[ "0.80782825", "0.78780204", "0.7578399", "0.742401", "0.74090177", "0.7325714", "0.7273254", "0.7257175", "0.72393346", "0.7206092", "0.71312016", "0.7070647", "0.6897969", "0.674873", "0.66741043", "0.66584134", "0.6597938", "0.6551835", "0.65515196", "0.64148074", "0.63831306", "0.6345444", "0.63438547", "0.6280135", "0.62469465", "0.6231626", "0.6231626", "0.6231626", "0.6154836", "0.6119475" ]
0.9406183
0
Test case for update_asset_visibility_query
def test_update_asset_visibility_query(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query(self):\n pass", "def test_update_visibility_query2(self):\n pass", "def test_update_visibility_query3(self):\n pass", "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query4(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def artifact_status_put_req(artifact_id, user_id, visibility):\n if visibility not in get_visibilities():\n return {'status': 'error',\n 'message': 'Unknown visibility value: %s' % visibility}\n\n pd = Artifact(int(artifact_id))\n access_error = check_access(pd.study.id, user_id)\n if access_error:\n return access_error\n user = User(str(user_id))\n status = 'success'\n msg = 'Artifact visibility changed to %s' % visibility\n # Set the approval to private if needs approval and admin\n if visibility == 'private':\n if not qiita_config.require_approval:\n pd.visibility = 'private'\n # Set the approval to private if approval not required\n elif user.level == 'admin':\n pd.visibility = 'private'\n # Trying to set approval without admin privileges\n else:\n status = 'error'\n msg = 'User does not have permissions to approve change'\n else:\n pd.visibility = visibility\n\n return {'status': status,\n 'message': msg}", "def glance_update_and_set_public(glance, image, image_info):\n image_properties = image_info['image_properties']\n try:\n logger.debug(\"glance image update: properties=%s\", image_properties)\n glance.images.update(image.id, **image_properties)\n logger.debug(\"glance image update: visibility=public\")\n glance.images.update(image.id, visibility='public')\n except Exception:\n logger.exception(\"Updating (-> public) Glance image '%s' [%s] failed\", image.name, image.id)\n return 1\n\n return 0", "def test_update_system_asset(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_software_asset(self):\n pass", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. 
Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_ipam_vlans_update(self):\n pass", "def do_visible(self, exp_res, img_owner, img_public, **kwargs):\n\n img = FakeImage(img_owner, img_public)\n ctx = context.RequestContext(**kwargs)\n\n self.assertEqual(ctx.is_image_visible(img), exp_res)", "def test_ipam_vlans_partial_update(self):\n pass", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def test_query_filter_field(self):\n obj = self.provision_single_asset()\n # TODO: Write a positive test for this\n ret = self.get('widget', 200,\n params={'__filter': [\n {'field': 'created_at', 'name': 'name', 'op': 'eq'}]})\n assert len(ret['objects']) == 0", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def test_ocean_assets_search(publisher_ocean_instance, metadata):\n identifier = str(uuid.uuid1()).replace(\"-\", \"\")\n metadata_copy = metadata.copy()\n metadata_copy[\"main\"][\"name\"] = identifier\n assert len(publisher_ocean_instance.assets.search(identifier)) == 0\n\n publisher = get_publisher_wallet()\n ddo = publisher_ocean_instance.assets.create(metadata_copy, publisher)\n wait_for_ddo(publisher_ocean_instance, ddo.did)\n time.sleep(1) # apparently changes are not instantaneous\n assert len(publisher_ocean_instance.assets.search(identifier)) == 1\n assert len(publisher_ocean_instance.assets.search(\"Gorilla\")) == 0", "def test_call_update_if_organization(self):\n Project.can_access.return_value = False\n self.mixin.check_can_access.when\\\n .called_with(MagicMock(user=self.user))\\\n .should.throw(PermissionDenied)\n Project.objects.update_user_projects.asset_called_once_with(\n self.user,\n )" ]
[ "0.8186437", "0.81362695", "0.81230724", "0.8075404", "0.80016315", "0.7795965", "0.6261526", "0.6221363", "0.61021906", "0.60756254", "0.6004977", "0.59099925", "0.5834948", "0.5724236", "0.5715103", "0.562968", "0.5584751", "0.54838", "0.54498", "0.54482436", "0.5418662", "0.53590506", "0.53426754", "0.5267116", "0.5224288", "0.5187647", "0.51703244", "0.5155956", "0.51370066", "0.5135691" ]
0.9468239
0
Test case for update_category
def test_update_category(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])", "def test_category_update(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.put('/api/v2/categories/1',\n data=json.dumps(self.pdata),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Updated!')\n self.assertEqual(res.status_code, 200)", "def test_update_category(self):\n category = sample_category()\n url = category_details_url(category.id)\n self.client.put(url, {\"name\": \"school\"})\n category.refresh_from_db()\n self.assertEqual(category.name, 'school')", "def test_edit_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)", "def test_update(self, init_db, category):\n category_name = fake.alphanumeric()\n category.update(name=category_name)\n assert category.name == category_name", "def test_edit_category(self):\n response = self.client.put('/api/v1/category/1',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 201)\n self.assertIn('Apparels', str(response.data))", "def test_edit_category(self):\n rv = self.client().post(\n '/categories/',\n data={'category_name': 'Sauces'})\n self.assertEqual(rv.status_code, 201)\n rv = self.client().put(\n '/categories/1',\n data={\n \"name\": \"Soups and Sauces\"\n })\n #self.assertEqual(rv.status_code, 200)\n results = self.client().get('/categories/1')\n #self.assertIn('Soups and', str(results.data))", "def test_update_preferences_by_category(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\n pass", "def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)", "def test_update_child_category(self):\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')", "def test_update_category_to_existing_name(self):\n sample_category()\n category = sample_category(name='House')\n url = category_details_url(category.id)\n res = self.client.put(url, {\"name\": \"place\"})\n\n category.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')", "def test_put_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_update_category(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n self.tester.update_category('country', {\"foo\": \"bar\"})\n\n with self.assertRaises(QiitaDBColumnError):\n self.tester.update_category('missing column',\n {'1.SKM7.640188': 'stuff'})\n\n negtest = self.tester['1.SKM7.640188']['country']\n\n mapping = {'1.SKB1.640202': \"1\",\n '1.SKB5.640181': \"2\",\n 
'1.SKD6.640190': \"3\"}\n\n self.tester.update_category('country', mapping)\n\n self.assertEqual(self.tester['1.SKB1.640202']['country'], \"1\")\n self.assertEqual(self.tester['1.SKB5.640181']['country'], \"2\")\n self.assertEqual(self.tester['1.SKD6.640190']['country'], \"3\")\n self.assertEqual(self.tester['1.SKM7.640188']['country'], negtest)\n\n # test updating a required_sample_info\n mapping = {'1.SKB1.640202': \"1\",\n '1.SKB5.640181': \"2\",\n '1.SKD6.640190': \"3\"}\n self.tester.update_category('required_sample_info_status_id', mapping)\n self.assertEqual(\n self.tester['1.SKB1.640202']['required_sample_info_status'],\n \"received\")\n self.assertEqual(\n self.tester['1.SKB5.640181']['required_sample_info_status'],\n \"in_preparation\")\n self.assertEqual(\n self.tester['1.SKD6.640190']['required_sample_info_status'],\n \"running\")\n self.assertEqual(\n self.tester['1.SKM7.640188']['required_sample_info_status'],\n \"completed\")\n\n # testing that if fails when trying to change an int column value\n # to str\n st = SampleTemplate.create(self.metadata, self.new_study)\n mapping = {'2.Sample1': \"no_value\"}\n with self.assertRaises(ValueError):\n st.update_category('int_column', mapping)", "def test_patch_category(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {\n 'title': UPDATED_TITLE,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n }\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.category.refresh_from_db()\n model_dict = model_to_dict(self.category)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.category.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'full_title': UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.category.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(self.category.get_owner().user, self.user_owner_cat)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': None,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': False,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_24_admin_update_category(self):\r\n self.create()\r\n obj = db.session.query(Category).get(1)\r\n _name = obj.name\r\n category = obj.dictize()\r\n\r\n # Anonymous user GET\r\n url = '/admin/categories/update/%s' % obj.id\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n # Anonymous user POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not 
admin GET\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n # Authenticated user but not admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin GET\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Category should be listed for admin user\"\r\n assert _name in res.data, err_msg\r\n # Check 404\r\n url_404 = '/admin/categories/update/5000'\r\n res = self.app.get(url_404, follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n # Admin POST\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n err_msg = \"Category should be updated\"\r\n assert \"Category updated\" in res.data, err_msg\r\n assert category['name'] in res.data, err_msg\r\n updated_category = db.session.query(Category).get(obj.id)\r\n assert updated_category.name == obj.name, err_msg\r\n # With not valid form\r\n category['name'] = None\r\n res = self.app.post(url, data=category, follow_redirects=True)\r\n assert \"Please correct the errors\" in res.data, err_msg", "def test_delete_category(self):\n pass", "def test_create_category(self):\n pass", "def test_category_mixed_on_edit(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'categories', 'onecat')\n self.submit200()\n minus = MinusRecord.objects.all()[0]\n self.assert_equal(minus.categories.count(), 1)\n self.go200('minus_edit', [self.superuser, minus.id])\n self.fv('minus_upload', 'add_category', 'yuppie')\n self.submit200()\n self.assert_equal(minus.categories.count(), 2)\n self.assert_equal(minus.categories.all()[0].name, 'onecat')\n self.assert_equal(minus.categories.all()[1].name, 'yuppie')", "def update(self, request, *args, **kwargs):\n response = super(CategoryViewSet).update(self, request, *args, *kwargs)\n response.data['message'] = \"Categoria ha sido editada\"", "def update_category(data, key):\n try:\n category = Categories.objects.get(pk=key, is_delete=False)\n except ObjectDoesNotExist:\n return Response({'status': CATEGORY_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n valid_fields = ['period_number', 'period_name', 'terms']\n correct_details = True\n for field in data:\n if field in valid_fields:\n setattr(category, field, data[field])\n else:\n correct_details = False\n break\n\n if correct_details:\n category.save()\n return Response({'status': CATEGORY_UPDATED}, status=status.HTTP_200_OK)\n return Response({'status': INVALID_FIELDS}, status=status.HTTP_400_BAD_REQUEST)", "def test_update_notification_category(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_category_name_one = 'Error 1'\n post_response_one = create_notification_category(\n client,\n new_notification_category_name_one)\n assert post_response_one.status_code == HttpStatus.created_201.value\n\n post_response_data_one = json.loads(post_response_one.get_data(as_text=True))\n new_notification_category_url = post_response_data_one['url']\n new_notification_category_name_two = 'Error 2'\n data = {'name': new_notification_category_name_two}\n patch_response = client.patch(\n 
new_notification_category_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS),\n data=json.dumps(data))\n assert patch_response.status_code == HttpStatus.ok_200.value\n\n get_response = client.get(\n new_notification_category_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['name'] == new_notification_category_name_two", "def apply_updates(self, updated_unit_dict, errors):\n entity_dict = {}\n self.registration().convert_json_to_entity(\n updated_unit_dict, entity_dict)\n\n key = entity_dict['category']\n description = entity_dict['description']\n visible = entity_dict['visible']\n\n if course_category.CourseCategoryDAO.get_category_by_key(key):\n course_category.CourseCategoryDAO.update_category(\n\t\tkey, description, visible, errors)\n else:\n errors.append('Category not found : %s' % key)", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "async def update_recipe_category(category: str, new_category: CategoryIn, session: Session = Depends(generate_session)):\n\n try:\n return db.categories.update(session, category, new_category.dict())\n except Exception:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)", "def test_add_category_to_product(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(product_url,\n data=json.dumps(self.product_data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n self.app.post(category_url,\n data=json.dumps(self.data),\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.post(productcategory_url,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Updated!')\n self.assertEqual(res.status_code, 200)", "def updateCategory(request, category_id):\n try:\n category = Category.objects.get(id=category_id)\n print(\"id\", category)\n except Category.DoesNotExist:\n return Response({'message': \"requested category does not exist\"}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = CategorySerializer(category)\n return Response({\"data\": serializer.data}, status=status.HTTP_200_OK)\n\n elif request.method == \"PUT\":\n serializer = CategorySerializer(category, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(\n {'data': serializer.data, \"message\": \"Data updated successfullt\", \"status\": status.HTTP_200_OK})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_update_case(self):\n pass", "def apply_updates(self, updated_unit_dict, errors):\n\n entity_dict = {}\n self.registration().convert_json_to_entity(\n updated_unit_dict, entity_dict)\n\n key = entity_dict['category']\n description = entity_dict['description']\n visible = entity_dict['visible']\n\n if course_category.CourseCategoryDAO.get_category_by_key(key):\n errors.append(\n 'Category %s already exists.' 
% key)\n else:\n course_category.CourseCategoryDAO.add_new_category(\n\t\tkey, description, visible, errors)", "def test_patch_role_finder_category(self):\n user_new = self.make_user('user_new')\n new_as = self.make_assignment(self.category, user_new, self.role_finder)\n self.assertEqual(RoleAssignment.objects.count(), 4)\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': new_as.sodar_uuid},\n )\n patch_data = {'role': PROJECT_ROLE_FINDER}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 4)\n new_as.refresh_from_db()\n self.assertEqual(new_as.role, self.role_finder)", "def test_photo_classification_view_set_post_category_update_not_allowed(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n photo_models.PhotoClassification.objects.create_or_update(name='night', classification_type='category')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'category'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n\n self.assertEquals(request.status_code, 400)\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 12)" ]
[ "0.86986804", "0.82034546", "0.8127593", "0.81214726", "0.8116833", "0.77859855", "0.7695535", "0.76880234", "0.7634107", "0.75649005", "0.75559545", "0.7522682", "0.73546076", "0.73352194", "0.7146677", "0.71418315", "0.71250314", "0.6967468", "0.6949111", "0.69295293", "0.69081557", "0.6878907", "0.6846365", "0.6843272", "0.68311155", "0.6815129", "0.6809232", "0.66597503", "0.6581628", "0.65690553" ]
0.93830985
0
Test case for update_cloud
def test_update_cloud(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_cloud(self):\n pass", "def test_update_case(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_cloud_api(tmp_path, is_cloud):\n config = mk_tmp_file(\n tmp_path, key_to_update=\"auth.is_cloud\", value_to_update=is_cloud\n )\n result = runner.invoke(app, [\"--config\", str(config), \"validate\"])\n assert result.exit_code == 0\n if is_cloud:\n assert state.confluence_instance.api_version == \"cloud\"\n else:\n assert state.confluence_instance.api_version == \"latest\"", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_delete_cloud(self):\n pass", "def test_update_bucket(self):\n pass", "def test_update_system(self):\n pass", "def test_client_update(self):\n pass", "def test_update(self):\n\n # Test that instances without application information cannot be started\n incomplete_instance = Instance(self.client, 'foo')\n with self.assertRaises(ValueError):\n incomplete_instance.update()\n\n value = self.instance.update()\n update_instance = self.client.update_instance\n update_instance.assert_called_once_with('nginx', 'nginx', 'latest',\n parameters={\n 'SETTING': 'value'\n },\n options={\n 'storageBucket': 'custom'\n })\n self.assertEqual(value, update_instance.return_value)", "def test_update_scenario(self):\n pass", "def test_update_client(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_update9(self):\n pass", "def test_update_deployment(self):\n pass", "def test_update_metadata(self):\n pass", "def test_get_clouds(self):\n pass", "def test_update(app):\n\n assert False", "def test_register_cloud(self):\n pass", "def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_kyc_put_request(self):\n pass", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def test_update_one(self):\n pass", "def test_update_metadata1(self):\n pass", "def test_update_record(self):\n pass", "def test_ipam_services_update(self):\n pass" ]
[ "0.7099971", "0.70921254", "0.7071093", "0.7071093", "0.7071093", "0.69353676", "0.6912366", "0.68711144", "0.6801207", "0.67395914", "0.6733072", "0.67293656", "0.6703742", "0.6690799", "0.6664624", "0.6664624", "0.6492792", "0.6480173", "0.64646196", "0.6428826", "0.63902086", "0.63828635", "0.63778484", "0.6359346", "0.63269985", "0.63269985", "0.6316721", "0.629854", "0.62744766", "0.6268775" ]
0.941336
0
Test case for update_composition
def test_update_composition(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_composition(self):", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_get_composition(self):\n pass", "def test_launch_composition(self):\n pass", "def test_composition_adds_to_100_percent(self):", "def test_update_collection(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_case(self):\n pass", "def test_publish_scenario_to_composition(self):\n pass", "def test_client_partial_update(self):\n pass", "def test_delete_composition(self):\n pass", "def test_list_compositions(self):\n pass", "def test_list_composition_status(self):\n pass", "def test_teams_partial_update(self):\n pass", "def test_update_container(self):\n pass", "def test_update_state(self):\n pass", "def test_update_connector(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_one(self):\n pass", "def test_update_group(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_occurrence(self):\n pass", "def test_update_state3(self):\n pass", "def test_update9(self):\n pass", "def testDirtyRefresh(self):\n \n pass", "def test_patch_collection(self):\n pass", "def test_update_state4(self):\n pass" ]
[ "0.7167518", "0.7037734", "0.69457155", "0.68069243", "0.6690189", "0.668691", "0.6638788", "0.6638788", "0.6638788", "0.65686834", "0.6405884", "0.63726896", "0.63652927", "0.6346053", "0.63192606", "0.62668085", "0.6254495", "0.62431693", "0.6226514", "0.6187013", "0.61274254", "0.61131334", "0.6110521", "0.60957015", "0.6070434", "0.60245603", "0.6023134", "0.60151994", "0.60096437", "0.60071117" ]
0.93948174
0
Test case for update_deployment
def test_update_deployment(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_deployment_state(self):\n pass", "def test_retest_deployment_run(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_create_deployment(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def test_get_deployment_run(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment(self):\n pass", "def test_get_deployment_runs1(self):\n pass", "def test_create_deployment_entire(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_remove_deployment(self):\n del_deployment, mod_del_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='del_dep')\n\n undel_deployment, mod_undel_dep_bp1 = \\\n self._deploy_and_get_modified_bp_path('remove_deployment',\n deployment_id='undel_dep')\n\n blu_id = BLUEPRINT_ID + '-del-1'\n self.client.blueprints.upload(mod_del_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n blu_id = BLUEPRINT_ID + '-undel-1'\n self.client.blueprints.upload(mod_undel_dep_bp1, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(undel_deployment.id, blu_id)\n\n mod_del_dep_bp2 = self._get_blueprint_path(\n os.path.join('remove_deployment', 'modification2'),\n 'remove_deployment_modification2.yaml')\n blu_id = BLUEPRINT_ID + '-del-2'\n self.client.blueprints.upload(mod_del_dep_bp2, blu_id)\n wait_for_blueprint_upload(blu_id, self.client)\n self._do_update(del_deployment.id, blu_id)\n\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n\n self.assertEqual(len(deployment_update_list.items), 2)\n\n # Delete deployment and assert deployment updates were removed\n uninstall = self.client.executions.start(\n del_deployment.id, 'uninstall')\n self.wait_for_execution_to_end(uninstall)\n\n self.client.deployments.delete(del_deployment.id)\n wait_for_deployment_deletion_to_complete(\n del_deployment.id, self.client\n )\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=del_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list.items), 0)\n\n # Assert no other deployment updates were deleted\n deployment_update_list = self.client.deployment_updates.list(\n deployment_id=undel_deployment.id,\n _include=['id']\n )\n self.assertEqual(len(deployment_update_list), 1)", "def test_redeploy(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_launch_deployment(self):\n pass", "def test_patch_namespaced_deployment_config_status(self):\n pass", "def test_patch_namespaced_deployment_config(self):\n pass", "def test_update_container(self):\n pass", "def test_update_task_deployment_notebook(self, mock_config_load, mock_core_v1_api):\n task_id = util.MOCK_UUID_4\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"deploymentNotebook\": loads(util.SAMPLE_NOTEBOOK),\n },\n )\n result = rv.json()\n expected = {\n \"uuid\": \"uuid-4\",\n \"name\": \"task-4\",\n \"description\": None,\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"tags\": [],\n \"dataIn\": None,\n \"dataOut\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": EXPERIMENT_IMAGE,\n \"memoryLimit\": \"10Gi\",\n \"memoryRequest\": \"2Gi\",\n \"parameters\": [],\n 
\"readinessProbeInitialDelaySeconds\": 60,\n \"createdAt\": mock.ANY,\n }\n machine_generated = [\"updatedAt\"]\n for attr in machine_generated:\n self.assertIn(attr, result)\n del result[attr]\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)\n\n mock_core_v1_api.assert_any_call()\n mock_config_load.assert_any_call()", "def test_get_deployment_runs(self):\n pass", "def test_update(app):\n\n assert False", "def test_update_scenario(self):\n pass", "def test_update_system(self):\n pass", "def test_duo_application_update(self):\n pass", "def test_update_app_deploy_failed():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_unhealthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n # rerun update, application status should not make difference\n app_state_manager.update()\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED", "def test_redeploy_container_asset(self):\n pass", "def test_redeploy_same_app():\n\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", [{\"name\": \"d1\"}, {\"name\": \"d2\"}])\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n # Deploy the same app with different deployments\n unused_deployments = app_state_manager.deploy_application(\n \"test_app\", [{\"name\": \"d2\"}, {\"name\": \"d3\"}]\n )\n assert unused_deployments == [\"d1\"]\n\n app_state_manager.deployment_state_manager.add_deployment_status(\n DeploymentStatusInfo(\"d3\", DeploymentStatus.UPDATING)\n )\n assert app_state_manager._application_states[\"test_app\"].deployments_to_delete == {\n \"d1\"\n }\n\n # After updating, the deployment should be deleted successfully, and\n # deployments_to_delete should be empty\n app_state_manager.deployment_state_manager.delete_deployment(\"d1\")\n app_state_manager.update()\n assert (\n app_state_manager._application_states[\"test_app\"].deployments_to_delete == set()\n )", "def test_update_case(self):\n pass" ]
[ "0.86214006", "0.7897574", "0.77938586", "0.74998325", "0.7491759", "0.7470526", "0.73979163", "0.7376222", "0.7376222", "0.7341087", "0.7328213", "0.73271334", "0.73019856", "0.72808087", "0.7272297", "0.7238414", "0.71365225", "0.7043166", "0.6964594", "0.69216716", "0.68576825", "0.68485415", "0.6835982", "0.6817657", "0.6786978", "0.67619765", "0.67485267", "0.6723979", "0.6722688", "0.6694096" ]
0.9462699
0
Test case for update_deployment_state
def test_update_deployment_state(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_deployment(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_state3(self):\n pass", "def test_update_app_running():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING\n\n # rerun update, application status should not make difference\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING", "def test_update_app_deploy_failed():\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", {})\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n app_state_manager.deployment_state_manager.set_deployment_statuses_unhealthy(0)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n # rerun update, application status should not make difference\n app_state_manager.update()\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED", "def test_retest_deployment_run(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_redeploy_same_app():\n\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.deploy_application(\"test_app\", [{\"name\": \"d1\"}, {\"name\": \"d2\"}])\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n # Deploy the same app with different deployments\n unused_deployments = app_state_manager.deploy_application(\n \"test_app\", [{\"name\": \"d2\"}, {\"name\": \"d3\"}]\n )\n assert unused_deployments == [\"d1\"]\n\n app_state_manager.deployment_state_manager.add_deployment_status(\n DeploymentStatusInfo(\"d3\", DeploymentStatus.UPDATING)\n )\n assert app_state_manager._application_states[\"test_app\"].deployments_to_delete == {\n \"d1\"\n }\n\n # After updating, the deployment should be deleted successfully, and\n # deployments_to_delete should be empty\n app_state_manager.deployment_state_manager.delete_deployment(\"d1\")\n app_state_manager.update()\n assert (\n app_state_manager._application_states[\"test_app\"].deployments_to_delete == set()\n )", "def test_update_page_state(self):\n pageStateObj = PageState()\n response = self.client.open(\n '/rui_support/page-state/{tempIdentifier}'.format(tempIdentifier='tempIdentifier_example'),\n method='PATCH',\n data=json.dumps(pageStateObj),\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_relaunch_deployment_run(self):\n pass", "def test_patch_namespaced_deployment_config_status(self):\n pass", "def 
test_update_team_state(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def test_update_node_state_servicelight(self):\n pass", "def test_config_deploy_app(fail_deploy):\n signal = SignalActor.remote()\n\n @ray.remote\n def task():\n ray.get(signal.wait.remote())\n if fail_deploy:\n raise Exception(\"fail!\")\n\n object_ref = task.remote()\n app_state_manager = ApplicationStateManager(MockDeploymentStateManager())\n app_state_manager.create_application_state(\"test_app\", object_ref)\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOYING\n\n signal.send.remote()\n time.sleep(2)\n if fail_deploy:\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.DEPLOY_FAILED\n else:\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0)\n app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1)\n app_state_manager.update()\n app_status = app_state_manager.get_app_status(\"test_app\")\n assert app_status.status == ApplicationStatus.RUNNING", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update_case(self):\n pass", "def test_redeploy(self):\n pass", "def test_restart_statestore(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active 
catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n\n # Restart statestore. Verify one catalogd is assigned as active, the other is\n # assigned as standby.\n self.cluster.statestored.restart()\n wait_time_s = build_flavor_timeout(90, slow_build_timeout=180)\n self.cluster.statestored.service.wait_for_metric_value('statestore.live-backends',\n expected_value=5, timeout=wait_time_s)\n sleep_time_s = build_flavor_timeout(2, slow_build_timeout=5)\n sleep(sleep_time_s)\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n\n unexpected_msg = re.compile(\"Ignore the update of active catalogd since more recent \"\n \"update has been processed ([0-9]+ vs [0-9]+)\")\n self.assert_catalogd_log_contains(\"INFO\", unexpected_msg, expected_count=0)\n self.assert_impalad_log_contains(\"INFO\", unexpected_msg, expected_count=0)", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_check_from_deployment(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_from_deployment()\n self.assertListEqual([call([1, 2, 3, 4, 5])], mocked_check.call_args_list)", "def test_update_container(self):\n pass", "async def test_api_state_change(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n hass.states.async_set(\"test.test\", \"not_to_be_set\")\n\n await mock_api_client.post(\n \"/api/states/test.test\", json={\"state\": \"debug_state_change2\"}\n )\n\n assert hass.states.get(\"test.test\").state == \"debug_state_change2\"" ]
[ "0.8097301", "0.7649791", "0.74259627", "0.739222", "0.7294069", "0.7267434", "0.71443653", "0.7071182", "0.68353486", "0.67738706", "0.6750318", "0.6656775", "0.66130435", "0.65754175", "0.65620977", "0.65335715", "0.6463222", "0.6454027", "0.6409663", "0.63748467", "0.6357605", "0.6328999", "0.6313238", "0.6305245", "0.6254149", "0.62266874", "0.6218365", "0.6212725", "0.6209668", "0.6204951" ]
0.9399369
0
Test case for update_deployment_visibility_query
def test_update_deployment_visibility_query(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query(self):\n pass", "def test_update_visibility_query2(self):\n pass", "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query4(self):\n pass", "def test_update_visibility_query3(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_update_deployment(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def artifact_status_put_req(artifact_id, user_id, visibility):\n if visibility not in get_visibilities():\n return {'status': 'error',\n 'message': 'Unknown visibility value: %s' % visibility}\n\n pd = Artifact(int(artifact_id))\n access_error = check_access(pd.study.id, user_id)\n if access_error:\n return access_error\n user = User(str(user_id))\n status = 'success'\n msg = 'Artifact visibility changed to %s' % visibility\n # Set the approval to private if needs approval and admin\n if visibility == 'private':\n if not qiita_config.require_approval:\n pd.visibility = 'private'\n # Set the approval to private if approval not required\n elif user.level == 'admin':\n pd.visibility = 'private'\n # Trying to set approval without admin privileges\n else:\n status = 'error'\n msg = 'User does not have permissions to approve change'\n else:\n pd.visibility = visibility\n\n return {'status': status,\n 'message': msg}", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def test_ipam_vlans_update(self):\n pass", "def _force_visibility(self, visibility_field):\r\n authorized_project = acl.get_limited_to_project(pecan.request.headers)\r\n is_admin = authorized_project is None\r\n if not is_admin:\r\n self._restrict_to_project(authorized_project, visibility_field)\r\n self._check_cross_project_references(authorized_project,\r\n visibility_field)", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 
1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def test_put_public_guest_access_category(self):\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_CATEGORY,\n 'parent': '',\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_get_deployment_runs1(self):\n pass", "def test_patch_project_public(self):\n self.assertEqual(self.project.public_guest_access, False)\n self.assertEqual(self.category.has_public_children, False)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'public_guest_access': True}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n self.category.refresh_from_db()\n self.assertEqual(self.project.public_guest_access, True)\n # Assert the parent category has_public_children is set true\n self.assertEqual(self.category.has_public_children, True)", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. 
Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_ipam_vlans_partial_update(self):\n pass", "def test_execute_deployment(self):\n pass", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def test_get_deployments_expanded(self):\n pass", "def test_step_visibility(self, _step: PropertyMock):\n _step.return_value = MagicMock(is_visible=True)\n es = exposed.ExposedStep()\n self.assertTrue(es.visible)\n es.visible = False\n self.assertFalse(es.visible)", "def test_update_domain_only(self):\n self.test_update()", "def test_get_deployment_runs(self):\n pass", "def test_filter_by_atribute(admin_client, public_resource_with_metadata, private_resource_with_metadata):\n query_filter = {\"availability\": [\"public\"]}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n short_ids = [x['short_id'] for x in json.loads(response['resources'])]\n assert djangoresponse.status_code == 200\n assert public_resource_with_metadata.short_id in short_ids\n assert private_resource_with_metadata.short_id not in short_ids", "def test_update_container_privilege(self):\n pass" ]
[ "0.8173803", "0.8062232", "0.8044011", "0.7952982", "0.7947686", "0.78972185", "0.59307784", "0.5796152", "0.5660492", "0.5593931", "0.55589646", "0.54316604", "0.54103506", "0.53888404", "0.5297964", "0.5226097", "0.52231145", "0.52171427", "0.5216443", "0.5186348", "0.51708263", "0.5164509", "0.5156073", "0.5133842", "0.5131848", "0.5130625", "0.5120593", "0.5090808", "0.5090518", "0.50843376" ]
0.95018893
0
Test case for update_impact_level
def test_update_impact_level(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_test_asset_impact_level(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_case(self):\n pass", "def test_update_goal_metric(self):\n pass", "def test_level_up(self):\n\n self.sold.experience = 12\n self.sold.level_up()\n self.assertEqual(self.sold.experience, 13)", "def test_update_review_status(self):\n risk = factories.RiskFactory()\n new_value = all_models.Review.STATES.REVIEWED\n\n self.api.put(risk, risk.id, {\n \"review_status\": new_value,\n \"review_status_display_name\": \"some status\"\n })\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status, new_value)", "def test_update_goal(self):\n pass", "def test_update_team_state(self):\n pass", "def test_update_activity_occurrence_status(self):\n pass", "def test_UpdateHealthAt0 (self) :\n\t\t\n\t\tself.person2.updateHealth ()\n\t\tself.assertEqual(self.person2.getHealth(), \\\n\t\t100 + self.healthEffect)", "def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}", "def test_level_up_out_of_limit(self):\n self.sold.experience = 50\n self.sold.level_up()\n self.assertEqual(self.sold.experience, 50)", "def test_update_score_multiple(self):\r\n self.update_score_multiple()\r\n score = self.openendedmodule.latest_score()\r\n self.assertEquals(score, 1)", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def test_update_risk_profile_using_put(self):\n pass", "def test_update_team(self):\n pass", "def test_UpdateHealthLessThan0 (self) :\n\t\t\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), \\\n\t\t100 + self.healthEffect2)\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), 0)", "def test_increase_verbosity(self):\n # Start from a known state.\n set_level(logging.INFO)\n assert get_level() == logging.INFO\n # INFO -> VERBOSE.\n increase_verbosity()\n assert get_level() == logging.VERBOSE\n # VERBOSE -> DEBUG.\n increase_verbosity()\n assert get_level() == logging.DEBUG\n # DEBUG -> SPAM.\n increase_verbosity()\n assert get_level() == logging.SPAM\n # SPAM -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET\n # NOTSET -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_client_risk_assessment_partial_update(self):\n pass", "def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)", "def test_update_state4(self):\n pass", "def 
test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_state3(self):\n pass", "def test_update9(self):\n pass", "def test_update_state1(self):\n pass" ]
[ "0.84757", "0.8180517", "0.6904667", "0.6527928", "0.63468087", "0.6327154", "0.6299115", "0.6278013", "0.62684965", "0.61658657", "0.61547214", "0.6081237", "0.6079794", "0.605033", "0.6037451", "0.6036785", "0.60179037", "0.6013135", "0.5981305", "0.59558624", "0.59506905", "0.59506905", "0.59506905", "0.5932251", "0.5909346", "0.5898994", "0.5883042", "0.5875244", "0.58584046", "0.58583796" ]
0.94299084
0
Test case for update_instance_limit
def test_update_instance_limit(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_instance_limit1(self):\n pass", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_update(self):\n\n # Test that instances without application information cannot be started\n incomplete_instance = Instance(self.client, 'foo')\n with self.assertRaises(ValueError):\n incomplete_instance.update()\n\n value = self.instance.update()\n update_instance = self.client.update_instance\n update_instance.assert_called_once_with('nginx', 'nginx', 'latest',\n parameters={\n 'SETTING': 'value'\n },\n options={\n 'storageBucket': 'custom'\n })\n self.assertEqual(value, update_instance.return_value)", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def test_limits(manager):\n manager.update(days=40)\n compare_results_attrs(manager.items, fixtures.FIXTURES[51])", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_max_members(self):\n self.login_as(self.USER)\n\n group_members = Membership.objects.filter(community_id=self.GROUP_ID).count()\n Community.objects.filter(pk=self.GROUP_ID).update(max_members=group_members)\n \n with self.assertNumQueries(5):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"This group has reached its member limit.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), 
content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_limit_memory(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n self.assertEqual(json.loads(response.data['memory']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['memory'])\n # set an initial limit\n mem = {'web': '1G'}\n body = {'memory': json.dumps(mem)}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('memory', response.data)\n memory = json.loads(response.data['memory'])\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # set an additional value\n body = {'memory': json.dumps({'worker': '512M'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n memory = json.loads(response.data['memory'])\n self.assertIn('worker', memory)\n self.assertEqual(memory['worker'], '512M')\n self.assertIn('web', memory)\n self.assertEqual(memory['web'], '1G')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_recommended_num_objects(self):\n\n batch = Batch(Mock())\n self.check_instance(batch)\n\n self.assertIsNone(batch.recommended_num_objects)\n self.check_instance(batch)\n\n batch._recommended_num_objects = 10\n self.assertEqual(batch.recommended_num_objects, 10)\n self.check_instance(batch, recom_num_obj=10)\n\n batch._recommended_num_objects = 20\n self.assertEqual(batch.recommended_num_objects, 20)\n 
self.check_instance(batch, recom_num_obj=20)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():", "def limit_instances(sender, instance, created, *args, **kwargs):\n\tif created:\n\t\traise ValidationError(\"There can only be 1 instance of this model.\")", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def test_set_maximum(self):\n self.server_widget.maximum = 1000\n assert self.client_widget.maximum == self.server_widget.maximum", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():", "def test_update_case(self):\n pass", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def _testRatingLimit(self):\n\n comment = models.Comment.objects.all()[0]\n type = models.RatingType.objects.all()[0]\n try:\n val = type.limit + 10\n rating = models.Rating(comment=comment, type=type, value=val)\n rating.save()\n assert rating.value == type.limit\n finally:\n rating.delete()", "def test_store_elements_count_exceed_default_limit(self, mocker):\n proxy = mocker.patch('saana_lib.ranking.RankingToDatabase.proxy')\n _compute = mocker.patch('saana_lib.ranking.Ranking.compute')\n\n _compute.return_value = dict((i, list(range(5))) for i in range(10))\n self.klass.store()\n assert proxy.call_count == 20", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_execute_scale_up_after_maxentities_met(self):\n upd_maxentities = 3\n self._update_group_min_max_entities(group=self.group,\n maxentities=upd_maxentities)\n change_num = upd_maxentities - \\\n self.group.groupConfiguration.minEntities\n policy_up = {'change': change_num, 'cooldown': 0}\n execute_policy = self.autoscale_behaviors.create_policy_webhook(\n group_id=self.group.id,\n policy_data=policy_up,\n execute_policy=True)\n self.assertEquals(\n execute_policy['execute_response'], 202,\n msg='Scale up policy execution failed for group {0}'\n 'when change delta < maxentities with response: {1}'\n .format(self.group.id, execute_policy['execute_response']))\n reexecute_scale_up = self.autoscale_client.execute_policy(\n self.group.id,\n execute_policy['policy_id'])\n self.assertEquals(\n reexecute_scale_up.status_code, 403,\n msg='Scale up policy executed for group {0} when group already'\n ' has maxentities, response code: {1}'\n .format(self.group.id, reexecute_scale_up.status_code))", "def test_update_instances_schedule_state(self):\n pass", "def submit_vserver_limit(vs_host, type_instance, value):\n submit_generic(vs_host, 'context', 'vs_vlimit', value, type_instance)", "def test_instances_pagination(self, instances_steps, create_instance,\n update_settings):\n instance_name = next(generate_ids('instance'))\n instances = create_instance(instance_name, count=3)\n update_settings(items_per_page=1)\n\n page_instances = instances_steps.page_instances()\n\n 
page_instances.table_instances.row(\n name=instances[2].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_absence()\n\n page_instances.table_instances.link_next.click()\n\n page_instances.table_instances.row(\n name=instances[1].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_next.click()\n\n page_instances.table_instances.row(\n name=instances[0].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_absence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_prev.click()\n\n page_instances.table_instances.row(\n name=instances[1].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_prev.click()\n\n page_instances.table_instances.row(\n name=instances[2].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_absence()", "def test_update_scenario(self):\n pass", "def limit(self, limit):\n self._evaluated = False\n self._limit = limit\n return self", "def test_fail_on_rate_limit_exceeded(self):\n\n # setup 'short' limit for testing\n self.client.protocol.rate_limiter.rules = []\n self.client.protocol.rate_limiter.rules.append(\n XRateLimitRule(\n {\n \"short\": {\n \"usage\": 0,\n \"limit\": 600,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n \"long\": {\n \"usage\": 0,\n \"limit\": 30000,\n \"time\": 5,\n \"lastExceeded\": None,\n },\n }\n )\n )\n\n # interact with api to get the limits\n self.client.get_athlete()\n\n # access the default rate limit rule\n rate_limit_rule = self.client.protocol.rate_limiter.rules[0]\n\n # get any of the rate limits, ex the 'short'\n limit = rate_limit_rule.rate_limits[\"short\"]\n\n # get current usage\n usage = limit[\"usage\"]\n print(\"last rate limit usage is {0}\".format(usage))\n\n # for testing purpses set the limit to usage\n limit[\"limit\"] = usage\n print(\"changing limit to {0}\".format(limit[\"limit\"]))\n\n # expect exception because of RateLimit has been\n # exceeded (or reached max)\n with self.assertRaises(exc.RateLimitExceeded):\n self.client.get_athlete()\n\n # request fired to early (less than 5 sec) causes timeout exception\n with self.assertRaises(exc.RateLimitTimeout):\n self.client.get_athlete()\n\n # once rate limit has exceeded wait until another request is possible\n # check if timeout has been set\n self.assertTrue(rate_limit_rule.limit_timeout > 0)\n print(\"limit timeout {0}\".format(rate_limit_rule.limit_timeout))\n\n # resetting limit\n # simulates Strava api - it would set the usage again to 0\n limit[\"limit\"] = 600\n print(\"resetting limit to {0}\".format(limit[\"limit\"]))\n\n try:\n # waiting until timeout expires\n time.sleep(5)\n\n # this time it should work again\n self.client.get_athlete()\n self.assertTrue(\"No exception raised\")\n except exc.RateLimitExceeded as e:\n self.fail(\"limiter raised RateLimitTimeout unexpectedly!\")\n\n # continue other tests with DefaultRateLimiter\n print(\"setting default rate limiter\")\n self.client.protocol.rate_limiter = DefaultRateLimiter()", "def test_update_speed_limit():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = 
Intersection(center, rad1, speed)\n\n assert i.get_speed_limit() == speed\n\n i.update_speed_limit(27.7)\n\n assert i.get_speed_limit() == 27.7" ]
[ "0.89634675", "0.69365686", "0.67566144", "0.6548813", "0.6407262", "0.63972616", "0.6323105", "0.6290356", "0.6200288", "0.6181754", "0.6152498", "0.61242133", "0.60993487", "0.60549104", "0.60436743", "0.60218275", "0.60065717", "0.60033876", "0.59989065", "0.5996914", "0.59900874", "0.59728295", "0.5971893", "0.5967053", "0.59639674", "0.59134233", "0.589213", "0.5882895", "0.5868703", "0.5857375" ]
0.94088084
0
Test case for update_instance_limit1
def test_update_instance_limit1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_instance_limit(self):\n pass", "def test_update(self):\n\n # Test that instances without application information cannot be started\n incomplete_instance = Instance(self.client, 'foo')\n with self.assertRaises(ValueError):\n incomplete_instance.update()\n\n value = self.instance.update()\n update_instance = self.client.update_instance\n update_instance.assert_called_once_with('nginx', 'nginx', 'latest',\n parameters={\n 'SETTING': 'value'\n },\n options={\n 'storageBucket': 'custom'\n })\n self.assertEqual(value, update_instance.return_value)", "def test_update_case(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_one(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_update9(self):\n pass", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_jwp(self):\n v1, = set_resources_and_sync([make_video(media_id='1234')])\n jwp1 = jwpmodels.Video.objects.get(key=v1.key)\n self.assertEqual(jwp1.updated, v1['updated'])\n\n v1['updated'] += 20\n v1, = set_resources_and_sync([v1])\n jwp1 = jwpmodels.Video.objects.get(key=v1.key)\n self.assertEqual(jwp1.updated, v1['updated'])", "def test_update_occurrence(self):\n pass", "def test_instances_pagination(self, instances_steps, create_instance,\n update_settings):\n instance_name = next(generate_ids('instance'))\n instances = create_instance(instance_name, count=3)\n update_settings(items_per_page=1)\n\n page_instances = instances_steps.page_instances()\n\n page_instances.table_instances.row(\n name=instances[2].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_absence()\n\n page_instances.table_instances.link_next.click()\n\n page_instances.table_instances.row(\n name=instances[1].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_next.click()\n\n page_instances.table_instances.row(\n name=instances[0].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_absence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_prev.click()\n\n page_instances.table_instances.row(\n name=instances[1].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_presence()\n\n page_instances.table_instances.link_prev.click()\n\n page_instances.table_instances.row(\n name=instances[2].name).wait_for_presence(30)\n page_instances.table_instances.link_next.wait_for_presence()\n page_instances.table_instances.link_prev.wait_for_absence()", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n 
LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n 
resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def limit_instances(sender, instance, created, *args, **kwargs):\n\tif created:\n\t\traise ValidationError(\"There can only be 1 instance of this model.\")", "def test_update_cloud(self):\n pass", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_invalid_update_kwarg(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)", "def test_recommended_num_objects(self):\n\n batch = Batch(Mock())\n self.check_instance(batch)\n\n self.assertIsNone(batch.recommended_num_objects)\n self.check_instance(batch)\n\n batch._recommended_num_objects = 10\n self.assertEqual(batch.recommended_num_objects, 10)\n self.check_instance(batch, recom_num_obj=10)\n\n batch._recommended_num_objects = 20\n self.assertEqual(batch.recommended_num_objects, 20)\n self.check_instance(batch, recom_num_obj=20)", "def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. 
a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old 
password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])", "def test_concurrent_updates(self):\r\n instance = TestCounterModel.create()\r\n new1 = TestCounterModel.get(partition=instance.partition)\r\n new2 = TestCounterModel.get(partition=instance.partition)\r\n\r\n new1.counter += 5\r\n new1.save()\r\n new2.counter += 5\r\n new2.save()\r\n\r\n actual = 
TestCounterModel.get(partition=instance.partition)\r\n assert actual.counter == 10", "def test_update_activity(self):\n pass", "def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def test_update_ban(self):\n pass", "def test_success_case(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n tm2 = tm.update(count=9)\r\n\r\n tm3 = TestModel.get(tm.vid)\r\n assert tm2.count == 9\r\n assert tm3.count == 9" ]
[ "0.90392417", "0.72254086", "0.664209", "0.6522369", "0.6504876", "0.64205045", "0.6413986", "0.64058137", "0.63731486", "0.63731486", "0.63731486", "0.6229071", "0.6178786", "0.6153487", "0.6126983", "0.6108289", "0.61072", "0.6079496", "0.6018102", "0.60180384", "0.59937835", "0.5992824", "0.59826016", "0.5956675", "0.59540904", "0.594196", "0.5911387", "0.5911058", "0.5899317", "0.5892015" ]
0.9368797
0
Test case for update_offline_status
def test_update_offline_status(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_status() -> None:\n ...", "def take_server_offline(self, server):\n server.status = 0\n print(f\"Server {server.server_name} is offline\")", "def is_offline(self):\n return not self.is_online", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def refresh_status(self):\n\n pass", "def set_offline(shop_id: int) -> None:\n sql = f\"UPDATE t_shops SET a_online = '0' WHERE a_id = {shop_id}\"\n my_cursor.execute(sql)\n my_db.commit()", "def test_out_of_date(self):\n self.assertTrue(update_available(0.0))", "def check_status(self):", "def test_get_refresh_job_status(self):\n pass", "def can_detect_offline(self):\n raise NotImplementedError(\"Abstract method, must be overridden\")", "def get_offline(self):\n\n\t\treturn self.__offline", "def updateStatus(self, status):\n pass", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_get_refresh_status_if_none(account: Account):\n account._latest_refresh_job_id = None # type: ignore\n assert account.get_refresh_status() == \"UNKNOWN\"", "def set_offline(self, offline):\n\n\t\tif offline is not None and not isinstance(offline, bool):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: offline EXPECTED TYPE: bool', None, None)\n\t\t\n\t\tself.__offline = offline\n\t\tself.__key_modified['offline'] = 1", "def test_update_activity_occurrence_status(self):\n pass", "def test_update_activity(self):\n pass", "def getStatus():", "def test_status(self):\n self.assertEqual('perfect', self.__metric.status())", "def test_set_deprecated_status(self):\n control = factories.ControlFactory()\n self.assertIsNone(control.end_date)\n\n self.api.put(control, control.id, {\n \"status\": all_models.Control.DEPRECATED,\n })\n\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.end_date)", "def remote_status():", "def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False", "def test_set_scan_status(self):\n pass", "def get_status(self):\n if self.status:\n print(f\"Server '{self.server_name}' is online\")\n else:\n print(f\"Server '{self.server_name}' is offline\")", "def get_status() -> None:\n assert scraper.get_status() == True", "def test_update_node_state_servicelight(self):\n pass", "def test_offline_nodes_disabled(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node disabled')", "def _run_offline_test(*args, **kwargs):\n import responses # noqa: F401", "def offline(name, path):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Setting {} to offline\".format(path)\n return ret\n\n __salt__[\"trafficserver.offline\"](path)\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Set {} as offline\".format(path)\n return ret" ]
[ "0.68435526", "0.6768095", "0.6515803", "0.6448386", "0.6448386", "0.63707757", "0.63168746", "0.6233449", "0.6192555", "0.61632586", "0.61563146", "0.61377037", "0.6125013", "0.6087687", "0.6074899", "0.60706824", "0.605092", "0.60194963", "0.59912175", "0.597738", "0.5959864", "0.59297234", "0.5919127", "0.5887178", "0.58816004", "0.5857064", "0.5850311", "0.5847546", "0.5844075", "0.58422" ]
0.9403645
0
Test case for update_project
def test_update_project(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_patch_project(self):\n pass", "def test_replace_project(self):\n pass", "def test_patch_project(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {\n 'title': UPDATED_TITLE,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n }\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.project.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': self.category.pk,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.project.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': True,\n 'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n },\n str(self.project.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner),\n 'inherited': False,\n 'sodar_uuid': str(self.project.get_owner().sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def test_add_project(self):\n pass", "def test_put_project(self):\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n put_data = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)\n\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n model_dict['readme'] = model_dict['readme'].raw\n expected = {\n 'id': self.project.pk,\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': self.category.pk,\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'full_title': self.category.title + ' / ' + UPDATED_TITLE,\n 'has_public_children': False,\n 'sodar_uuid': self.project.sodar_uuid,\n }\n self.assertEqual(model_dict, expected)\n\n expected = {\n 'title': UPDATED_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': UPDATED_DESC,\n 'readme': UPDATED_README,\n 'public_guest_access': True,\n 'archive': False,\n 'roles': {\n str(self.category.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'inherited': True,\n 
'sodar_uuid': str(self.category.get_owner().sodar_uuid),\n },\n str(self.project.get_owner().sodar_uuid): {\n 'role': PROJECT_ROLE_OWNER,\n 'user': self.get_serialized_user(self.user_owner),\n 'inherited': False,\n 'sodar_uuid': str(self.project.get_owner().sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(json.loads(response.content), expected)", "def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def test_projects_patch(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PATCH',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def do_project_update(cs, args):\n raise NotImplementedError", "def test_projects_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_create_project_request(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_remove_project(self):\n pass", "def test_updateVersion(self):\n project = self.makeProject(Version(\"bar\", 2, 1, 0))\n newVersion = Version(\"bar\", 3, 2, 9)\n project.updateVersion(newVersion)\n self.assertEquals(project.getVersion(), newVersion)\n self.assertEquals(\n project.directory.child(\"topfiles\").child(\"README\").getContent(),\n \"3.2.9\")", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_case(self):\n pass", "def test_get_project(self):\n pass", "def update_project_info(data):\n\tif 'pk' in data:\n\t\tif data['pk'] is not None:\n\t\t\tproject = get_or_none(ProjectInfo, pk=data['pk'])\n\t\t\tif project:\n\t\t\t\tproject.name = data['name']\n\t\t\t\tproject.description = data['description']\n\t\t\t\tproject.start_date = data['start_date']\n\t\t\t\tproject.end_date = data['end_date']\n\t\t\t\tproject.save()\n\t\t\t\tprint ('Updated')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False\n\n\telse:\n\t\tprint (\"please provide pk for updating\")\n\t\treturn False", "def test_set_project_itar_information(self):\n pass", "def test_update_team(self):\n pass", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def test_update(app):\n\n assert False", "def test_update_goal(self):\n pass", "def test_update9(self):\n pass", "def test_patch_project_move(self):\n self.assertEqual(\n self.project.full_title,\n self.category.title + ' / ' + self.project.title,\n )\n\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n self.make_assignment(new_category, self.user_owner_cat, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n self.assertEqual(model_dict['parent'], new_category.pk)\n owners = [a.user for a in self.project.get_owners()]\n self.assertIn(self.user_owner_cat, owners)\n 
self.assertIn(self.user_owner, owners)\n\n # Assert child project full title update\n self.assertEqual(\n self.project.full_title,\n new_category.title + ' / ' + self.project.title,\n )\n self.assertEqual(\n json.loads(response.content)['parent'], str(new_category.sodar_uuid)\n )", "def test_projects_id_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))" ]
[ "0.82712364", "0.7873975", "0.7442448", "0.74199355", "0.7384629", "0.7369599", "0.73155415", "0.7272609", "0.7195377", "0.7130262", "0.7127845", "0.7127845", "0.7127845", "0.7080842", "0.7078068", "0.7027116", "0.7027116", "0.7027116", "0.7019853", "0.7015513", "0.700853", "0.6979212", "0.697569", "0.6932203", "0.6932203", "0.69294965", "0.67687577", "0.6764707", "0.6743658", "0.67169243" ]
0.94864285
1
Test case for update_scenario
def test_update_scenario(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_case(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_one(self):\n pass", "def test_update_record(self):\n pass", "def test_update_goal(self):\n pass", "def test_update9(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_activity(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_rule(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state3(self):\n pass", "def test_update(app):\n\n assert False", "def test_update_team(self):\n pass", "def test_add_or_update_case(self):\n pass", "def test_beneficiaries_update_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-update', kwargs={'pk': 1})\n response = self.client.post(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)", "def test_client_update(self):\n pass", "def test_update_inventory(self):\n pass", "def test_update_attribute_data(self):\n pass", "def test_update_sample(self):\n response = self.client.post(reverse('update-proband', args=[self.gel_ir.id]),\n {'outcome': 'testoutcome',\n 'comment': 'testcomment',\n 'case_status': 'N',\n 'pilot_case': True,\n 'mdt_status': 'R',\n 'case_sent': False,\n 'no_primary_findings': False},\n follow=True)\n self.assertContains(response, 'Proband Updated')\n self.assertEquals(response.status_code, 200)\n proband = Proband.objects.get(id=self.proband.id)\n gelir = GELInterpretationReport.objects.get(id=self.gel_ir.id)\n self.assertEqual(proband.comment, 'testcomment')\n self.assertEqual(gelir.pilot_case, True)", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def test_update(self):\n payload = {\n 'name': 'Pecho inclinado',\n 'description': \"New description\",\n 'muscle_group': \"pecho\"\n }\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Exercise.objects.get(id=self.exer1.id).name, payload['name'])", "def test_update_client(self):\n pass", "def test_user_update_request(self):\n pass", "def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)", "def test_update_occurrence(self):\n pass" ]
[ "0.8633607", "0.8184428", "0.8184428", "0.8184428", "0.79896855", "0.7689423", "0.7678988", "0.7498525", "0.74358207", "0.73994005", "0.7351109", "0.7314285", "0.72363174", "0.72361696", "0.72323847", "0.72033507", "0.72013026", "0.715421", "0.7147391", "0.7114819", "0.70637745", "0.7058424", "0.69945914", "0.6993654", "0.6993212", "0.698439", "0.69772035", "0.6958537", "0.6957127", "0.694769" ]
0.9363554
0
Test case for update_software_asset
def test_update_software_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_update_inventory(self):\n pass", "def test_itar_restrict_software_asset(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_software_component_for_system_module(self):\n pass", "def test_update_deployment(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_update_software_components_for_system_module(self):\n pass", "def test_update_system(self):\n pass", "def test_set_asset_license_connected(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = 
create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n story.save()\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)" ]
[ "0.90097904", "0.88216394", "0.84057766", "0.8372628", "0.82926196", "0.81492966", "0.79494447", "0.77838933", "0.77556676", "0.76564956", "0.7455382", "0.74485105", "0.70124674", "0.6892675", "0.6845004", "0.67140436", "0.66513544", "0.66512203", "0.66395545", "0.66260374", "0.6586878", "0.6586241", "0.6526527", "0.6523522", "0.65143263", "0.6377741", "0.6336765", "0.63253635", "0.6300685", "0.6300524" ]
0.9420912
0
Test case for update_software_asset_bundle
def test_update_software_asset_bundle(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_get_software_asset_bundle_expanded(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_update_deployment(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_create_software_bundle_from_system_module(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_software_component_for_system_module(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_release_deployment_run(self):\n pass", "def test_update_software_components_for_system_module(self):\n pass", "def test_delete_system_asset(self):\n pass" ]
[ "0.87701994", "0.8455163", "0.82160974", "0.8140121", "0.7851298", "0.781395", "0.77796733", "0.7751182", "0.75031376", "0.7462378", "0.7409722", "0.7196904", "0.712641", "0.709311", "0.70342386", "0.6997318", "0.6971441", "0.6736863", "0.6725377", "0.6657159", "0.6550591", "0.6428757", "0.6393659", "0.6348923", "0.6273105", "0.62533766", "0.6194761", "0.61796993", "0.6155509", "0.6140976" ]
0.9494568
0
Test case for update_software_asset_content
def test_update_software_asset_content(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_asset_state(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_delete_software_asset_bundle(self):\n pass", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def test_update_test_asset_impact_level(self):\n pass", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_retrieve_system_asset(self):\n pass", "def test_itar_restrict_software_asset(self):\n pass", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_create_system_asset(self):\n pass", "def test_set_asset_license_connected(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n story.save()\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def 
test_submit_asset_to_submission_service(self):\n pass", "def test_locking(self):\r\n def verify_asset_locked_state(locked):\r\n \"\"\" Helper method to verify lock state in the contentstore \"\"\"\r\n asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')\r\n content = contentstore().find(asset_location)\r\n self.assertEqual(content.locked, locked)\r\n\r\n def post_asset_update(lock, course):\r\n \"\"\" Helper method for posting asset update. \"\"\"\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)\r\n\r\n # Load the toy course.\r\n module_store = modulestore('direct')\r\n _, course_items = import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['toy'],\r\n static_content_store=contentstore(),\r\n verbose=True\r\n )\r\n course = course_items[0]\r\n verify_asset_locked_state(False)\r\n\r\n # Lock the asset\r\n resp_asset = post_asset_update(True, course)\r\n self.assertTrue(resp_asset['locked'])\r\n verify_asset_locked_state(True)\r\n\r\n # Unlock the asset\r\n resp_asset = post_asset_update(False, course)\r\n self.assertFalse(resp_asset['locked'])\r\n verify_asset_locked_state(False)", "def test_update_inventory(self):\n pass", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def _update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n 
del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)", "def test_create_software_asset_bundle_from_system_module(self):\n pass" ]
[ "0.8799268", "0.8617799", "0.84500706", "0.8301085", "0.79783267", "0.7929388", "0.787803", "0.78279155", "0.7656188", "0.7133955", "0.7055889", "0.69850993", "0.69302255", "0.67347336", "0.66582596", "0.6600296", "0.64972675", "0.6450786", "0.64347553", "0.6350145", "0.6347334", "0.6327608", "0.6309078", "0.62654865", "0.61628276", "0.6162146", "0.6056771", "0.6037308", "0.60238945", "0.60186976" ]
0.942054
0
Test case for update_software_asset_impact_level
def test_update_software_asset_impact_level(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_test_asset_impact_level(self):\n pass", "def test_update_impact_level(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def test_update_risk_profile_using_put(self):\n pass", "def test_update_inventory(self):\n pass", "def test_change_brightness_back_to_10():", "def test_set_and_get_led_brightness_level(self):", "def test_update_scenario(self):\n pass", "def test_client_risk_assessment_partial_update(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)", "def test_do_update(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _function = DUT.tree.get_node(1).data\r\n _function.availability_logistics = 0.9832\r\n\r\n _error_code, _msg = DUT.do_update(1)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\"RAMSTK SUCCESS: Updating the RAMSTK Program \" \"database.\")", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_UpdateHealthLessThan0 (self) :\n\t\t\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), \\\n\t\t100 + self.healthEffect2)\n\t\tself.person3.updateHealth ()\n\t\tself.assertEqual(self.person3.getHealth(), 0)", "def test_update_goal_metric(self):\n pass", "def test_change_provisioned_throughput_usual_case():", "def report_update():\r\n resources[\"water\"] = resources[\"water\"] - MENU[order][\"ingredients\"][\"water\"]\r\n resources[\"milk\"] = resources[\"milk\"] - MENU[order][\"ingredients\"][\"milk\"]\r\n resources[\"coffee\"] = resources[\"coffee\"] - MENU[order][\"ingredients\"][\"coffee\"]\r\n resources[\"money\"] = resources[\"money\"] + total", "def test_UpdateHealthAt0 (self) :\n\t\t\n\t\tself.person2.updateHealth ()\n\t\tself.assertEqual(self.person2.getHealth(), \\\n\t\t100 + self.healthEffect)", "def test_update_review_status(self):\n risk = factories.RiskFactory()\n new_value = all_models.Review.STATES.REVIEWED\n\n self.api.put(risk, risk.id, {\n \"review_status\": new_value,\n \"review_status_display_name\": \"some status\"\n })\n\n 
risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status, new_value)" ]
[ "0.9056089", "0.8565206", "0.70213765", "0.66773397", "0.66148573", "0.6394925", "0.63873", "0.63491744", "0.6302349", "0.62323797", "0.61223865", "0.5888726", "0.5873419", "0.58609945", "0.5777155", "0.5771139", "0.575647", "0.5701396", "0.5689409", "0.56543475", "0.5647792", "0.5645708", "0.5599465", "0.5597421", "0.5578451", "0.55583435", "0.5554242", "0.5550999", "0.5530233", "0.54853517" ]
0.9490574
0
Test case for update_software_asset_install_script
def test_update_software_asset_install_script(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_import_software_asset(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_update_test_asset_content(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_install(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_itar_restrict_software_asset(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_deployment(self):\n pass", "def test_install(self):\n\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.install(TOOLNAME,adminuser,adminpass)", "def test_update_software_components_for_system_module(self):\n pass", "def test_installed(self):\n # OSA script should have been installed in setUp function\n self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_update_system(self):\n pass", "def test_get_software_bundles(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_retrieve_system_asset(self):\n pass" ]
[ "0.83913547", "0.8184054", "0.8025467", "0.7466697", "0.7389904", "0.71649176", "0.7088116", "0.69908977", "0.6915503", "0.6860652", "0.66807467", "0.6628598", "0.6353948", "0.6325582", "0.62891716", "0.62386537", "0.6202956", "0.61861473", "0.6186", "0.61837184", "0.615646", "0.6114712", "0.60914826", "0.60621625", "0.60060513", "0.6003649", "0.59585005", "0.59233683", "0.5910981", "0.5909941" ]
0.9479009
0
Test case for update_software_component_for_system_module
def test_update_software_component_for_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update_system(self):\n pass", "def test_get_software(self):\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def test_create_software_bundle_from_system_module(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_update_software_asset_bundle(self):\n pass", "def test_component_update_available_REMOVE(self):\n MockPopen.mock_stdout = 'Remv c (old PKG)\\nRemv d PKG'\n self.assertTrue(self.u.component_update_available())", "def test_update_software_asset_install_script(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_update_bios_unit(self):\n pass", "def test_get_software_set(self):\n pass", "def test_get_system(self):\n pass", "def test_patch_hyperflex_software_version_policy(self):\n pass", "def _PerformCompare(self, component):\n\n updater_commands = ['/usr/sbin/chromeos-firmwareupdate', '-V']\n content = subprocess.Popen(updater_commands,\n stdout=subprocess.PIPE).stdout.read()\n system_version = self._GetSystemVersion(component, content)\n whitelist_version = self._GetWhitelistVersion(component)\n self.assertEqual(system_version, whitelist_version, msg='%s does not match'\n ' what is in the whitelist.\\n\\tSystem: %s\\n\\tWhitelist: '\n '%s' % (component, system_version, whitelist_version))", "def test_component_update_available_NEW(self):\n MockPopen.mock_stdout = 'Inst b (new from)'\n self.assertTrue(self.u.component_update_available())", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_get_systems(self):\n pass", "def test_get_current_component_status_OK(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n 'repository/online/component/b': 'yes',\n 'repository/online/component/c': 'yes',\n 'repository/online/component/d': 'yes',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n print >> tmp, 'deb http://host:port/prefix/0.0/maintained/component/ c/arch/'\n print >> tmp, 'deb http://host:port/prefix/0.0/unmaintained/component/ d/arch/'\n tmp.flush()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('c'))\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('d'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()", "def test_update_system_asset(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_component_update_get_packages(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)\\nInst b (new from)\\nRemv c (old PKG)\\nRemv d PKG'\n installed, upgraded, removed = self.u.component_update_get_packages()\n self.assertEqual([('b', 'new')], installed)\n self.assertEqual([('a', 'old', 'new')], upgraded)\n self.assertEqual([('c', 'old'), ('d', 'unknown')], removed)", "def test_update_device(self):\n pass" ]
[ "0.9378847", "0.8261632", "0.73587364", "0.70133525", "0.7002387", "0.69219327", "0.68841183", "0.6825436", "0.67245394", "0.66946787", "0.6648212", "0.659147", "0.6586715", "0.65491015", "0.6407388", "0.63306653", "0.6315472", "0.6305889", "0.63046604", "0.6295179", "0.6291226", "0.6273297", "0.6231148", "0.6200821", "0.6196731", "0.612074", "0.60621345", "0.6018256", "0.59759367", "0.59527403" ]
0.9578852
0
Test case for update_software_components_for_system_module
def test_update_software_components_for_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_component_for_system_module(self):\n pass", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update_system(self):\n pass", "def test_create_software_bundle_from_system_module(self):\n pass", "def test_get_software(self):\n pass", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_component_update_available_REMOVE(self):\n MockPopen.mock_stdout = 'Remv c (old PKG)\\nRemv d PKG'\n self.assertTrue(self.u.component_update_available())", "def test_update_software_asset_bundle(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def test_get_systems(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_get_software_bundle(self):\n pass", "def test_get_software_set(self):\n pass", "def test_patch_hyperflex_software_version_policy(self):\n pass", "def test_get_system(self):\n pass", "def test_update_bios_unit(self):\n pass", "def test_component_update_get_packages(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)\\nInst b (new from)\\nRemv c (old PKG)\\nRemv d PKG'\n installed, upgraded, removed = self.u.component_update_get_packages()\n self.assertEqual([('b', 'new')], installed)\n self.assertEqual([('a', 'old', 'new')], upgraded)\n self.assertEqual([('c', 'old'), ('d', 'unknown')], removed)", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def _PerformCompare(self, component):\n\n updater_commands = ['/usr/sbin/chromeos-firmwareupdate', '-V']\n content = subprocess.Popen(updater_commands,\n stdout=subprocess.PIPE).stdout.read()\n system_version = self._GetSystemVersion(component, content)\n whitelist_version = self._GetWhitelistVersion(component)\n self.assertEqual(system_version, whitelist_version, msg='%s does not match'\n ' what is in the whitelist.\\n\\tSystem: %s\\n\\tWhitelist: '\n '%s' % (component, system_version, whitelist_version))", "def test_component_update_available_NEW(self):\n MockPopen.mock_stdout = 'Inst b (new from)'\n self.assertTrue(self.u.component_update_available())", "def add_software_per_node(session, data, username='system_user'):\n session = validate_session(session)\n session.commit()\n operation = operation_exists(session, data['operation_id'])\n node_id = data['node_id']\n if node_id:\n node = session.query(SystemInfo).\\\n filter(SystemInfo.node_id == node_id).first()\n if operation:\n results = add_results_non_json(session, node_id=node_id,\n oper_id=data['operation_id'],\n result=True, results_received=datetime.now()\n )\n for addupdate in data['data']:\n update_exists = node_package_exists(session, node_id,\n addupdate['toppatch_id'])\n if 'date_installed' in addupdate:\n date_installed = date_parser(addupdate['date_installed'])\n else:\n date_installed = None\n hidden = return_bool(addupdate['hidden'])\n installed = return_bool(addupdate['installed'])\n if not update_exists:\n if node.os_code == \"linux\":\n node_update = PackagePerNode(node_id,\n addupdate['toppatch_id'], 
date_installed,\n hidden, installed=installed, is_linux=True\n )\n elif node.os_code == \"windows\":\n node_update = PackagePerNode(node_id,\n addupdate['toppatch_id'], date_installed,\n hidden, installed=installed, is_windows=True\n )\n elif node.os_code == \"darwin\":\n node_update = PackagePerNode(node_id,\n addupdate['toppatch_id'], date_installed,\n hidden, installed=installed, is_darwin=True\n )\n elif node.os_code == \"bsd\":\n node_update = PackagePerNode(node_id,\n addupdate['toppatch_id'], date_installed,\n hidden, installed=installed, is_bsd=True\n )\n elif node.os_code == \"unix\":\n node_update = PackagePerNode(node_id,\n addupdate['toppatch_id'], date_installed,\n hidden, installed=installed, is_unix=True\n )\n try:\n session.add(node_update)\n session.commit()\n except Exception as e:\n session.rollback()\n else:\n try:\n update_exists.installed = installed\n update_exists.hidden = hidden\n update_exists.date_installed = date_installed\n session.commit()\n except Exception as e:\n session.rollback()\n update_node_stats(session, node_id)\n update_tag_stats(session, node_id)\n update_network_stats(session, node_id)", "def test_update_storage_systems_info_pass(self):\n self._set_args({\"password\": \"password\", \"subnet_mask\": \"192.168.1.0/24\",\n \"systems\": [{\"ssid\": \"1\", \"serial\": \"1\"}, {\"addresses\": [\"192.168.1.36\"]}, {\"serial\": \"2\"}, {\"serial\": \"5\"}]})\n systems = NetAppESeriesProxySystems()\n systems.systems = [\n {\"ssid\": \"1\", \"serial\": \"1\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.5\", \"192.168.1.6\"], \"embedded_available\": True, \"accept_certificate\": True,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}]\n\n with mock.patch(self.REQUEST_FUNC, return_value=(200, [{\"id\": \"1\", \"passwordStatus\": \"valid\", \"metaTags\": []},\n {\"id\": \"5\", \"passwordStatus\": \"valid\", \"metaTags\": []}])):\n systems.update_storage_systems_info()\n self.assertEquals(systems.systems_to_remove, [\"5\"])\n self.assertEquals(systems.systems_to_add, [\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None,\n \"stored_password_valid\": None, \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False,\n \"accept_certificate\": False, \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": 
None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}])", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def test_update_template_profile_for_system_module(self):\n pass" ]
[ "0.94248444", "0.8288344", "0.7360887", "0.6953799", "0.6945915", "0.67524755", "0.6744062", "0.67024505", "0.6653616", "0.65795577", "0.65515274", "0.6533397", "0.63919365", "0.63894856", "0.6340409", "0.63366187", "0.6326915", "0.6287757", "0.6284077", "0.6283095", "0.62538254", "0.6234224", "0.61960846", "0.6188628", "0.61689144", "0.599509", "0.5982744", "0.5945588", "0.59344286", "0.5922677" ]
0.95613813
0
Test case for update_software_configuration_for_system_module
def test_update_software_configuration_for_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_update_system(self):\n pass", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def test_get_software(self):\n pass", "def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_update_reg_ex_config(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_create_software_bundle_from_system_module(self):\n pass", "def test_get_system(self):\n pass", "def test_update_template_profile_for_system_module(self):\n pass", "def test_get_software_set(self):\n pass", "def test_patch_hyperflex_sys_config_policy(self):\n pass", "def test_patch_hyperflex_software_version_policy(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "def system_upgrade(cfg, target):\n mycfg = {'system_upgrade': {'enabled': False}}\n config.merge_config(mycfg, cfg)\n mycfg = mycfg.get('system_upgrade')\n if not isinstance(mycfg, dict):\n LOG.debug(\"system_upgrade disabled by config. entry not a dict.\")\n return\n\n if not config.value_as_boolean(mycfg.get('enabled', True)):\n LOG.debug(\"system_upgrade disabled by config.\")\n return\n\n util.system_upgrade(target=target)", "async def test_manual_configuration_update_configuration(hass):\n device = await setup_axis_integration(hass)\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": \"user\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n mock_device = Mock()\n mock_device.vapix.params.system_serialnumber = MAC\n\n with patch(\n \"homeassistant.components.axis.config_flow.get_device\",\n return_value=mock_device,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"2.3.4.5\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"2.3.4.5\"", "def test_update_storage_systems_info_pass(self):\n self._set_args({\"password\": \"password\", \"subnet_mask\": \"192.168.1.0/24\",\n \"systems\": [{\"ssid\": \"1\", \"serial\": \"1\"}, {\"addresses\": [\"192.168.1.36\"]}, {\"serial\": \"2\"}, {\"serial\": \"5\"}]})\n systems = NetAppESeriesProxySystems()\n systems.systems = [\n {\"ssid\": \"1\", \"serial\": \"1\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.5\", \"192.168.1.6\"], \"embedded_available\": True, \"accept_certificate\": True,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], 
\"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}]\n\n with mock.patch(self.REQUEST_FUNC, return_value=(200, [{\"id\": \"1\", \"passwordStatus\": \"valid\", \"metaTags\": []},\n {\"id\": \"5\", \"passwordStatus\": \"valid\", \"metaTags\": []}])):\n systems.update_storage_systems_info()\n self.assertEquals(systems.systems_to_remove, [\"5\"])\n self.assertEquals(systems.systems_to_add, [\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None,\n \"stored_password_valid\": None, \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False,\n \"accept_certificate\": False, \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}])", "def test_update_deployment(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_node_driveconfig(self):\n pass", "def test_get_systems(self):\n pass", "def test_update_pci_switch(self):\n pass", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"" ]
[ "0.85446596", "0.8518756", "0.7423729", "0.68361396", "0.6770421", "0.6716852", "0.67028815", "0.66129404", "0.6603769", "0.6377524", "0.6371242", "0.6350365", "0.6344052", "0.6289237", "0.6274943", "0.622654", "0.62143934", "0.62125593", "0.61739755", "0.6142919", "0.60932356", "0.60646826", "0.6053832", "0.60103244", "0.59955907", "0.5983404", "0.59570974", "0.5927294", "0.5927294", "0.5904769" ]
0.9527486
0
Test case for update_state
def test_update_state(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_state2(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_state3(self):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_update_team_state(self):\n pass", "def update_state(self, dstate):\n pass", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def update_to_state(self, game_state):\n pass", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_case(self):\n pass", "def update_state(self, context):\n pass", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_page_state(self):\n pageStateObj = PageState()\n response = self.client.open(\n '/rui_support/page-state/{tempIdentifier}'.format(tempIdentifier='tempIdentifier_example'),\n method='PATCH',\n data=json.dumps(pageStateObj),\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def test_update_scenario(self):\n pass", "def update(self):\n self._state = 23", "def test_update_asset_state(self):\n pass", "def update(self, new_gameStateData):\r\n pass", "def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, 
datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)", "def test_update_deployment_state(self):\n pass", "def test_update_node_state_smartfail(self):\n pass", "def test_update_node_state_servicelight(self):\n pass", "def _update_state_value(self, old_state, new_state):\n if not self._test_mode:\n if isinstance(new_state, int):\n self._state_values[old_state] += self._alpha * \\\n (new_state - self._state_values[old_state])\n else:\n self._state_values[old_state] += self._alpha * \\\n (self._state_values[new_state] - self._state_values[old_state])", "def set_state(self, state: int):" ]
[ "0.90650016", "0.8994204", "0.8882347", "0.88507277", "0.7840166", "0.7732111", "0.77048653", "0.7547397", "0.75329256", "0.7501524", "0.74654764", "0.7379991", "0.7304678", "0.7224549", "0.7195168", "0.7174975", "0.7174975", "0.7174975", "0.7053789", "0.70275736", "0.7019611", "0.7019404", "0.69811124", "0.69807166", "0.69387764", "0.6889305", "0.6870769", "0.6870658", "0.68512315", "0.68318933" ]
0.92860615
0
Test case for update_state1
def test_update_state1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_state2(self):\n pass", "def test_update_state3(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_state(self):\n pass", "def test_update_team_state(self):\n pass", "def test_update_case(self):\n pass", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_scenario(self):\n pass", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def test_update_one(self):\n pass", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_add_or_update_state_for_none_state_key(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n has_value, val = _run(state_manager.try_get_state('state1'))\n self.assertTrue(has_value)\n self.assertEqual('value1', val)\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)", "def update_to_state(self, game_state):\n pass", "def update_state(self, dstate):\n pass", "def test_update9(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_update_node_state_smartfail(self):\n pass", "async def test_state_update(hass):\n assert await setup_multimatic(hass)\n assert_entities_count(hass, 11)\n\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n assert 
hass.states.is_state(\"binary_sensor.room_1_window\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"off\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"off\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes.get(\"start_date\") is None\n assert state.attributes.get(\"end_date\") is None\n assert state.attributes.get(\"temperature\") is None\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"off\")\n\n dhw = SystemManagerMock.data[\"get_dhw\"]\n dhw.circulation.time_program = time_program(SettingModes.ON, None)\n dhw.circulation.operating_mode = OperatingModes.AUTO\n\n hvac_status = SystemManagerMock.data[\"get_hvac_status\"]\n hvac_status.boiler_status.status_code = \"F11\"\n hvac_status.online = \"OFFLINE\"\n hvac_status.update = \"UPDATE_PENDING\"\n hvac_status.errors = [\n Error(\"device\", \"title\", \"status_code\", \"descr\", datetime.datetime.now())\n ]\n\n rooms = SystemManagerMock.data[\"get_rooms\"]\n rooms[0].devices = [Device(\"Device 1\", \"123456789\", \"VALVE\", True, True)]\n rooms[0].time_program = time_program(None, 20)\n rooms[0].temperature = 22\n rooms[0].target_high = 24\n rooms[0].operating_mode = OperatingModes.AUTO\n rooms[0].child_lock = True\n rooms[0].window_open = True\n\n new_holiday_mode = active_holiday_mode()\n SystemManagerMock.data[\"get_holiday_mode\"] = new_holiday_mode\n SystemManagerMock.data[\"get_quick_mode\"] = QuickModes.HOTWATER_BOOST\n\n await goto_future(hass)\n\n assert_entities_count(hass, 11)\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"on\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes[\"start_date\"] == new_holiday_mode.start_date.isoformat()\n assert state.attributes[\"end_date\"] == new_holiday_mode.end_date.isoformat()\n assert state.attributes[\"temperature\"] == new_holiday_mode.target\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_quick_mode\")\n assert state.attributes[\"quick_mode\"] == QuickModes.HOTWATER_BOOST.name\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n\n SystemManagerMock.data[\"get_holiday_mode\"] = HolidayMode(False)\n\n await goto_future(hass)\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"on\")", "def update_state(self, context):\n pass", "async def 
test_state(hass, service, state):\n calls_1 = async_mock_service(hass, DOMAIN, service)\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, state)\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 1\n assert calls_1[0].data == {'entity_id': ENTITY_1}", "def test_update_node_state_servicelight(self):\n pass", "def dummy_update( self ):\r\n pass", "def test_update_asset_state(self):\n pass" ]
[ "0.9220482", "0.90468025", "0.8997684", "0.89022136", "0.7444404", "0.7378123", "0.73367244", "0.73064166", "0.72740257", "0.71150786", "0.71029127", "0.7085263", "0.7074055", "0.70731896", "0.70468974", "0.70468974", "0.70468974", "0.6989267", "0.69715285", "0.69641393", "0.69199383", "0.68485284", "0.684169", "0.6826347", "0.6812724", "0.67060417", "0.6703801", "0.6699872", "0.6692081", "0.66597664" ]
0.94308144
0
Test case for update_state2
def test_update_state2(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_state1(self):\n pass", "def test_update_state3(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_state(self):\n pass", "def test_update_team_state(self):\n pass", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update_case(self):\n pass", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def update_to_state(self, game_state):\n pass", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_node_state_smartfail(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def update_state(self, dstate):\n pass", "def test_add_or_update_state_for_none_state_key(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n has_value, val = _run(state_manager.try_get_state('state1'))\n self.assertTrue(has_value)\n self.assertEqual('value1', val)\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)", "def test_update_scenario(self):\n pass", "def update_state(self, context):\n pass", "def test_state_after_failure(self):\n pass", "async def test_state_update(hass):\n assert await setup_multimatic(hass)\n assert_entities_count(hass, 11)\n\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"off\")\n assert 
hass.states.is_state(\"binary_sensor.123456789_lock\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"off\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"off\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes.get(\"start_date\") is None\n assert state.attributes.get(\"end_date\") is None\n assert state.attributes.get(\"temperature\") is None\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"off\")\n\n dhw = SystemManagerMock.data[\"get_dhw\"]\n dhw.circulation.time_program = time_program(SettingModes.ON, None)\n dhw.circulation.operating_mode = OperatingModes.AUTO\n\n hvac_status = SystemManagerMock.data[\"get_hvac_status\"]\n hvac_status.boiler_status.status_code = \"F11\"\n hvac_status.online = \"OFFLINE\"\n hvac_status.update = \"UPDATE_PENDING\"\n hvac_status.errors = [\n Error(\"device\", \"title\", \"status_code\", \"descr\", datetime.datetime.now())\n ]\n\n rooms = SystemManagerMock.data[\"get_rooms\"]\n rooms[0].devices = [Device(\"Device 1\", \"123456789\", \"VALVE\", True, True)]\n rooms[0].time_program = time_program(None, 20)\n rooms[0].temperature = 22\n rooms[0].target_high = 24\n rooms[0].operating_mode = OperatingModes.AUTO\n rooms[0].child_lock = True\n rooms[0].window_open = True\n\n new_holiday_mode = active_holiday_mode()\n SystemManagerMock.data[\"get_holiday_mode\"] = new_holiday_mode\n SystemManagerMock.data[\"get_quick_mode\"] = QuickModes.HOTWATER_BOOST\n\n await goto_future(hass)\n\n assert_entities_count(hass, 11)\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"on\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes[\"start_date\"] == new_holiday_mode.start_date.isoformat()\n assert state.attributes[\"end_date\"] == new_holiday_mode.end_date.isoformat()\n assert state.attributes[\"temperature\"] == new_holiday_mode.target\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_quick_mode\")\n assert state.attributes[\"quick_mode\"] == QuickModes.HOTWATER_BOOST.name\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n\n SystemManagerMock.data[\"get_holiday_mode\"] = HolidayMode(False)\n\n await goto_future(hass)\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"on\")", "def test_update_asset_state(self):\n pass", "def test_add_or_update_state_for_removed_state(self):\n def test_update_value(name, 
value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n _run(state_manager.remove_state('state1'))\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)", "def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)", "def test_update_node_state_servicelight(self):\n pass", "def test_update9(self):\n pass", "def test_update_one(self):\n pass" ]
[ "0.91473377", "0.89780223", "0.8873793", "0.87756175", "0.74923503", "0.73244196", "0.7199641", "0.7134783", "0.70961064", "0.70937085", "0.7061208", "0.7052283", "0.6953614", "0.69286966", "0.6883234", "0.68626595", "0.68626595", "0.68626595", "0.68593365", "0.68404084", "0.68292236", "0.68038905", "0.66689223", "0.6655468", "0.6643177", "0.66415834", "0.6634662", "0.6630227", "0.6630015", "0.6618335" ]
0.9339467
0
Test case for update_state3
def test_update_state3(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_state4(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_state(self):\n pass", "def test_update_team_state(self):\n pass", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update_case(self):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update_asset_state(self):\n pass", "def test_update_node_state_smartfail(self):\n pass", "def update_to_state(self, game_state):\n pass", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def test_update_scenario(self):\n pass", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def test_add_or_update_state_for_none_state_key(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n has_value, val = _run(state_manager.try_get_state('state1'))\n self.assertTrue(has_value)\n self.assertEqual('value1', val)\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)", "def test_update9(self):\n pass", "def test_update_node_state_servicelight(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def update_state(self, dstate):\n pass", "def test_update_deployment_state(self):\n pass", "def update(self, new_gameStateData):\r\n pass", "def test_state_after_failure(self):\n pass", "def update_state(self, context):\n pass", "async def 
test_state_update(hass):\n assert await setup_multimatic(hass)\n assert_entities_count(hass, 11)\n\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"off\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"off\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes.get(\"start_date\") is None\n assert state.attributes.get(\"end_date\") is None\n assert state.attributes.get(\"temperature\") is None\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"off\")\n\n dhw = SystemManagerMock.data[\"get_dhw\"]\n dhw.circulation.time_program = time_program(SettingModes.ON, None)\n dhw.circulation.operating_mode = OperatingModes.AUTO\n\n hvac_status = SystemManagerMock.data[\"get_hvac_status\"]\n hvac_status.boiler_status.status_code = \"F11\"\n hvac_status.online = \"OFFLINE\"\n hvac_status.update = \"UPDATE_PENDING\"\n hvac_status.errors = [\n Error(\"device\", \"title\", \"status_code\", \"descr\", datetime.datetime.now())\n ]\n\n rooms = SystemManagerMock.data[\"get_rooms\"]\n rooms[0].devices = [Device(\"Device 1\", \"123456789\", \"VALVE\", True, True)]\n rooms[0].time_program = time_program(None, 20)\n rooms[0].temperature = 22\n rooms[0].target_high = 24\n rooms[0].operating_mode = OperatingModes.AUTO\n rooms[0].child_lock = True\n rooms[0].window_open = True\n\n new_holiday_mode = active_holiday_mode()\n SystemManagerMock.data[\"get_holiday_mode\"] = new_holiday_mode\n SystemManagerMock.data[\"get_quick_mode\"] = QuickModes.HOTWATER_BOOST\n\n await goto_future(hass)\n\n assert_entities_count(hass, 11)\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"on\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes[\"start_date\"] == new_holiday_mode.start_date.isoformat()\n assert state.attributes[\"end_date\"] == new_holiday_mode.end_date.isoformat()\n assert state.attributes[\"temperature\"] == new_holiday_mode.target\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_quick_mode\")\n assert state.attributes[\"quick_mode\"] == QuickModes.HOTWATER_BOOST.name\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n\n SystemManagerMock.data[\"get_holiday_mode\"] = 
HolidayMode(False)\n\n await goto_future(hass)\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"on\")", "def test_add_or_update_state_for_removed_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n _run(state_manager.remove_state('state1'))\n\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)" ]
[ "0.8868809", "0.87086105", "0.8690531", "0.84261096", "0.7308455", "0.6883479", "0.6845802", "0.6839111", "0.6818367", "0.68052644", "0.67649287", "0.6679476", "0.66562235", "0.66383916", "0.6637185", "0.6608426", "0.6582253", "0.65740657", "0.6563326", "0.65477777", "0.65477777", "0.65477777", "0.6529901", "0.6459128", "0.6440037", "0.6393522", "0.63477993", "0.6341353", "0.6312532", "0.6285402" ]
0.94126165
0
Test case for update_state4
def test_update_state4(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_state3(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_state(self):\n pass", "def test_update_team_state(self):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update_case(self):\n pass", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def update_state(self, dstate):\n pass", "def test_update_scenario(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update9(self):\n pass", "def test_update_node_state_smartfail(self):\n pass", "def update_to_state(self, game_state):\n pass", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update_node_state_servicelight(self):\n pass", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def update_state(self, context):\n pass", "def update(self, new_gameStateData):\r\n pass", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def test_update_deployment_state(self):\n pass", "def test_update_asset_state(self):\n pass", "async def test_state_update(hass):\n assert await setup_multimatic(hass)\n assert_entities_count(hass, 11)\n\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"off\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"off\")\n assert 
hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"off\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes.get(\"start_date\") is None\n assert state.attributes.get(\"end_date\") is None\n assert state.attributes.get(\"temperature\") is None\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"off\")\n\n dhw = SystemManagerMock.data[\"get_dhw\"]\n dhw.circulation.time_program = time_program(SettingModes.ON, None)\n dhw.circulation.operating_mode = OperatingModes.AUTO\n\n hvac_status = SystemManagerMock.data[\"get_hvac_status\"]\n hvac_status.boiler_status.status_code = \"F11\"\n hvac_status.online = \"OFFLINE\"\n hvac_status.update = \"UPDATE_PENDING\"\n hvac_status.errors = [\n Error(\"device\", \"title\", \"status_code\", \"descr\", datetime.datetime.now())\n ]\n\n rooms = SystemManagerMock.data[\"get_rooms\"]\n rooms[0].devices = [Device(\"Device 1\", \"123456789\", \"VALVE\", True, True)]\n rooms[0].time_program = time_program(None, 20)\n rooms[0].temperature = 22\n rooms[0].target_high = 24\n rooms[0].operating_mode = OperatingModes.AUTO\n rooms[0].child_lock = True\n rooms[0].window_open = True\n\n new_holiday_mode = active_holiday_mode()\n SystemManagerMock.data[\"get_holiday_mode\"] = new_holiday_mode\n SystemManagerMock.data[\"get_quick_mode\"] = QuickModes.HOTWATER_BOOST\n\n await goto_future(hass)\n\n assert_entities_count(hass, 11)\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"on\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes[\"start_date\"] == new_holiday_mode.start_date.isoformat()\n assert state.attributes[\"end_date\"] == new_holiday_mode.end_date.isoformat()\n assert state.attributes[\"temperature\"] == new_holiday_mode.target\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_quick_mode\")\n assert state.attributes[\"quick_mode\"] == QuickModes.HOTWATER_BOOST.name\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n\n SystemManagerMock.data[\"get_holiday_mode\"] = HolidayMode(False)\n\n await goto_future(hass)\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"on\")", "def test_update_page_state(self):\n pageStateObj = PageState()\n response = self.client.open(\n '/rui_support/page-state/{tempIdentifier}'.format(tempIdentifier='tempIdentifier_example'),\n method='PATCH',\n data=json.dumps(pageStateObj),\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + 
response.data.decode('utf-8'))", "def test_update_one(self):\n pass", "def update(self):\n self._state = 23" ]
[ "0.9059846", "0.88674414", "0.88480985", "0.8702609", "0.7379894", "0.7227429", "0.7169814", "0.716223", "0.69861054", "0.6944493", "0.6881959", "0.6878889", "0.68730235", "0.68730235", "0.68730235", "0.6799802", "0.67940295", "0.67679036", "0.6767171", "0.6751696", "0.67292595", "0.66569245", "0.6625814", "0.6615217", "0.66085863", "0.65627503", "0.65539396", "0.6530851", "0.64849055", "0.6467537" ]
0.93870157
0
Test case for update_submission_service
def test_update_submission_service(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_submission(self):\n sub_response_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]\n sub_response_update = self.client.put(\n '/submission/{}'.format(str(sub.public_id)),\n headers=dict(\n Authorization=\"Token {}\".format(self.token)\n ),\n data=json.dumps(dict(\n submitted_texts=['updated_text1']\n )),\n content_type='application/json'\n )\n update_data = json.loads(sub_response_update.data.decode())\n upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)\n self.assertTrue(update_data['status']=='success')\n self.assertTrue(upd_sub.text_count == 1)", "def test_update_submission(self):\n self.call_file_submission()\n # note: this is a quarterly test submission, so\n # updated dates must still reflect a quarter\n if CONFIG_BROKER[\"use_aws\"]:\n update_json = {\"existing_submission_id\": self.updateSubmissionId,\n \"award_financial\": \"updated.csv\",\n \"reporting_period_start_date\": \"04/2016\",\n \"reporting_period_end_date\": \"06/2016\"}\n else:\n # If local must use full destination path\n file_path = CONFIG_BROKER[\"broker_files\"]\n update_json = {\"existing_submission_id\": self.updateSubmissionId,\n \"award_financial\": os.path.join(file_path, \"updated.csv\"),\n \"reporting_period_start_date\": \"04/2016\",\n \"reporting_period_end_date\": \"06/2016\"}\n # Mark submission as published\n with create_app().app_context():\n sess = GlobalDB.db().session\n update_submission = sess.query(Submission).filter(Submission.submission_id == self.updateSubmissionId).one()\n update_submission.publish_status_id = PUBLISH_STATUS_DICT['published']\n sess.commit()\n update_response = self.app.post_json(\"/v1/submit_files/\", update_json,\n headers={\"x-session-id\": self.session_id})\n self.assertEqual(update_response.status_code, 200)\n self.assertEqual(update_response.headers.get(\"Content-Type\"), \"application/json\")\n\n json = update_response.json\n self.assertIn(\"updated.csv\", json[\"award_financial_key\"])\n submission_id = json[\"submission_id\"]\n submission = sess.query(Submission).filter(Submission.submission_id == submission_id).one()\n self.assertEqual(submission.cgac_code, \"SYS\") # Should not have changed agency name\n self.assertEqual(submission.reporting_start_date.strftime(\"%m/%Y\"), \"04/2016\")\n self.assertEqual(submission.reporting_end_date.strftime(\"%m/%Y\"), \"06/2016\")\n self.assertEqual(submission.publish_status_id, PUBLISH_STATUS_DICT['updated'])", "def test_add_submission_service_to_project(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_case(self):\n pass", "def test_update_item_using_post(self):\n pass", "def test_submit_asset_to_submission_service(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_submissions(self):\r\n # Basic case, things go well.\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"submitted\")\r\n\r\n # We post, but Software Secure doesn't like what we send for some reason\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_error):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")\r\n\r\n # We try to post, but run into an error (in this case a newtork 
connection error)\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_unavailable):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")", "def test_user_update_request(self):\n pass", "def test_remove_submission_service_from_project(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_review_modify(self):\n # Setup\n request_url = reverse(\n \"update-modify\",\n host=PUBLISH_HOST,\n kwargs={\n \"pk\": self.dataset_revision.dataset_id,\n \"pk1\": self.dataset_unpublished.organisation_id,\n },\n )\n # Test\n response = self.client.get(request_url)\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"publish/feed_form.html\")\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.UPLOAD_STEP\n )", "def test_get_submission(self):\n # creating a submission\n sub_register = register_ok_submission(self, self.token)\n response_data = json.loads(sub_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n # getting it from the service\n get_response = get_submissions(self, self.token)\n response_data = json.loads(get_response.data.decode())\n self.assertTrue(response_data['data'][0]['text_count']==2)\n self.assertTrue(isinstance(response_data['data'][0]['texts'], list))", "def test_post_update_sucess(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n payload = {\n 'title': 'Updated title',\n 'content': 'Updated content'\n }\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.patch(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n updated_post = Post.objects.filter(\n id=self.post1.id,\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content')\n )\n self.assertTrue(updated_post.exists())", "def test_update_one(self):\n pass", "def test_update_answer(self):\n self.app.post(\"/api/v2/answers/1/answer\", headers=self.headers,\n data=json.dumps(self.answer)) \n response = self.app.patch(\n \"/api/v2/answers/1/answer\", headers=self.headers, data=json.dumps(self.answer))\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['status'], 200)", "def test_meeting_registrant_question_update(self):\n pass", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def test_beneficiaries_update_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-update', kwargs={'pk': 1})\n response = self.client.post(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)", "def test_update_post_text(mocker, reddit_submission_obj):\n patched_task = mocker.patch(\n \"search.search_index_helpers.update_document_with_partial\"\n )\n update_post_text(reddit_submission_obj)\n assert patched_task.delay.called is True\n assert patched_task.delay.call_args[0] == (\n gen_post_id(reddit_submission_obj.id),\n {\n \"text\": reddit_submission_obj.selftext,\n \"plain_text\": render_article_text(reddit_submission_obj.article_content),\n },\n POST_TYPE,\n )", "def test_meeting_update(self):\n pass", "def test_form_editing(self):\n update = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': True,\n }\n\n form = 
self.form_cls(update, instance=self.entry)\n\n form.save()\n\n actual = models.Entry.objects.get(pk=self.entry.pk)\n self.assertEquals(actual.title, update['title'])\n self.assertEquals(actual.content.raw, update['content'])\n self.assertIsNotNone(actual.published_timestamp)", "def test_update_activity(self):\n pass", "def test_update(self):\n payload = {\n 'id': self.rout1.id,\n 'name': 'Tuesday routine',\n 'exercises': [self.exer1.id]\n }\n response = self.client.put(\n '/routines/{}/'.format(self.rout1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Routine.objects.get(id=self.rout1.id).name, payload['name'])", "def taco_test_post_param_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('POST', '/item/4', body=body)\n result = webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))\n # webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))\n debug.log('result', result)", "def test_partial_update_services(self):\n\n services = sample_services(user=self.user)\n services.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name='Transformer')\n\n payload = {'title' : 'sample service job' , 'tags' : [new_tag.id]}\n url = detail_url(services.id)\n self.client.patch(url, payload)\n\n services.refresh_from_db()\n\n self.assertEqual(services.title, payload['title'])\n tags = services.tags.all()\n\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)", "def test_update_goal(self):\n pass" ]
[ "0.7986529", "0.7391966", "0.72174513", "0.67534065", "0.6728488", "0.67201805", "0.6701196", "0.662485", "0.662485", "0.662485", "0.6623543", "0.656298", "0.6560935", "0.64342374", "0.6423602", "0.6408815", "0.62988925", "0.6281675", "0.6247902", "0.6242018", "0.6235286", "0.6185605", "0.6162292", "0.6158756", "0.61578006", "0.61455905", "0.6127432", "0.6124087", "0.6117566", "0.6115693" ]
0.9418527
0
Test case for update_system
def test_update_system(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update9(self):\n pass", "def needs_update(self, system, environment_input):\n pass", "def test_duo_application_update(self):\n pass", "def test_update_case(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_smart_update(self):\n if os.getuid() != 0:\n return self.skipTest(\"root privileges required to opt in\")\n updater = AptMirrorUpdater()\n # Remove all existing package lists.\n updater.clear_package_lists()\n # Verify that package lists aren't available.\n assert not have_package_lists()\n # Run `apt-get update' to download the package lists.\n updater.smart_update()\n # Verify that package lists are again available.\n assert have_package_lists()", "def test_update_scenario(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_deployment(self):\n pass", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def test_update(app):\n\n assert False", "def update(self, system, environment_input):\n pass", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def test_update_bios_unit(self):\n pass", "def test_update(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.update(TOOLNAME,username,userpass)", "def test_get_system(self):\n pass", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)", "def test_arbitrary(self):\n self.executor.add_command('apt_get')\n output, _error = self.executor.apt_get.update().batch()\n self.assertEqual(output, 'update finished successfully')", "async def test_update_fails_if_out_of_date(coresys: CoreSys) -> None:\n coresys.core.state = CoreState.RUNNING\n with patch.object(\n type(coresys.supervisor), \"need_update\", new=PropertyMock(return_value=True)\n ), patch.object(\n type(coresys.os), \"available\", new=PropertyMock(return_value=True)\n ), pytest.raises(\n HassOSJobError\n ):\n await coresys.os.update()", "def test_dumb_update(self):\n if os.getuid() != 0:\n return self.skipTest(\"root privileges required to opt in\")\n updater = AptMirrorUpdater()\n # Remove all existing package lists.\n updater.clear_package_lists()\n # Verify that package lists aren't available.\n assert not have_package_lists()\n # Run `apt-get update' to download the package lists.\n updater.dumb_update()\n # Verify that package lists are again available.\n assert have_package_lists()", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: 
bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_update_hyperflex_server_firmware_version(self):\n pass" ]
[ "0.7573081", "0.75620997", "0.7388021", "0.7388021", "0.7388021", "0.7367651", "0.73206335", "0.7276517", "0.72452503", "0.72151643", "0.7154748", "0.7105684", "0.70866823", "0.7051622", "0.69976836", "0.6959212", "0.69474953", "0.69256157", "0.6844956", "0.6821314", "0.6814249", "0.6806258", "0.6785655", "0.6785655", "0.6734213", "0.673085", "0.67289156", "0.6685543", "0.6666699", "0.6641966" ]
0.93254966
0
Test case for update_system_asset
def test_update_system_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_create_system_asset(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_retrieve_system_asset(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_delete_system_asset(self):\n pass", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_import_software_asset(self):\n pass", "def test_update_system(self):\n pass", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_get_test_asset(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def test_list_system_assets(self):\n pass", "def test_update_inventory(self):\n pass", "def test_delete_software_asset_bundle(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_update_asset_visibility_query(self):\n pass" ]
[ "0.8627617", "0.86174226", "0.847632", "0.83158314", "0.8198396", "0.8134645", "0.8015189", "0.78182375", "0.7675692", "0.7624564", "0.7601504", "0.7267885", "0.720338", "0.7172237", "0.7145844", "0.69633514", "0.6903347", "0.68385565", "0.6721758", "0.6652686", "0.6607123", "0.6565233", "0.65367347", "0.64426714", "0.63493085", "0.6312764", "0.63017446", "0.6243304", "0.623608", "0.61460817" ]
0.9345082
0
Test case for update_team
def test_update_team(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_team_state(self):\n pass", "def test_teams_partial_update(self):\n pass", "def test_assign_managing_team(self):\n pass", "def test_owners_can_edit_team_data(self):\n\n data = {\n 'description': 'Edited description',\n 'name': 'Edited Name'\n }\n response = self.client.patch(reverse('api:teams-detail', kwargs={'pk': self.team.id}), data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n team = Team.objects.get(id=self.team.id)\n self.assertEqual(team.name, data['name'])\n self.assertEqual(team.description, data['description'])", "def test_create_team(self):\n pass", "def test_update_case(self):\n pass", "def test_retrieve_team(self):\n pass", "def update(self, request, pk):\n print(\"Update a team\")\n serializer = data_serializers.UpdateTeamSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n new_team_entity = self.controller.update_team(request_data=request_data)\n\n serializer = data_serializers.PresentTeamSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_update_scenario(self):\n pass", "def test_teams_save_team_v1(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "async def update_team(new_data: UpdateTeam, team_id: str = Path(..., description=\"ID value of the desired team\"),\n db_handler: DBHandler = Depends(database_dependency)):\n try:\n updated_record = await db_handler.update_team(team_id=team_id, new_data=new_data)\n updated_record = init_BaseTeam(updated_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return updated_record", "def test_update_goal(self):\n pass", "def test_add_team_member(self):\n pass", "def test_meeting_update(self):\n pass", "def test_delete_team(self):\n pass", "def test_teams_save_team_member_v1(self):\n pass", "def test_handle_edit(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"brS\")\n team.platform = \"web\"\n team_attach = [team.get_attachment()]\n team.platform = \"iOS\"\n team.display_name = \"b-s\"\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team edit brs \"\n \"--name brS \"\n \"--platform web\", user)\n expect = {'attachments': team_attach,\n 'text': 'Team edited: brs, '\n 'name: brS, '\n 'platform: web'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_once_with(team)", "def test_add_team_manager_to_team(self):\n pass", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_unassign_managing_team(self):\n pass", "def test_update9(self):\n pass", "def test_handle_refresh_changed(self):\n test_user = User(user)\n test_user.permissions_level = Permissions.admin\n team = Team(\"TeamID\", \"TeamName\", \"android\")\n team_update = Team(\"TeamID\", \"new team name\", \"android\")\n team_update.add_member(test_user.github_id)\n team2 = Team(\"OTEAM\", \"other team2\", \"ios\")\n\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team, team2]\n self.gh.org_get_teams.return_value = [team_update, team2]\n attach = team_update.get_attachment()\n\n status = f\"1 
teams changed, \" \\\n f\"0 added, \" \\\n f\"0 deleted. Wonderful.\"\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team refresh\", user)\n expect = {'attachments': [attach], 'text': status}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.query.assert_called_once_with(Team)", "def test_update_one(self):\n pass", "def test_teams_create(self):\n pass", "def team_update(token_user, team_id):\n team = Team.query.get(team_id)\n\n if team is None:\n abort(404, 'team not found')\n\n if not json_param_exists('name'):\n abort(400, 'one or more required parameter is missing')\n\n name = request.json['name']\n\n if not (token_user.has_permission('team.update.elevated') or\n (token_user.has_permission('team.update') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to modify team')\n\n team.name = name\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 204" ]
[ "0.85192895", "0.8296829", "0.77149934", "0.7379314", "0.7291607", "0.7266206", "0.72539693", "0.7237011", "0.7220214", "0.71904546", "0.71381044", "0.71381044", "0.71381044", "0.705516", "0.70339763", "0.7015948", "0.69919944", "0.6967077", "0.69559014", "0.68977255", "0.6894381", "0.68808424", "0.68808424", "0.68773437", "0.6816895", "0.67917544", "0.67790717", "0.6776135", "0.6765836", "0.6742495" ]
0.93950874
0
Test case for update_team_state
def test_update_team_state(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_team(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state2(self):\n pass", "def test_update_state1(self):\n pass", "def test_update_state4(self):\n pass", "def test_update_state3(self):\n pass", "def test_teams_partial_update(self):\n pass", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_update_scenario(self):\n pass", "def update_to_state(self, game_state):\n pass", "def test_update_case(self):\n pass", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def test_assign_managing_team(self):\n pass", "def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)", "def test_update_goal(self):\n pass", "def update(self, new_gameStateData):\r\n pass", "def test_update_page_state(self):\n pageStateObj = PageState()\n response = self.client.open(\n '/rui_support/page-state/{tempIdentifier}'.format(tempIdentifier='tempIdentifier_example'),\n method='PATCH',\n data=json.dumps(pageStateObj),\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def test_api_state_change(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n hass.states.async_set(\"test.test\", \"not_to_be_set\")\n\n await mock_api_client.post(\n \"/api/states/test.test\", json={\"state\": \"debug_state_change2\"}\n )\n\n assert hass.states.get(\"test.test\").state == \"debug_state_change2\"", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_deployment_state(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_add_or_update_state_for_new_state(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.add, state.change_kind)", "def test_add_or_update_state_for_state_in_storage(self):\n def test_update_value(name, value):\n return f'{name}-{value}'\n\n state_manager = ActorStateManager(self._fake_actor)\n state_change_tracker = 
state_manager._get_contextual_state_tracker()\n val = _run(state_manager.add_or_update_state('state1', 'value1', test_update_value))\n self.assertEqual('state1-value1', val)\n state = state_change_tracker['state1']\n self.assertEqual(StateChangeKind.update, state.change_kind)", "def test_update_asset_state(self):\n pass", "def test_update_node_state_servicelight(self):\n pass", "def put_state(request):\n game = request.matchdict['game']\n GAMES[game] = GAMES[game].update(request.PUT['state'])\n return {'end': not GAMES[request.matchdict['game']]}", "def test_handle_refresh_changed(self):\n test_user = User(user)\n test_user.permissions_level = Permissions.admin\n team = Team(\"TeamID\", \"TeamName\", \"android\")\n team_update = Team(\"TeamID\", \"new team name\", \"android\")\n team_update.add_member(test_user.github_id)\n team2 = Team(\"OTEAM\", \"other team2\", \"ios\")\n\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team, team2]\n self.gh.org_get_teams.return_value = [team_update, team2]\n attach = team_update.get_attachment()\n\n status = f\"1 teams changed, \" \\\n f\"0 added, \" \\\n f\"0 deleted. Wonderful.\"\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team refresh\", user)\n expect = {'attachments': [attach], 'text': status}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.query.assert_called_once_with(Team)" ]
[ "0.81864196", "0.8020687", "0.7968662", "0.7889632", "0.7888545", "0.7871952", "0.7234962", "0.71040696", "0.67417157", "0.67114717", "0.66611534", "0.6657386", "0.66307276", "0.6600209", "0.65888625", "0.6569521", "0.65363485", "0.6495709", "0.64642227", "0.64642227", "0.64642227", "0.6435071", "0.6321005", "0.6309599", "0.62967706", "0.6288653", "0.62809384", "0.6251717", "0.62453544", "0.6240502" ]
0.94142497
0
Test case for update_template_profile_for_system_module
def test_update_template_profile_for_system_module(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_template_registration(self):\n pass", "def test_update_device_template(self):\n pass", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update_hyperflex_node_profile(self):\n pass", "def test_update_system(self):\n pass", "def edit_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n edited = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n continue\n\n # get new server hardware type for edit\n enclosure_group = profile_template.enclgroup if getattr(profile_template, 'enclgroup', None) is not None else None\n sht_new = None\n if getattr(profile_template, 'new_sht_ref_server', None) is not None:\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.new_sht_ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_new = get_type_of_server_hardware(profile_template.new_sht_ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n elif getattr(profile_template, 'hardwareType', None) is not None:\n sht_new = profile_template.hardwareType\n\n # open Edit SPT dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.name)\n\n EditServerProfileTemplate.select_action_edit()\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfileTemplate.input_name(profile_template.newName) if getattr(profile_template, 'newName', None) is not None else None\n EditServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if sht_new is not None and sht_new not in sht_selected:\n logger.info(\"server hardware type '%s' is NOT consistent with current value '%s'\" % (sht_new, sht_selected))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(sht_new, enclosure_group, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfileTemplate.get_selected_enclosure_group()\n if enclosure_group is not None and enclosure_group not in eg_selected:\n logger.warn(\"enclosure group '%s' is NOT consistent with test data '%s'\" % (eg_selected, enclosure_group))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(enclosure_group, timeout=5, fail_if_false=False)\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n EditServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % 
profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection().set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfileTemplate.Advanced.set(profile_template)\n\n EditServerProfileTemplate.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile_template.newName if getattr(profile_template, 'newName', None) is not None else profile_template.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to edit! all %s server profile template(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile template(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, %s profile template(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully edited - %s out of %s \" % (edited, total))\n return True", "def update_profile_from_template(profile):\n selenium2lib = ui_lib.get_s2l()\n if not select_server_profile(profile):\n ui_lib.fail_test(\"Failed to select profile %s\" % profile)\n\n logger._log_to_console_and_log_file(\"power off server before updating profile from template\")\n profile_attributes = get_server_profile_attributes(profile, None)\n if profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile)\n elif profile_attributes[\"server power\"].lower() == \"on\":\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION)\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n ui_lib.fail_test(\"Power off option is not available in the Actions menu\")\n\n # Select update from template option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n 
ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_UPDATE_FROM_TEMPLATE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MSG_TO_POWER_OFF_SERVER):\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n else:\n ui_lib.fail_test(\"Server should be powered off to update profile\")\n logger.debug(\"waiting for progress bar indicates to 'ok'\")\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_NOTIFICATION_OK, 300):\n logger._log_to_console_and_log_file(\"Server profile '%s' updated successfully from template\" % profile)\n return True\n else:\n ui_lib.fail_test(\"Failed to update server profile '%s' from template\" % profile)", "def test_update_software_components_for_system_module(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_update_template_subscription(self):\n pass", "def test_patch_hyperflex_node_profile(self):\n pass", "def test_update_zr_location_profile(self):\n pass", "def test_register_template(self):\n pass", "def test_update_hyperflex_cluster_profile(self):\n pass", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True) \n \n self.get_ServicesPage(\"\",\"Firmware_update_Template\")\n \n self.logout()", "def test_share_template_registration(self):\n pass", "def test_creation_profile_5():\n assert tuple_NT[0][4] == LIST_dict[0]['current_location'], \"current_location' of profile is not getting stored properly\"", "def copy_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=8)\n\n total = len(profile_template_obj)\n source_not_exists = 0\n target_already_exists = 0\n copied = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile template with name '%s' ...\" % profile_template.source)\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.source, fail_if_false=False) is False:\n logger.warn(\"source server profile template '%s' does not exist\" % profile_template.source)\n source_not_exists += 1\n continue\n\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False) is False:\n logger.warn(\"target server profile template '%s' already exists!\" % profile_template.name)\n target_already_exists += 1\n continue\n\n # open Copy SP dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.source)\n\n CopyServerProfileTemplate.select_action_copy()\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfileTemplate.input_name(profile_template.name)\n CopyServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = CopyServerProfileTemplate.get_selected_server_hardware_type(profile_template.name)\n # if 
profile_template.hardwareType not in sht_selected:\n # logger.warn(\"server hardware type '%s' of server profile template '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile_template.name, profile_template.hardwareType))\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n CopyServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfileTemplate.Advanced.set(profile_template)\n\n CopyServerProfileTemplate.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile_template.source, profile_template.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - source_not_exists - target_already_exists == 0:\n logger.warn(\"no server profile template to copy! all %s server profile template(s) is either source-NOT-existing or target-ALREADY-existing, test is considered FAILED\" % (source_not_exists + target_already_exists))\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile template(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + source_not_exists + target_already_exists == total:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, test is considered FAILED\" % (source_not_exists, target_already_exists))\n return False\n else:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, %s template(s) left is failed being copied \" % (source_not_exists, target_already_exists, total - copied - source_not_exists - target_already_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully copied - %s out of %s \" % (copied, total))\n return True", "def test_template_feedback(self):\r\n pass", "def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def test_profile_route_uses_right_templates(self):\n self.add_testuser()\n response = self.client.get(\"/profile/testuser/\")\n self.assertTemplateUsed(response, \"layout.html\")", "def test_unregister_template(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)", "def test_create_template_subsciption(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def test_update_subscription_template(self):\n pass" ]
[ "0.6785579", "0.6356617", "0.62918997", "0.6184703", "0.6153149", "0.6130606", "0.612474", "0.6082451", "0.6069823", "0.60412025", "0.58541983", "0.58381355", "0.58368003", "0.5815376", "0.5805497", "0.5766707", "0.57287014", "0.56950647", "0.56297696", "0.5629228", "0.5605053", "0.5581194", "0.55801797", "0.5562407", "0.55551493", "0.55353", "0.5526971", "0.5496275", "0.54952246", "0.54856616" ]
0.94910216
0
Test case for update_template_registration
def test_update_template_registration(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_template_subscription(self):\n pass", "def test_retrieve_template_registration(self):\n pass", "def test_register_template(self):\n pass", "def test_update_device_template(self):\n pass", "def test_share_template_registration(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_list_template_registrations(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_unshare_template_registration(self):\n pass", "def test_unregister_template(self):\n pass", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_create_template_subsciption(self):\n pass", "def test_update_template_profile_for_system_module(self):\n pass", "def test_update_reg_ex_config(self):\n pass", "def test_template_feedback(self):\r\n pass", "def test_get_template_subscription(self):\n pass", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)", "def test_create_subscription_template(self):\n pass", "def post_service_template_update(self, resource_id, resource_dict):\n pass", "def test_patch_namespaced_template(self):\n pass", "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "async def test_setup_duplicate_resource_template(hass: HomeAssistant) -> None:\n respx.get(\"http://localhost\") % HTTPStatus.OK\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"resource_template\": \"http://localhost\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0", "def test_create_device_template(self):\n pass", "def test_update_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.patch(f\"/templates/{template_id}\", json={})\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def 
test_otoroshi_controllers_adminapi_templates_controller_template_spec(self):\n pass", "def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')", "def test_delete_template_subscription(self):\n pass", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)" ]
[ "0.8083718", "0.79749024", "0.7708383", "0.7686474", "0.7682151", "0.753326", "0.7352316", "0.72988415", "0.70525295", "0.7021686", "0.6982262", "0.6621553", "0.6618101", "0.658619", "0.6572706", "0.6557499", "0.6534021", "0.6534021", "0.6345299", "0.6320398", "0.6245217", "0.62385386", "0.62273633", "0.62145513", "0.618972", "0.61332405", "0.61254317", "0.61122805", "0.6099141", "0.60679775" ]
0.9445199
0
Test case for update_template_subscription
def test_update_template_subscription(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_subscription_template(self):\n pass", "def test_update_subscription(self):\n pass", "def test_get_template_subscription(self):\n pass", "def test_create_subscription_template(self):\n pass", "def test_delete_template_subscription(self):\n pass", "def test_get_subscription_template(self):\n pass", "def test_update_template_registration(self):\n pass", "def test_list_template_subscriptions(self):\n pass", "def test_delete_subscription_template(self):\n pass", "def test_update_subscription(self):\n args = dict(trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )\n\n try:\n self.arb.update_subscription(**args)\n except KeyError:\n self.arb.update_subscription(subscription_id=u\"1234\", **args)", "def test_get_subscription_templates(self):\n pass", "def test_list_pending_template_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_issue_subscriptions(self):\n pass", "def test_update_device_template(self):\n pass", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_cmd_cs_subscription_update(self, mocker):\n\n mock_update_return = {\n 'subscription_id': SUBSCRIPTION_ID,\n 'account_id': 'a-123'\n }\n\n mock_subscription_client_update = mocker.patch.object(\n SubscriptionClient,\n \"update\",\n return_value=mock_update_return\n )\n\n declaration_file = 'decl.json'\n expected_config_file = os.path.join(os.getcwd(), declaration_file)\n result = self.runner.invoke(cli, ['subscription', 'update', '--subscription-id',\n SUBSCRIPTION_ID, '--declaration', 'decl.json'])\n assert result.output == json.dumps(mock_update_return, indent=4, sort_keys=True) + '\\n'\n assert mock_subscription_client_update.call_args[1]['config_file'] == expected_config_file", "def test_process_subscriptions(self):\n pass", "def test_get_subscription(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_delete_subscription(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: 
{self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def test_get_subscriptions(self):\n pass", "def test_update_subscription_premium(self):\n\n self.assertEqual(first=10, second=self.subscription.radius)\n url = reverse('subscription-detail', args=(self.subscription.id,))\n data = {\n 'type': 'premium',\n 'radius': 30,\n 'swipes_count': 0\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.subscription.refresh_from_db()\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=30, second=self.subscription.radius)", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_share_template_registration(self):\n pass" ]
[ "0.93085617", "0.81070775", "0.79437876", "0.7854699", "0.75393814", "0.750173", "0.7486028", "0.73872113", "0.73769885", "0.71420544", "0.70529866", "0.6996687", "0.68146324", "0.671821", "0.6618665", "0.6602217", "0.6570982", "0.6490833", "0.64818263", "0.640444", "0.6366519", "0.6336007", "0.62782395", "0.62448984", "0.6187954", "0.6182269", "0.61629486", "0.6129705", "0.6057586", "0.6033503" ]
0.9456401
0
Test case for update_test_asset
def test_update_test_asset(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_asset(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update_asset_content(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_software_asset_bundle(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_get_test_asset(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_scenario(self):\n pass", "def test_import_test_asset(self):\n pass", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_delete_asset(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_case(self):\n pass", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_create_system_asset(self):\n pass", "def test_update_activity(self):\n pass", "def test_update_inventory(self):\n pass", "def test_update(app):\n\n assert False", "def test_import_software_asset(self):\n pass" ]
[ "0.9405167", "0.8970323", "0.8844878", "0.8524712", "0.8446598", "0.8428697", "0.81937164", "0.7833235", "0.75604", "0.7455418", "0.74237746", "0.7359769", "0.7320598", "0.72027475", "0.71906775", "0.71747935", "0.70215386", "0.70215386", "0.70215386", "0.69742155", "0.6935405", "0.6882545", "0.68537253", "0.682961", "0.67568296", "0.6705176", "0.6689076", "0.66399246", "0.6620423", "0.66173255" ]
0.95299613
0
Test case for update_test_asset_content
def test_update_test_asset_content(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_asset_content(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_software_asset_content(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update_software_asset_bundle(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_software_asset_install_script(self):\n pass", "def test_get_test_asset(self):\n pass", "def post_asset_update(lock, course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_content_no_file(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 1)\n content = Content.objects.first()\n last_uploaded_time = content.last_uploaded_time\n updated_data = {\n 'name': 'Updated Content Name',\n 'description': 'New description'\n }\n url = reverse('content-detail', args=[content.pk])\n response = self.client.patch(url, updated_data, format='json')\n content = Content.objects.first()\n self.assertEqual(last_uploaded_time, content.last_uploaded_time)", "def test_import_test_asset(self):\n pass", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")", "def test_updates_static_version(self):\n scripts.update_static_asset_version.main()", "def test_delete_asset(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_update_scenario(self):\n pass", "def 
_update_asset(request, course_key, asset_key):\r\n if request.method == 'DELETE':\r\n # Make sure the item to delete actually exists.\r\n try:\r\n content = contentstore().find(asset_key)\r\n except NotFoundError:\r\n return JsonResponse(status=404)\r\n\r\n # ok, save the content into the trashcan\r\n contentstore('trashcan').save(content)\r\n\r\n # see if there is a thumbnail as well, if so move that as well\r\n if content.thumbnail_location is not None:\r\n # We are ignoring the value of the thumbnail_location-- we only care whether\r\n # or not a thumbnail has been stored, and we can now easily create the correct path.\r\n thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.name)\r\n try:\r\n thumbnail_content = contentstore().find(thumbnail_location)\r\n contentstore('trashcan').save(thumbnail_content)\r\n # hard delete thumbnail from origin\r\n contentstore().delete(thumbnail_content.get_id())\r\n # remove from any caching\r\n del_cached_content(thumbnail_location)\r\n except:\r\n logging.warning('Could not delete thumbnail: %s', thumbnail_location)\r\n\r\n # delete the original\r\n contentstore().delete(content.get_id())\r\n # remove from cache\r\n del_cached_content(content.location)\r\n return JsonResponse()\r\n\r\n elif request.method in ('PUT', 'POST'):\r\n if 'file' in request.FILES:\r\n return _upload_asset(request, course_key)\r\n else:\r\n # Update existing asset\r\n try:\r\n modified_asset = json.loads(request.body)\r\n except ValueError:\r\n return HttpResponseBadRequest()\r\n contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])\r\n # Delete the asset from the cache so we check the lock status the next time it is requested.\r\n del_cached_content(asset_key)\r\n return JsonResponse(modified_asset, status=201)", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_locking(self):\r\n def verify_asset_locked_state(locked):\r\n \"\"\" Helper method to verify lock state in the contentstore \"\"\"\r\n asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.txt')\r\n content = contentstore().find(asset_location)\r\n self.assertEqual(content.locked, locked)\r\n\r\n def post_asset_update(lock, course):\r\n \"\"\" Helper method for posting asset update. 
\"\"\"\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)\r\n\r\n # Load the toy course.\r\n module_store = modulestore('direct')\r\n _, course_items = import_from_xml(\r\n module_store,\r\n 'common/test/data/',\r\n ['toy'],\r\n static_content_store=contentstore(),\r\n verbose=True\r\n )\r\n course = course_items[0]\r\n verify_asset_locked_state(False)\r\n\r\n # Lock the asset\r\n resp_asset = post_asset_update(True, course)\r\n self.assertTrue(resp_asset['locked'])\r\n verify_asset_locked_state(True)\r\n\r\n # Unlock the asset\r\n resp_asset = post_asset_update(False, course)\r\n self.assertFalse(resp_asset['locked'])\r\n verify_asset_locked_state(False)", "def test_auto_add_assets_to_story(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())", "def test_update_metadata(self):\n pass" ]
[ "0.9501882", "0.87986636", "0.87571913", "0.87021345", "0.79243267", "0.7862141", "0.76514953", "0.7385671", "0.7317416", "0.72261894", "0.6899784", "0.6894357", "0.6863201", "0.68026686", "0.67575234", "0.66775644", "0.6651309", "0.6642442", "0.6635494", "0.660337", "0.6571381", "0.6430744", "0.64000255", "0.6392156", "0.6375978", "0.6375978", "0.6375978", "0.636109", "0.63394773", "0.6334868" ]
0.9525743
0
Test case for update_test_asset_impact_level
def test_update_test_asset_impact_level(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_software_asset_impact_level(self):\n pass", "def test_update_impact_level(self):\n pass", "def test_update_asset_state(self):\n pass", "def test_update_test_asset(self):\n pass", "def test_update_asset(self):\n pass", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_update_software_asset(self):\n pass", "def test_update_system_asset(self):\n pass", "def test_update_scenario(self):\n pass", "def test_update_test_asset_content(self):\n pass", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def test_update_asset_content(self):\n pass", "def test_change_brightness_back_to_10():", "def test_update_software_asset_content(self):\n pass", "def test_update_goal_metric(self):\n pass", "def test_update_inventory(self):\n pass", "def test_update_risk_profile_using_put(self):\n pass", "def test_set_and_get_led_brightness_level(self):", "def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)", "def test_client_risk_assessment_partial_update(self):\n pass", "def test_update_review_status(self):\n risk = factories.RiskFactory()\n new_value = all_models.Review.STATES.REVIEWED\n\n self.api.put(risk, risk.id, {\n \"review_status\": new_value,\n \"review_status_display_name\": \"some status\"\n })\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status, new_value)", "def test_damage(self):\n\n self.sold.experience = 27\n self.assertEqual(self.sold.damage, 0.32)", "def test_damage(self):\n\n for op in self.veh.operators:\n op.experience = 10\n self.assertEqual(self.veh.damage, 0.4)", "def test_update_goal(self):\n pass", "def test_update_software_asset_install_script(self):\n pass", "def test_edit_asset_type(self):\n get_asset = Asset.objects.get(asset_code=\"IC001\")\n get_asset.asset_code = \"IC003\"\n get_asset.save()\n self.assertEqual(self.all_assets.count(), 1)\n get_asset = Asset.objects.get(asset_code=\"IC003\")\n self.assertEqual(get_asset.asset_code, \"IC003\")", "def test_update_case(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_UpdateHealthAt0 (self) :\n\t\t\n\t\tself.person2.updateHealth ()\n\t\tself.assertEqual(self.person2.getHealth(), \\\n\t\t100 + self.healthEffect)", "def test_update_software_asset_bundle(self):\n pass" ]
[ "0.90864426", "0.88729835", "0.673349", "0.6672863", "0.66703254", "0.6534632", "0.6362856", "0.6294142", "0.6159111", "0.61350924", "0.6133756", "0.61003006", "0.6054757", "0.6051059", "0.59986806", "0.59780127", "0.5968827", "0.5818764", "0.58010066", "0.5774684", "0.57573354", "0.57490575", "0.57386094", "0.5734436", "0.5733662", "0.5720051", "0.56895727", "0.5688006", "0.5678431", "0.5669034" ]
0.94940317
0
Test case for update_virt_realm
def test_update_virt_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_virtualization_realm(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_vault_update_vault_section(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_powerup(self):\n self.assertIdentical(self.realm, IRealm(self.store))", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_update_virtual_account_by_id(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_vault_update_vault_item(self):\n pass", "def test_update_hyperflex_server_model(self):\n pass", "def test_update_hyperflex_app_catalog(self):\n pass", "def test_update_case(self):\n pass", "def test_ipam_vrfs_update(self):\n pass", "def test_update_system(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_control_acl_update(self):\n with factories.single_commit():\n control = factories.ControlFactory()\n person = factories.PersonFactory()\n control.add_person_with_role_name(person, \"Admin\")\n access_control_list = {\n \"Admin\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"user1\",\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"user2\",\n },\n ]\n }\n self.setup_people(access_control_list)\n\n response = self.api.put(control, control.id, {\n \"access_control_list\": access_control_list,\n })\n\n self.assert200(response)\n control = all_models.Control.query.get(control.id)\n self.assert_obj_acl(control, access_control_list)", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_update(sqlite_db):\n updated_pass = \"TheUpdatedPassword\"\n site = \"www.example.com\"\n response = smm.update_passwd(site, updated_pass)\n assert response\n assert smm.read_passwd(site) == updated_pass\n bad_response = smm.update_passwd(\"NotASite\", updated_pass)\n assert not bad_response", "def test_update_hyperflex_proxy_setting_policy(self):\n pass" ]
[ "0.8676987", "0.7930964", "0.77614", "0.70606196", "0.6879447", "0.686318", "0.68215346", "0.6770379", "0.66104203", "0.6395189", "0.6332107", "0.6267194", "0.6243547", "0.6209109", "0.6196717", "0.6183286", "0.6130521", "0.6086439", "0.5971164", "0.59483135", "0.5940871", "0.5794731", "0.5792457", "0.5792457", "0.5792457", "0.578557", "0.57781225", "0.5776754", "0.5775316", "0.5762141" ]
0.9600858
0
Test case for update_virt_realm_remote_access_config
def test_update_virt_realm_remote_access_config(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_virt_realm_remote_access(self):\n pass", "def test_update_virt_realm(self):\n pass", "def test_disable_virt_realm_remote_access(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_update_proxy():\n result = False\n\n proxy = {\n 'name': 'proxy',\n 'address': 'proxy2.ntnxlab.local',\n 'port': 8080,\n 'http': True,\n 'https': True,\n 'socks': False,\n 'username': '',\n 'password': '',\n }\n\n cluster_obj = prism.Cluster(api_client=_api())\n config_obj = prism.Config(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n for each_uuid in clusters:\n config_obj.set_proxy(address=proxy['address'], port=proxy['port'], name=proxy['name'], http=proxy['http'], https=proxy['https'],\n username=proxy['username'], password=proxy['password'], socks=proxy['socks'], clusteruuid=each_uuid)\n cluster_proxy = config_obj.get_proxy(clusteruuid=each_uuid)\n\n if proxy['address'] == cluster_proxy[0]['address']:\n result = True\n\n assert result", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def test_patch_role_remote(self):\n # Create source site and remote project\n source_site = self.make_site(\n name=REMOTE_SITE_NAME,\n url=REMOTE_SITE_URL,\n mode=SITE_MODE_SOURCE,\n description=REMOTE_SITE_DESC,\n secret=REMOTE_SITE_SECRET,\n )\n self.make_remote_project(\n project_uuid=self.project.sodar_uuid,\n project=self.project,\n site=source_site,\n level=SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES'],\n )\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'role': PROJECT_ROLE_GUEST}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_update_hyperflex_proxy_setting_policy(self):\n pass", "def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref", "def test_update_hyperflex_local_credential_policy(self):\n pass", "def test_302_proxy_server_config(self, auth_api_version=None):\n if self._get_openstack_release() >= self.xenial_queens:\n auth_api_version = auth_api_version or '3'\n else:\n auth_api_version = auth_api_version or '2.0'\n u.log.debug(\"Checking swift proxy-server config auth_api_version={}...\"\n \"\".format(auth_api_version))\n unit = self.swift_proxy_sentry\n conf = '/etc/swift/proxy-server.conf'\n keystone_relation = self.keystone_sentry.relation(\n 'identity-service', 'swift-proxy:identity-service')\n swift_proxy_relation = unit.relation(\n 'identity-service', 'keystone:identity-service')\n swift_proxy_ip = swift_proxy_relation['private-address']\n auth_host = keystone_relation['auth_host']\n auth_protocol = keystone_relation['auth_protocol']\n\n expected = {\n 'DEFAULT': {\n 'bind_port': '8070',\n 'user': 'swift',\n 'log_name': 'swift',\n 'log_facility': 'LOG_LOCAL0',\n 'log_level': 'INFO',\n 
'log_headers': 'False',\n 'log_address': '/dev/log'\n },\n 'pipeline:main': {\n 'pipeline': 'gatekeeper healthcheck proxy-logging cache '\n 'swift3 s3token container_sync bulk tempurl '\n 'slo dlo formpost authtoken keystoneauth '\n 'staticweb container-quotas account-quotas '\n 'proxy-logging proxy-server'\n },\n 'app:proxy-server': {\n 'use': 'egg:swift#proxy',\n 'allow_account_management': 'true',\n 'account_autocreate': 'true',\n 'node_timeout': '60',\n 'recoverable_node_timeout': '30'\n },\n 'filter:tempauth': {\n 'use': 'egg:swift#tempauth',\n 'user_system_root': 'testpass .admin https://{}:8080/v1/'\n 'AUTH_system'.format(swift_proxy_ip)\n },\n 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},\n 'filter:cache': {\n 'use': 'egg:swift#memcache',\n 'memcache_servers': '{}:11211'.format(swift_proxy_ip)\n },\n 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},\n 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},\n 'filter:proxy-logging': {'use': 'egg:swift#proxy_logging'},\n 'filter:staticweb': {'use': 'egg:swift#staticweb'},\n 'filter:bulk': {'use': 'egg:swift#bulk'},\n 'filter:slo': {'use': 'egg:swift#slo'},\n 'filter:dlo': {'use': 'egg:swift#dlo'},\n 'filter:formpost': {'use': 'egg:swift#formpost'},\n 'filter:tempurl': {'use': 'egg:swift#tempurl'},\n 'filter:container_sync': {'use': 'egg:swift#container_sync'},\n 'filter:gatekeeper': {'use': 'egg:swift#gatekeeper'},\n 'filter:keystoneauth': {\n 'use': 'egg:swift#keystoneauth',\n 'operator_roles': 'Member,Admin'\n },\n 'filter:authtoken': {\n 'auth_uri': '{}://{}:{}'.format(\n auth_protocol,\n auth_host,\n keystone_relation['service_port']),\n 'delay_auth_decision': 'true',\n 'signing_dir': '/var/cache/swift',\n 'cache': 'swift.cache'\n },\n 'filter:swift3': {'use': 'egg:swift3#swift3'}\n }\n if auth_api_version == '2.0':\n expected['filter:authtoken'].update({\n 'admin_tenant_name': keystone_relation['service_tenant'],\n 'admin_user': keystone_relation['service_username'],\n 'admin_password': keystone_relation['service_password'],\n })\n\n if self._get_openstack_release() >= self.xenial_queens:\n expected['pipeline:main'] = {\n 'pipeline': 'catch_errors gatekeeper healthcheck proxy-logging'\n ' cache authtoken swift3 s3token container_sync bulk tempurl'\n ' slo dlo formpost keystoneauth staticweb'\n ' versioned_writes container-quotas account-quotas'\n ' proxy-logging proxy-server'\n }\n elif self._get_openstack_release() >= self.trusty_mitaka:\n expected['pipeline:main'] = {\n 'pipeline': 'catch_errors gatekeeper healthcheck proxy-logging'\n ' cache swift3 s3token container_sync bulk tempurl slo dlo'\n ' formpost authtoken keystoneauth staticweb'\n ' versioned_writes container-quotas account-quotas'\n ' proxy-logging proxy-server'\n }\n\n s3_token_auth_settings_legacy = {\n 'auth_port': keystone_relation['auth_port'],\n 'auth_host': keystone_relation['auth_host'],\n 'service_host': keystone_relation['service_host'],\n 'service_port': keystone_relation['service_port'],\n 'auth_protocol': keystone_relation['auth_protocol'],\n 'auth_token': keystone_relation['admin_token'],\n 'admin_token': keystone_relation['admin_token']\n }\n\n if self._get_openstack_release() >= self.xenial_queens:\n expected['filter:authtoken'].update({\n 'paste.filter_factory': 'keystonemiddleware.auth_token:'\n 'filter_factory',\n })\n expected['filter:authtoken'].update({\n 'auth_url': '{}://{}:{}'.format(\n auth_protocol,\n auth_host,\n keystone_relation['auth_port']),\n 'auth_plugin': 'password',\n 'username': 
keystone_relation['service_username'],\n 'password': keystone_relation['service_password'],\n 'project_domain_name': keystone_relation['service_domain'],\n 'user_domain_name': keystone_relation['service_domain'],\n 'project_name': keystone_relation['service_tenant'],\n })\n expected['filter:s3token'] = {\n 'use': 'egg:swift3#s3token',\n 'auth_uri': '{}://{}:{}'.format(\n auth_protocol,\n auth_host,\n keystone_relation['auth_port']),\n 'auth_version': '3'\n }\n elif self._get_openstack_release() >= self.trusty_kilo:\n # Kilo and later\n expected['filter:authtoken'].update({\n 'paste.filter_factory': 'keystonemiddleware.auth_token:'\n 'filter_factory',\n })\n if auth_api_version == '3':\n expected['filter:authtoken'].update({\n 'auth_url': '{}://{}:{}'.format(\n auth_protocol,\n auth_host,\n keystone_relation['auth_port']),\n 'auth_plugin': 'password',\n 'username': keystone_relation['service_username'],\n 'password': keystone_relation['service_password'],\n 'project_domain_name': keystone_relation['service_domain'],\n 'user_domain_name': keystone_relation['service_domain'],\n 'project_name': keystone_relation['service_tenant'],\n })\n else:\n expected['filter:authtoken'].update({\n 'identity_uri': '{}://{}:{}'.format(\n auth_protocol,\n auth_host,\n keystone_relation['auth_port']),\n })\n expected['filter:s3token'] = {\n # No section commonality with J and earlier\n 'paste.filter_factory': 'keystoneclient.middleware.s3_token'\n ':filter_factory',\n }\n expected['filter:s3token'].update(s3_token_auth_settings_legacy)\n\n if self._get_openstack_release() >= self.trusty_mitaka:\n expected['filter:s3token']['paste.filter_factory'] = \\\n 'keystonemiddleware.s3_token:filter_factory'\n\n # NOTE(hopem): this will need extending for newer releases once\n # swift-plugin-s3 is updated in UCA. 
See LP: #1738063\n else:\n # Juno and earlier\n expected['filter:authtoken'].update({\n 'paste.filter_factory': 'keystoneclient.middleware.'\n 'auth_token:filter_factory',\n 'auth_host': auth_host,\n 'auth_port': keystone_relation['auth_port'],\n 'auth_protocol': auth_protocol,\n })\n expected['filter:s3token'] = {\n # No section commonality with K and later\n 'paste.filter_factory': 'keystoneclient.middleware.'\n 's3_token:filter_factory',\n }\n expected['filter:s3token'].update(s3_token_auth_settings_legacy)\n\n for section, pairs in expected.items():\n ret = u.validate_config_data(unit, conf, section, pairs)\n if ret:\n message = \"proxy-server config error: {}\".format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)", "def update_remote_access(self, remote_access_id, info):\n if remote_access_id and info:\n response = self._request(\"PATCH\", [ROUTE_REMOTE_ACCESSES, remote_access_id], info)\n logging.debug(\"Update remote access::{}\".format(response.text))\n return self.verif_response(response)\n\n logging.error(\"Error update remote access\")\n return False", "def test_get_virtualization_realm(self):\n pass", "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_update_hyperflex_node_config_policy(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "async def test_reauth_flow_update_configuration(opp, aioclient_mock):\n config_entry = await setup_deconz_integration(opp, aioclient_mock)\n\n result = await opp.config_entries.flow.async_init(\n DECONZ_DOMAIN,\n data=config_entry.data,\n context={\"source\": SOURCE_REAUTH},\n )\n\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"link\"\n\n new_api_key = \"new_key\"\n\n aioclient_mock.post(\n \"http://1.2.3.4:80/api\",\n json=[{\"success\": {\"username\": new_api_key}}],\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n aioclient_mock.get(\n f\"http://1.2.3.4:80/api/{new_api_key}/config\",\n json={\"bridgeid\": BRIDGEID},\n headers={\"content-type\": CONTENT_TYPE_JSON},\n )\n\n result = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={}\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert config_entry.data[CONF_API_KEY] == new_api_key", "def test_register_virtualization_realm(self):\n pass", "def test__get_component_server_mirror(self):\n MockConfigRegistry._EXTRA = {\n 'local/repository': 'yes',\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/server': 'a.example.net',\n 'repository/online/component/a/port': '4711',\n }\n self.u.ucr_reinit()\n s = self.u._get_component_server('a', for_mirror_list=True)\n self.assertEqual('a.example.net', s.mock_server)\n self.assertEqual('4711', s.mock_port)", "def test_update_node_driveconfig(self):\n pass", "def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)", "def test_patch_hyperflex_vcenter_config_policy(self):\n pass", "def test_update_reg_ex_config(self):\n pass", "def test_auth0_config_admin(testapp, registry):\n _test_auth_config(testapp, registry)", "def test_control_acl_update(self):\n with factories.single_commit():\n control = factories.ControlFactory()\n person = factories.PersonFactory()\n 
control.add_person_with_role_name(person, \"Admin\")\n access_control_list = {\n \"Admin\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"user1\",\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"user2\",\n },\n ]\n }\n self.setup_people(access_control_list)\n\n response = self.api.put(control, control.id, {\n \"access_control_list\": access_control_list,\n })\n\n self.assert200(response)\n control = all_models.Control.query.get(control.id)\n self.assert_obj_acl(control, access_control_list)", "def edit_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass" ]
[ "0.78669727", "0.7865796", "0.72010916", "0.71079874", "0.63052976", "0.61185884", "0.606297", "0.60460615", "0.60045266", "0.59853214", "0.5889328", "0.58515453", "0.58470666", "0.5844665", "0.58144546", "0.58085984", "0.5804732", "0.5778759", "0.5767351", "0.57399875", "0.57337606", "0.5710992", "0.5650279", "0.56350404", "0.56102145", "0.56059337", "0.55978173", "0.55689096", "0.54912305", "0.548341" ]
0.95664346
0
Test case for update_virtualization_realm
def test_update_virtualization_realm(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_virt_realm(self):\n pass", "def test_set_virtualization_realm_active(self):\n pass", "def test_register_virtualization_realm(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def test_allocate_virtualization_realm(self):\n pass", "def test_update_virt_realm_remote_access_config(self):\n pass", "def test_invalidate_template_cache_in_virtualization_realm(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_remove_virt_realm(self):\n pass", "def test_update_virtual_account_by_id(self):\n pass", "def test_update_virtualization_realm_maximum_impact_level(self):\n pass", "def test_set_project_default_virtualization_realm(self):\n pass", "def test_vault_update_vault_section(self):\n pass", "def test_determine_valid_virtualization_realms(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_ipam_vrfs_update(self):\n pass", "def test_get_deployment_runs_in_virtualization_realm(self):\n pass", "def test_list_virtualization_realm_templates(self):\n pass", "def test_enable_virt_realm_remote_access(self):\n pass", "def test_modify_virtual_service(self):\n pass", "def test_vault_update_vault_item(self):\n pass", "def test_update_hyperflex_server_model(self):\n pass", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_ipam_vrfs_partial_update(self):\n pass", "def test_deallocate_virt_realm(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_ipam_roles_partial_update(self):\n pass", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_update_case(self):\n pass", "def test_update_hyperflex_proxy_setting_policy(self):\n pass" ]
[ "0.87042093", "0.7929986", "0.7839932", "0.7683304", "0.7411108", "0.7224155", "0.7057522", "0.6883899", "0.68807685", "0.6845209", "0.6744579", "0.6676983", "0.66720814", "0.66179353", "0.6586227", "0.6571328", "0.6536677", "0.6480477", "0.6478711", "0.6407935", "0.6303377", "0.6263661", "0.62069887", "0.61898875", "0.61748654", "0.614575", "0.6040808", "0.60333365", "0.602118", "0.6015862" ]
0.95131004
0
Test case for update_virtualization_realm_maximum_impact_level
def test_update_virtualization_realm_maximum_impact_level(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_impact_level(self):\n pass", "def test_update_hyperflex_feature_limit_internal(self):\n pass", "def test_update_software_asset_impact_level(self):\n pass", "def test_update_test_asset_impact_level(self):\n pass", "def test_update_hyperflex_feature_limit_external(self):\n pass", "def test_techs_unit_capacity_max_systemwide_milp_constraint(self):\n\n override_max = {\n \"links.a,b.exists\": True,\n \"techs.test_conversion_plus.constraints.units_max_systemwide\": 2,\n \"nodes.b.techs.test_conversion_plus.constraints\": {\n \"units_max\": 2,\n \"energy_cap_per_unit\": 5,\n },\n }\n m = build_model(override_max, \"conversion_plus_milp,two_hours,investment_costs\")\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"unit_capacity_systemwide_milp_constraint\")\n assert (\n m._backend_model.unit_capacity_systemwide_milp_constraint[\n \"test_conversion_plus\"\n ].upper()\n == 2\n )", "def test_patch_hyperflex_feature_limit_internal(self):\n pass", "def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit", "def test_get_damage_out_of_limit(self):\n self.veh.health = 0.24\n for op in self.veh.operators:\n op.health = 0.1\n self.veh.get_damage(0.5)\n self.assertEqual(self.veh.health, 0)\n self.assertEqual(self.veh.operators[0].health, 0.05)\n self.assertEqual(self.veh.operators[1].health, 0.05)", "def test_update_virtualization_realm(self):\n pass", "def test_techs_unit_capacity_max_systemwide_transmission_milp_constraint(self):\n override_transmission = {\n \"links.a,b.exists\": True,\n \"techs.test_transmission_elec.constraints\": {\n \"units_max_systemwide\": 1,\n \"lifetime\": 25,\n },\n \"techs.test_transmission_elec.costs.monetary\": {\n \"purchase\": 1,\n \"interest_rate\": 0.1,\n },\n }\n m = build_model(\n override_transmission, \"simple_supply,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"unit_capacity_systemwide_milp_constraint\")\n assert (\n m._backend_model.unit_capacity_systemwide_milp_constraint[\n \"test_transmission_elec\"\n ].upper()\n == 2\n )", "def maximum_level(self, question_type):\n\t\treturn 2", "def test_operate_resource_cap_max(self, on):\n\n if on is False:\n override = {}\n else:\n override = {\"techs.test_supply_plus.constraints.resource_cap_max\": 1e6}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n if on is False:\n assert check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert np.isinf(\n m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item()\n )\n elif on is True:\n assert not check_error_or_warning(\n warning, \"Resource capacity constraint defined and set to infinity\"\n )\n assert m._model_data.resource_cap.loc[\"a\", \"test_supply_plus\"].item() == 1e6", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def test_maxv_keyword(self):\n # Set maxv to 200\n byt = bytscl(self.array2, maxv=200)\n control = numpy.sum(self.array2 >= 200)\n total = numpy.sum(byt == 255)\n self.assertEqual(total, control)", "def test_maxIndex(self):\t\t\n self.assertEqual(attempt.maxIndexZ, 113)\n self.assertEqual(attempt.maxIndexW, 134)", "def test_maximum_grain_size_value(self):\n self.assertEqual(self.maximum_grain_size_value, 0.5)", "def test_get_max_score(self):\r\n max_score = self.peer_grading.max_score()\r\n self.assertEquals(max_score, None)", "def test_patch_hyperflex_feature_limit_external(self):\n pass", "def testMaxTargets(self):\n\n self.assertEqual('Maxtargets: %s' % inventory_base.DEFAULT_MAXTARGETS,\n self.inv._CmdMaxTargets('maxtargets', []))\n self.inv._CmdMaxTargets('maxtargets', ['10'])\n self.assertEqual(10, self.inv._maxtargets)", "def test_update_virt_realm(self):\n pass", "def test_get_hyperflex_feature_limit_internal_by_moid(self):\n pass", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def max_value(board, max_util, min_util, depth):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n nodes_generated += 1\r\n max_depth = max(max_depth,depth)\r\n \r\n if cutoff_search(board, depth):\r\n return evaluation(board)\r\n v = -1000\r\n moves = legal_moves(board,1)\r\n for move in moves:\r\n temp_board = camelot_board.Camelot(list(board.white),list(board.black))\r\n state = action(temp_board, move, 1)\r\n v = max(v, min_value(state, max_util, min_util, depth + 1))\r\n if v >= min_util:\r\n max_prune += 1\r\n return v\r\n max_util = max(max_util, v)\r\n return v", "def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def _updateLevel(self, level, lastUpdate, time, timeout):\n\t\ttimeoutsPassed = (time - lastUpdate) / timeout\n\t\treturn max(0, level - timeoutsPassed)" ]
[ "0.6894916", "0.64250696", "0.6368963", "0.62384456", "0.6109848", "0.58428806", "0.5751304", "0.5743156", "0.56969416", "0.56509143", "0.559787", "0.55536795", "0.5535449", "0.55049676", "0.54743236", "0.54658407", "0.54521674", "0.54473025", "0.54327923", "0.54280585", "0.5408041", "0.53655463", "0.5341528", "0.5327524", "0.5320271", "0.5315357", "0.5309864", "0.53016514", "0.5283725", "0.52748406" ]
0.95279
0
Test case for update_visibility_query
def test_update_visibility_query(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query2(self):\n pass", "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query4(self):\n pass", "def test_update_visibility_query3(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_ipam_vlans_update(self):\n pass", "def test_ipam_vlans_partial_update(self):\n pass", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def toggleTableVisibility(id, isVisible):\n table = SavedSearch.objects(id=id).first()\n if not table:\n return {'success': False,\n 'message': \"Error finding table. Please refresh and try again\"}\n message = table.name+ \" is now \"\n if isVisible:\n message += \"visible\"\n else:\n message += \"hidden\"\n table.isPinned = isVisible\n table.save()\n return {'success': True,'message': message}", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "def validate(self, visibility_field):\r\n if self.original_query.filter is wtypes.Unset:\r\n self.filter_expr = None\r\n else:\r\n self.filter_expr = json.loads(self.original_query.filter)\r\n self._validate_filter(self.filter_expr)\r\n self._replace_isotime_with_datetime(self.filter_expr)\r\n self._convert_operator_to_lower_case(self.filter_expr)\r\n self._normalize_field_names_for_db_model(self.filter_expr)\r\n\r\n self._force_visibility(visibility_field)\r\n\r\n if self.original_query.orderby is wtypes.Unset:\r\n self.orderby = None\r\n else:\r\n self.orderby = json.loads(self.original_query.orderby)\r\n self._validate_orderby(self.orderby)\r\n self._convert_orderby_to_lower_case(self.orderby)\r\n self._normalize_field_names_in_orderby(self.orderby)\r\n\r\n if self.original_query.limit is wtypes.Unset:\r\n self.limit = None\r\n else:\r\n self.limit = self.original_query.limit\r\n\r\n if self.limit is not None and self.limit <= 0:\r\n msg = _('Limit should be positive')\r\n raise ClientSideError(msg)", "def test_updates_tool_visibility(self):\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertNotContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)\n\n self.create_course_update(TEST_UPDATE_MESSAGE)\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)", "def test_client_nationlity_update(self):\n pass", "def test_client_nationlity_partial_update(self):\n pass", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def test_wiki_visibility(self):\r\n\r\n wiki_tab = tabs.WikiTab()\r\n self.assertTrue(wiki_tab.is_hideable)\r\n wiki_tab.is_hidden = True\r\n self.assertTrue(wiki_tab['is_hidden'])\r\n self.check_tab_json_methods(wiki_tab)\r\n self.check_tab_equality(wiki_tab, wiki_tab.to_json())\r\n wiki_tab['is_hidden'] = False\r\n self.assertFalse(wiki_tab.is_hidden)", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_update_non_shareable(self):\n self.create_common_users_and_groups()\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = sync.get_all_users_and_groups()\n\n # Need to remove the common users since they don't have emails.\n auag.remove_user(\"guest\")\n auag.remove_user(\"su\")\n auag.remove_user(\"system\")\n auag.remove_user(\"tsadmin\")\n\n # Change Group 1 and Group 2 and verify change took.\n group1 = auag.get_group(\"Group 1\")\n group1.visibility = Visibility.NON_SHAREABLE\n group2 = auag.get_group(\"Group 2\")\n group2.visibility = Visibility.DEFAULT\n\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n\n # verify changes\n auag = sync.get_all_users_and_groups()\n self.assertEqual(\n auag.get_group(\"Group 1\").visibility, Visibility.NON_SHAREABLE\n )\n self.assertEqual(\n auag.get_group(\"Group 2\").visibility, Visibility.DEFAULT\n )\n self.assertEqual(\n auag.get_group('Group \"3\"').visibility, Visibility.NON_SHAREABLE\n )", "def artifact_status_put_req(artifact_id, user_id, visibility):\n if visibility not in get_visibilities():\n return {'status': 'error',\n 'message': 'Unknown visibility value: %s' % visibility}\n\n pd = Artifact(int(artifact_id))\n access_error = check_access(pd.study.id, user_id)\n if access_error:\n return access_error\n user = User(str(user_id))\n status = 'success'\n msg = 'Artifact visibility changed to %s' % visibility\n # Set the approval to private if needs approval and admin\n if visibility == 'private':\n if not qiita_config.require_approval:\n pd.visibility = 'private'\n # Set the approval to private if approval not required\n elif user.level == 'admin':\n pd.visibility = 'private'\n # Trying to set approval without admin privileges\n else:\n status = 'error'\n msg = 'User does not have permissions to approve change'\n else:\n pd.visibility = visibility\n\n return {'status': status,\n 'message': msg}", "def test_step_visibility(self, _step: PropertyMock):\n _step.return_value = MagicMock(is_visible=True)\n es = exposed.ExposedStep()\n self.assertTrue(es.visible)\n es.visible = False\n self.assertFalse(es.visible)", "def test_update_queryset_ttl_success_case(self):", "def add_visibility_criteria(self, visibility):\n if visibility == VISIBILITY_PUBLIC:\n self.criteria.append({'workspace':\n {'$in': [ObjectId(workspace_id)\n for workspace_id\n in workspace_api.get_all_public_workspaces().values_list('id')]}})\n elif visibility == VISIBILITY_ALL:\n # NOTE: get all data, no restriction needed\n logger.info(\"add_visibility_criteria case not implemented.\")\n elif visibility == VISIBILITY_USER:\n # TODO: get only user data\n logger.info(\"add_visibility_criteria case not implemented.\")", "def test_visible_whitelisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False" ]
[ "0.9338", "0.9300291", "0.92219377", "0.9215671", "0.8477806", "0.8302129", "0.5916474", "0.5878879", "0.582108", "0.5800395", "0.5718068", "0.56690055", "0.5650589", "0.5625769", "0.55971676", "0.5532673", "0.5529284", "0.55100787", "0.55096596", "0.54805464", "0.5469038", "0.5439252", "0.5437126", "0.5434058", "0.5391432", "0.53898644", "0.5384333", "0.5375159", "0.53713685", "0.5360667" ]
0.9467312
0
Test case for update_visibility_query1
def test_update_visibility_query1(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query2(self):\n pass", "def test_update_visibility_query3(self):\n pass", "def test_update_visibility_query(self):\n pass", "def test_update_visibility_query4(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_ipam_vlans_partial_update(self):\n pass", "def test_ipam_vlans_update(self):\n pass", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def toggleTableVisibility(id, isVisible):\n table = SavedSearch.objects(id=id).first()\n if not table:\n return {'success': False,\n 'message': \"Error finding table. Please refresh and try again\"}\n message = table.name+ \" is now \"\n if isVisible:\n message += \"visible\"\n else:\n message += \"hidden\"\n table.isPinned = isVisible\n table.save()\n return {'success': True,'message': message}", "def test_visible_whitelisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_updates_tool_visibility(self):\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertNotContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)\n\n self.create_course_update(TEST_UPDATE_MESSAGE)\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def test_wiki_visibility(self):\r\n\r\n wiki_tab = tabs.WikiTab()\r\n self.assertTrue(wiki_tab.is_hideable)\r\n wiki_tab.is_hidden = True\r\n self.assertTrue(wiki_tab['is_hidden'])\r\n self.check_tab_json_methods(wiki_tab)\r\n self.check_tab_equality(wiki_tab, wiki_tab.to_json())\r\n wiki_tab['is_hidden'] = False\r\n self.assertFalse(wiki_tab.is_hidden)", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "def test_visible_ramp(self):\n total_number = 100000\n expected_percentage = .10\n self.feature_test.set_percentage(expected_percentage * 100)\n # Generate a range of user ids and map these ids to the feature\n # test result.\n user_ids = list(range(1, total_number + 1))\n visibility_map = [\n self.feature_test.is_visible(user_id)\n for user_id\n in user_ids\n ]\n # Count the number of success conditions.\n visibility_count = visibility_map.count(True)\n # This should match 10%.\n actual_percentage = visibility_count / float(total_number)\n self.assertAlmostEqual(\n actual_percentage, expected_percentage, delta=.012\n )", "def add_visibility_criteria(self, visibility):\n if visibility == VISIBILITY_PUBLIC:\n self.criteria.append({'workspace':\n {'$in': [ObjectId(workspace_id)\n for workspace_id\n in workspace_api.get_all_public_workspaces().values_list('id')]}})\n elif visibility == VISIBILITY_ALL:\n # NOTE: get all data, no restriction needed\n logger.info(\"add_visibility_criteria case not implemented.\")\n elif visibility == VISIBILITY_USER:\n # TODO: get only user data\n logger.info(\"add_visibility_criteria case not implemented.\")", "def test_visible_white_and_blacklisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def validate(self, visibility_field):\r\n if self.original_query.filter is wtypes.Unset:\r\n self.filter_expr = None\r\n else:\r\n self.filter_expr = json.loads(self.original_query.filter)\r\n self._validate_filter(self.filter_expr)\r\n self._replace_isotime_with_datetime(self.filter_expr)\r\n self._convert_operator_to_lower_case(self.filter_expr)\r\n self._normalize_field_names_for_db_model(self.filter_expr)\r\n\r\n self._force_visibility(visibility_field)\r\n\r\n if self.original_query.orderby is wtypes.Unset:\r\n self.orderby = None\r\n else:\r\n self.orderby = json.loads(self.original_query.orderby)\r\n self._validate_orderby(self.orderby)\r\n self._convert_orderby_to_lower_case(self.orderby)\r\n self._normalize_field_names_in_orderby(self.orderby)\r\n\r\n if self.original_query.limit is wtypes.Unset:\r\n self.limit = None\r\n else:\r\n self.limit = self.original_query.limit\r\n\r\n if self.limit is not None and self.limit <= 0:\r\n msg = _('Limit should be positive')\r\n raise ClientSideError(msg)", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):" ]
[ "0.937762", "0.93267894", "0.9324084", "0.9290966", "0.835963", "0.824345", "0.6060144", "0.596832", "0.5955842", "0.59169525", "0.5877949", "0.58511436", "0.5710895", "0.5647203", "0.5517191", "0.5514228", "0.5498384", "0.54875207", "0.5476975", "0.5461666", "0.54572135", "0.5455003", "0.54480416", "0.5448013", "0.54421926", "0.54189795", "0.54179335", "0.54179335", "0.54179335", "0.54179335" ]
0.9444555
0
Test case for update_visibility_query2
def test_update_visibility_query2(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query3(self):\n pass", "def test_update_visibility_query(self):\n pass", "def test_update_visibility_query4(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_ipam_vlans_update(self):\n pass", "def test_ipam_vlans_partial_update(self):\n pass", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def toggleTableVisibility(id, isVisible):\n table = SavedSearch.objects(id=id).first()\n if not table:\n return {'success': False,\n 'message': \"Error finding table. Please refresh and try again\"}\n message = table.name+ \" is now \"\n if isVisible:\n message += \"visible\"\n else:\n message += \"hidden\"\n table.isPinned = isVisible\n table.save()\n return {'success': True,'message': message}", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def test_wiki_visibility(self):\r\n\r\n wiki_tab = tabs.WikiTab()\r\n self.assertTrue(wiki_tab.is_hideable)\r\n wiki_tab.is_hidden = True\r\n self.assertTrue(wiki_tab['is_hidden'])\r\n self.check_tab_json_methods(wiki_tab)\r\n self.check_tab_equality(wiki_tab, wiki_tab.to_json())\r\n wiki_tab['is_hidden'] = False\r\n self.assertFalse(wiki_tab.is_hidden)", "def test_updates_tool_visibility(self):\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertNotContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)\n\n self.create_course_update(TEST_UPDATE_MESSAGE)\n url = course_home_url(self.course)\n response = self.client.get(url)\n self.assertContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):" ]
[ "0.9375693", "0.92956054", "0.928914", "0.9252121", "0.8373007", "0.81762373", "0.6068012", "0.59162986", "0.5861192", "0.58495826", "0.5846796", "0.58382684", "0.5765924", "0.56569463", "0.5589683", "0.556219", "0.553944", "0.55303895", "0.54985386", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862", "0.5469862" ]
0.94485813
0
Test case for update_visibility_query3
def test_update_visibility_query3(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query4(self):\n pass", "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query2(self):\n pass", "def test_update_visibility_query(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def test_ipam_vlans_partial_update(self):\n pass", "def test_ipam_vlans_update(self):\n pass", "def add_visibility_criteria(self, visibility):\n if visibility == VISIBILITY_PUBLIC:\n self.criteria.append({'workspace':\n {'$in': [ObjectId(workspace_id)\n for workspace_id\n in workspace_api.get_all_public_workspaces().values_list('id')]}})\n elif visibility == VISIBILITY_ALL:\n # NOTE: get all data, no restriction needed\n logger.info(\"add_visibility_criteria case not implemented.\")\n elif visibility == VISIBILITY_USER:\n # TODO: get only user data\n logger.info(\"add_visibility_criteria case not implemented.\")", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def test_wiki_visibility(self):\r\n\r\n wiki_tab = tabs.WikiTab()\r\n self.assertTrue(wiki_tab.is_hideable)\r\n wiki_tab.is_hidden = True\r\n self.assertTrue(wiki_tab['is_hidden'])\r\n self.check_tab_json_methods(wiki_tab)\r\n self.check_tab_equality(wiki_tab, wiki_tab.to_json())\r\n wiki_tab['is_hidden'] = False\r\n self.assertFalse(wiki_tab.is_hidden)", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "def test_visible_whitelisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):" ]
[ "0.92220104", "0.91974825", "0.91601187", "0.91086525", "0.83378476", "0.8022144", "0.60569686", "0.5969384", "0.5952087", "0.58775544", "0.57781094", "0.5748466", "0.55692554", "0.5544237", "0.55263317", "0.550208", "0.5493266", "0.5482827", "0.5482148", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119", "0.5479119" ]
0.9428289
0
Test case for update_visibility_query4
def test_update_visibility_query4(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_visibility_query3(self):\n pass", "def test_update_visibility_query1(self):\n pass", "def test_update_visibility_query(self):\n pass", "def test_update_visibility_query2(self):\n pass", "def test_update_asset_visibility_query(self):\n pass", "def test_update_deployment_visibility_query(self):\n pass", "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_ipam_vlans_partial_update(self):\n pass", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def test_ipam_vlans_update(self):\n pass", "def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()", "def test_visibility(self):\r\n self.assertFalse(self.net.environment\\\r\n .are_visible(self.net.pos[self.node1],\r\n self.net.pos[self.node2]))\r\n self.assertTrue(self.net.environment\\\r\n .are_visible(self.net.pos[self.node2],\r\n self.net.pos[self.node3]))", "def toggleTableVisibility(id, isVisible):\n table = SavedSearch.objects(id=id).first()\n if not table:\n return {'success': False,\n 'message': \"Error finding table. Please refresh and try again\"}\n message = table.name+ \" is now \"\n if isVisible:\n message += \"visible\"\n else:\n message += \"hidden\"\n table.isPinned = isVisible\n table.save()\n return {'success': True,'message': message}", "def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]", "def test_visible_whitelisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def test_visible_widgets(plugin_dialog):\n\n assert plugin_dialog.direct_entry_edit.isVisible()\n assert plugin_dialog.direct_entry_btn.isVisible()", "def add_visibility_criteria(self, visibility):\n if visibility == VISIBILITY_PUBLIC:\n self.criteria.append({'workspace':\n {'$in': [ObjectId(workspace_id)\n for workspace_id\n in workspace_api.get_all_public_workspaces().values_list('id')]}})\n elif visibility == VISIBILITY_ALL:\n # NOTE: get all data, no restriction needed\n logger.info(\"add_visibility_criteria case not implemented.\")\n elif visibility == VISIBILITY_USER:\n # TODO: get only user data\n logger.info(\"add_visibility_criteria case not implemented.\")", "def test_wiki_visibility(self):\r\n\r\n wiki_tab = tabs.WikiTab()\r\n self.assertTrue(wiki_tab.is_hideable)\r\n wiki_tab.is_hidden = True\r\n self.assertTrue(wiki_tab['is_hidden'])\r\n self.check_tab_json_methods(wiki_tab)\r\n self.check_tab_equality(wiki_tab, wiki_tab.to_json())\r\n wiki_tab['is_hidden'] = False\r\n self.assertFalse(wiki_tab.is_hidden)", "def test_nonVisibilityAffected(self):\n self.assertEquals(visibles(self.observer.idea, iimaginary.IThing), [])\n # XXX need another test: not blocked out from ...", "def _update_can_read_query(\n query, user, workspace_filter=None, user_filter=None\n):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = django_raw_query.add_access_criteria(\n query, accessible_workspaces, user, workspace_filter, user_filter\n )\n return query", "def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True", "def _force_visibility(self, visibility_field):\r\n authorized_project = acl.get_limited_to_project(pecan.request.headers)\r\n is_admin = authorized_project is None\r\n if not is_admin:\r\n self._restrict_to_project(authorized_project, visibility_field)\r\n self._check_cross_project_references(authorized_project,\r\n visibility_field)", "def test_visible_ramp(self):\n total_number = 100000\n expected_percentage = .10\n self.feature_test.set_percentage(expected_percentage * 100)\n # Generate a range of user ids and map these ids to the feature\n # test result.\n user_ids = list(range(1, total_number + 1))\n visibility_map = [\n self.feature_test.is_visible(user_id)\n for user_id\n in user_ids\n ]\n # Count the number of success conditions.\n visibility_count = visibility_map.count(True)\n # This should match 10%.\n actual_percentage = visibility_count / float(total_number)\n self.assertAlmostEqual(\n actual_percentage, expected_percentage, delta=.012\n )", "def test_empty_private(self):\n self.do_visible(True, None, False, is_admin=True)", "def test_client_nationlity_partial_update(self):\n pass", "def validate(self, visibility_field):\r\n if self.original_query.filter is wtypes.Unset:\r\n self.filter_expr = None\r\n else:\r\n self.filter_expr = json.loads(self.original_query.filter)\r\n self._validate_filter(self.filter_expr)\r\n self._replace_isotime_with_datetime(self.filter_expr)\r\n self._convert_operator_to_lower_case(self.filter_expr)\r\n self._normalize_field_names_for_db_model(self.filter_expr)\r\n\r\n self._force_visibility(visibility_field)\r\n\r\n if self.original_query.orderby is wtypes.Unset:\r\n self.orderby = None\r\n else:\r\n self.orderby = json.loads(self.original_query.orderby)\r\n self._validate_orderby(self.orderby)\r\n self._convert_orderby_to_lower_case(self.orderby)\r\n self._normalize_field_names_in_orderby(self.orderby)\r\n\r\n if self.original_query.limit is wtypes.Unset:\r\n self.limit = None\r\n else:\r\n self.limit = self.original_query.limit\r\n\r\n if self.limit is not None and self.limit <= 0:\r\n msg = _('Limit should be positive')\r\n raise ClientSideError(msg)", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):" ]
[ "0.93174714", "0.9263223", "0.9257318", "0.9255275", "0.83029866", "0.8144092", "0.6182312", "0.58731127", "0.58637685", "0.58312774", "0.575975", "0.57541287", "0.56747186", "0.5560299", "0.55397314", "0.5516548", "0.54876906", "0.54816324", "0.5463897", "0.5455646", "0.54554963", "0.54540074", "0.5421714", "0.5410326", "0.5405494", "0.5397353", "0.53936905", "0.5389691", "0.5389691", "0.5389691" ]
0.9397431
0
Test case for upload_file
def test_upload_file(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_file1(self):\n pass", "def test_upload(self):\n with self.client:\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.jpg\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))\n self.assertIn('foto.jpg', [photo.filename for photo in Photo.query.all()])", "def test_upload_file(self):\n data = dict(additional_metadata='additional_metadata_example',\n file='file_example')\n response = self.client.open(\n '/pet/{petId}/uploadImage'.format(pet_id=789),\n method='POST',\n data=data,\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_local_uploader_upload_correct_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return True, \\\r\n as this extension is allowed\")\r\n assert res is True, err_msg", "def _upload_test_file(self, user, project,testfilename=\"\"): \n \n if testfilename == \"\":\n testfilename = self.giverandomfilename(user)\n \n url = reverse(\"comicmodels.views.upload_handler\", \n kwargs={\"site_short_name\":self.testproject.short_name})\n \n factory = RequestFactory()\n request = factory.get(url)\n request.user = user\n \n fakefile = File(StringIO(\"some uploaded content for\" + testfilename))\n \n fakecontent = \"some uploaded content for\" + testfilename\n request.FILES['file'] = SimpleUploadedFile(name=testfilename,\n content=fakecontent)\n \n request.method = \"POST\"\n \n # Some magic code to fix a bug with middleware not being found,\n # don't know what this does but if fixes the bug.\n from django.contrib.messages.storage.fallback import FallbackStorage\n setattr(request, 'session', 'session')\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n \n response = upload_handler(request, project.short_name) \n \n self.assertEqual(response.status_code, 302, \"Uploading file %s as \"\n \"user %s to project %s did not load to expected 302 \"\n % (testfilename, user.username, project.short_name))\n \n errors = self._find_errors_in_page(response) \n if errors:\n self.assertFalse(errors, \"Error uploading file '%s':\\n %s\" % (testfilename, errors.group(1)))\n \n return response", "def test_upload(self):\n fake_file_name = 'fake_file_name'\n\n backend = self.test_init_valid()\n backend.upload(fake_file_name)\n\n backend.vault.concurrent_create_archive_from_file.assert_called_once_with(filename=fake_file_name, description='')", "async def test_upload_file(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat') \\\n .mock(return_value=Response(204))\n with mock.patch('__main__.open', new=mock.mock_open(read_data='test')) as file:\n file.return_value = json.dumps('test').encode()\n await provisioning_client.upload_provisioning_profile_file('id', 'servers.dat', file())\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def test_upload_wrong_file_type(self):\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.doc\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n if os.path.exists(PHOTOS_SAVE_PATH):\n self.assertNotIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))", "def test_upload(api):\n # upload the file to file.io servers\n uploaded_file = api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )\n\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n with pytest.raises(APIConnectionError):\n while True:\n api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )", "def test_local_uploader_upload_fails(self, mock):\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as there is an exception\")\r\n assert res is False, err_msg", "def test_upload_dir_contents_one_file(self):\n self._test_upload_dir_contents(filenames=['file1'])", "def test_file_field():", "def test_local_uploader_upload_wrong_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.txt')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as this extension is not allowed\")\r\n assert res is False, err_msg", "def test_post_file(self):\n self._register_uri(httpretty.POST)\n with open(self.test_file, 'rb') as in_file:\n response = self.client.post(self.test_endpoint,\n files={\"file\": in_file})\n self.assertEqual(response, self.test_data)\n body = str(self._last_request().body)\n self.assertIn(\"Content-Disposition: form-data; \"+\n \"name=\\\"file\\\"; filename=\\\"test_file.txt\\\"\", body)\n self.assertIn(\"Test File\", str(body))", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def upload_file(self, file_path, file_name, output_path):", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n filename = self.storage.get_path(package)\n self.assertTrue(os.path.exists(filename))\n with open(filename, 'r') as ifile:\n self.assertEqual(ifile.read(), 'foobar')", "def upload(self, filename, file_path):\n return", "def test_mutation_file_upload(client):\n query = '''mutation M {\n updatePerson(id: 1,avatar: \"cat1.jpg\") {\n person {\n id name age avatar\n }\n }\n }\n '''\n data = {\n 'query': query,\n 'file': (open('files/cat1.jpg'), 'cat1.jpg'),\n }\n response = client.post(\n '/graphql', data=data,\n content_type='multipart/form-data',\n )\n\n expected_response = '{\"data\":{\"updatePerson\":{\"person\":{\"id\":1,\"name\":null,\"age\":34.0,\"avatar\":\"/files/cat1.jpg\"}}}}'\n assert response.data == expected_response", "def test_files_for_upload_service(self, mocker):\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"}, ai_service='x')\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n files = {\n 'upload': (\n mocker.ANY, mocker.ANY,\n 'application/vnd.redhat.x.aiservice+tgz'\n )\n }\n self._retryable.assert_called_once_with(\n 'post',\n 'http://upload:8080/api/ingress/v1/upload',\n files=files,\n headers=mocker.ANY\n )", "def test_import_upload(self):\r\n self._login_admin()\r\n\r\n # verify we get the form\r\n res = self.app.get('/admin/import')\r\n self.assertTrue(\r\n '<form' in res.body,\r\n 'Should have a form in the body for submitting the upload')\r\n\r\n res = self._upload()\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"302 Found\",\r\n msg='Import status is 302 redirect by home, ' + res.status)\r\n\r\n # now verify that we've got our record\r\n imp = ImportQueueMgr.get_ready()\r\n imp = imp[0]\r\n self.assertTrue(imp, 'We should have a record')\r\n self.assertTrue(imp.file_path.endswith('admin.delicious.html'))\r\n self.assertEqual(imp.status, 0, 'start out as default status of 0')", "def test_file_upload(self):\n\n with tempfile.NamedTemporaryFile() as test_file:\n test_file.write(\n u'date,category,employee name,employee address,expense description,pre-tax amount,tax name,tax amount\\n')\n test_file.write(\n u'12/1/2013,Travel,Don Draper,\"783 Park Ave, New York, NY 10021\",Taxi ride, 350.00 ,NY Sales tax, 31.06\\n')\n test_file.flush()\n response = self.send_file_upload_request(view_name='csv_import_view', filename=test_file.name)\n\n actual_import_logs = ImportLog.objects.all()\n self.assertEqual(1, len(actual_import_logs))\n\n actual_import_log = actual_import_logs[0]\n expected_file_name = os.path.basename(test_file.name)\n self.assertEqual(expected_file_name, actual_import_log.file_name)\n\n expense_items = ExpenseItem.objects.all()\n self.assertEqual(1, len(expense_items))\n self.assertEqual('Don Draper', expense_items[0].employee.name)\n\n self.assertEqual('{\"upload_id\": 1}', response.content)", "def test_upload_text(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n response = util.upload_file(client, DEFAULT_USER, file)\n assert response.status == \"201 CREATED\"\n assert response.json[\"filetype\"] == \"DOCUMENT\"\n assert response.json[\"upload_name\"] == file.filename\n assert response.json[\"uploaded_by\"][\"id\"] == 1\n assert response.json[\"size\"] == len(file.contents)\n assert response.json[\"hash\"] == hashlib.md5(file.contents).hexdigest()\n\n # Access the file via URL\n response_get = client.get(response.json[\"url\"])\n assert response_get.status == \"200 OK\"\n assert list(response_get.response)[0] == file.contents", "def test_upload_step__valid_file(self):\n # Set Up\n self.go_to_step(FeedUpdateWizard.UPLOAD_STEP)\n\n # Test\n # Re-uploading the same file, this should be irrelevant\n with open(f\"{ETL_TEST_DATA_DIR}ea_20-1A-A-y08-1.xml", 
\"r\") as fp:\n response = self.client.post(\n self.WIZARD_URL,\n {\n self.WIZARD_CURRENT_STEP: FeedUpdateWizard.UPLOAD_STEP,\n self.SELECTED_ITEM: self.ITEM_UPLOAD_FILE,\n \"upload_file\": fp,\n \"submit\": \"submit\",\n },\n )\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.COMMENT_STEP\n )", "def test_upload_bad_file(self):\n url = image_upload_url(self.reteta.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_upload_image(self):\n with open('apps/upload/tests/media/test.jpg') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n file = json_r['file']\n eq_('test.jpg', file['name'])\n eq_(90, file['width'])\n eq_(120, file['height'])\n name = '098f6b.jpg'\n message = 'Url \"%s\" does not contain \"%s\"' % (file['url'], name)\n assert (name in file['url']), message\n\n eq_(1, ImageAttachment.objects.count())\n image = ImageAttachment.objects.all()[0]\n eq_('pcraciunoiu', image.creator.username)\n eq_(150, image.file.width)\n eq_(200, image.file.height)\n eq_('question', image.content_type.model)\n eq_(1, image.object_id)", "def test_upload(self):\n image = file(os.path.join(self.path, 'images', 'riker.gif'),\n 'rb')\n rv = self.post(url='/queue/',\n content={'image': (image, 'riker.gif')},\n token=self.user_token)\n self.assertStatus(rv, 200)\n\n # just check if the file is there\n fullpath = os.path.join(self.queue_dir, 'riker.gif')\n self.assertTrue(os.path.exists(fullpath))\n return", "def test_upload_sync(created_test_helper, setup_teardown_file_operations):\n # get current test params\n test_params = created_test_helper.get_test_data(TESTLOC)\n\n # Upload a PNG file < 1MB in size\n upload_file_response = created_test_helper.upload(\n file_name=test_params[\"test_file\"])\n\n # Validate\n # 1. id of file uploaded\n # 2. thumb nail urls and their links\n # 3. file id of uploaded file present in file list operation\n created_test_helper.validate_upload(\n file_name=test_params[\"test_file\"],\n upload_file_response=upload_file_response,\n file_type=\"images\")", "def test_upload_video(self):\n with self.client:\n path = '../data/example.mp4'\n path = os.path.join(os.path.dirname(__file__), path)\n with open(os.path.abspath(path), 'rb') as file:\n data = dict(file=(file, 'example.mp4'))\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=data)\n file.close()\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('example.mp4', os.listdir(VIDEOS_SAVE_PATH))\n self.assertIn('example.mp4', [video.filename for video in Video.query.all()])" ]
[ "0.8654897", "0.83011705", "0.8254178", "0.7827278", "0.7787636", "0.7720736", "0.7510016", "0.7478717", "0.74171174", "0.741625", "0.73933595", "0.7335599", "0.7333242", "0.73178065", "0.73138374", "0.72533", "0.72144806", "0.7214282", "0.7207461", "0.71678394", "0.7156008", "0.7128871", "0.71204346", "0.7119702", "0.71129626", "0.71125424", "0.71017903", "0.7082985", "0.7066568", "0.706545" ]
0.90826064
0
Test case for upload_file1
def test_upload_file1(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_file(self):\n pass", "def test_upload_dir_contents_one_file(self):\n self._test_upload_dir_contents(filenames=['file1'])", "def test_upload_file(self):\n data = dict(additional_metadata='additional_metadata_example',\n file='file_example')\n response = self.client.open(\n '/pet/{petId}/uploadImage'.format(pet_id=789),\n method='POST',\n data=data,\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_local_uploader_upload_correct_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return True, \\\r\n as this extension is allowed\")\r\n assert res is True, err_msg", "def test_upload(self):\n fake_file_name = 'fake_file_name'\n\n backend = self.test_init_valid()\n backend.upload(fake_file_name)\n\n backend.vault.concurrent_create_archive_from_file.assert_called_once_with(filename=fake_file_name, description='')", "def test_upload(self):\n with self.client:\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.jpg\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))\n self.assertIn('foto.jpg', [photo.filename for photo in Photo.query.all()])", "def test_upload_duplicate(client: FlaskClient):\n file = get_example_file(ExampleFileType.Png)\n response1 = util.upload_file(client, DEFAULT_USER, file)\n response2 = util.upload_file(client, DEFAULT_USER, file)\n\n assert response1.status == \"201 CREATED\"\n assert response2.status == \"200 OK\"\n assert response1.json == response2.json", "def upload_file(self, file_path, file_name, output_path):", "def test_upload_dir_contents_multiple_files(self):\n self._test_upload_dir_contents(filenames=['file1', 'file2'])", "def _upload_test_file(self, user, project,testfilename=\"\"): \n \n if testfilename == \"\":\n testfilename = self.giverandomfilename(user)\n \n url = reverse(\"comicmodels.views.upload_handler\", \n kwargs={\"site_short_name\":self.testproject.short_name})\n \n factory = RequestFactory()\n request = factory.get(url)\n request.user = user\n \n fakefile = File(StringIO(\"some uploaded content for\" + testfilename))\n \n fakecontent = \"some uploaded content for\" + testfilename\n request.FILES['file'] = SimpleUploadedFile(name=testfilename,\n content=fakecontent)\n \n request.method = \"POST\"\n \n # Some magic code to fix a bug with middleware not being found,\n # don't know what this does but if fixes the bug.\n from django.contrib.messages.storage.fallback import FallbackStorage\n setattr(request, 'session', 'session')\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n \n response = upload_handler(request, project.short_name) \n \n self.assertEqual(response.status_code, 302, \"Uploading file %s as \"\n \"user %s to project %s did not load to expected 302 \"\n % (testfilename, user.username, project.short_name))\n \n errors = self._find_errors_in_page(response) \n if errors:\n self.assertFalse(errors, \"Error uploading file '%s':\\n %s\" % (testfilename, errors.group(1)))\n \n return response", "def test_upload(api):\n # upload the file to file.io servers\n uploaded_file = api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )\n\n assert uploaded_file.link\n assert 
uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n with pytest.raises(APIConnectionError):\n while True:\n api.upload(\n tag='test_upload',\n expiry='1w',\n path='tests/test_file.txt'\n )", "def test_local_uploader_upload_wrong_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.txt')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as this extension is not allowed\")\r\n assert res is False, err_msg", "def test_local_uploader_upload_fails(self, mock):\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as there is an exception\")\r\n assert res is False, err_msg", "async def test_upload_file(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat') \\\n .mock(return_value=Response(204))\n with mock.patch('__main__.open', new=mock.mock_open(read_data='test')) as file:\n file.return_value = json.dumps('test').encode()\n await provisioning_client.upload_provisioning_profile_file('id', 'servers.dat', file())\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def test_files_for_upload_service(self, mocker):\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"}, ai_service='x')\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n files = {\n 'upload': (\n mocker.ANY, mocker.ANY,\n 'application/vnd.redhat.x.aiservice+tgz'\n )\n }\n self._retryable.assert_called_once_with(\n 'post',\n 'http://upload:8080/api/ingress/v1/upload',\n files=files,\n headers=mocker.ANY\n )", "def test_upload_file(self):\n\n uploadFile = os.path.join(testdatadir, \"upload.data\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(link).read()\n )\n\n # Re-upload slightly different file.\n uploadFile2 = os.path.join(testdatadir, \"upload2.data\")\n r = gracedb.writeFile(\n eventId,\n filename=\"upload.data\",\n filecontents=open(uploadFile2, 'r'))\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link2 = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(link2).read()\n )\n\n self.assertNotEqual(link, link2)", "def test_upload_wrong_file_type(self):\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.doc\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n if os.path.exists(PHOTOS_SAVE_PATH):\n self.assertNotIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))", "def test_upload_sync(created_test_helper, setup_teardown_file_operations):\n # get current test params\n test_params = created_test_helper.get_test_data(TESTLOC)\n\n # Upload a PNG file < 1MB in size\n upload_file_response = created_test_helper.upload(\n 
file_name=test_params[\"test_file\"])\n\n # Validate\n # 1. id of file uploaded\n # 2. thumb nail urls and their links\n # 3. file id of uploaded file present in file list operation\n created_test_helper.validate_upload(\n file_name=test_params[\"test_file\"],\n upload_file_response=upload_file_response,\n file_type=\"images\")", "def test_upload_step__valid_file(self):\n # Set Up\n self.go_to_step(FeedUpdateWizard.UPLOAD_STEP)\n\n # Test\n # Re-uploading the same file, this should be irrelevant\n with open(f\"{ETL_TEST_DATA_DIR}ea_20-1A-A-y08-1.xml\", \"r\") as fp:\n response = self.client.post(\n self.WIZARD_URL,\n {\n self.WIZARD_CURRENT_STEP: FeedUpdateWizard.UPLOAD_STEP,\n self.SELECTED_ITEM: self.ITEM_UPLOAD_FILE,\n \"upload_file\": fp,\n \"submit\": \"submit\",\n },\n )\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.COMMENT_STEP\n )", "def test_file_upload_file_with_the_same_name_already_exists(\n staff_api_client, media_root, site_settings\n):\n # given\n image_file1, image_name1 = create_image()\n path = default_storage.save(image_file1._name, image_file1)\n\n image_file, image_name = create_image()\n assert image_file1 != image_file\n assert image_name == image_name1\n assert image_file._name == image_file1._name\n\n variables = {\"image\": image_name}\n body = get_multipart_request_body(\n FILE_UPLOAD_MUTATION, variables, image_file, image_name\n )\n\n # when\n response = staff_api_client.post_multipart(body)\n\n # then\n content = get_graphql_content(response)\n data = content[\"data\"][\"fileUpload\"]\n errors = data[\"errors\"]\n\n domain = site_settings.site.domain\n assert not errors\n assert data[\"uploadedFile\"][\"contentType\"] == \"image/png\"\n file_url = data[\"uploadedFile\"][\"url\"]\n assert file_url != f\"http://{domain}/media/{image_file._name}\"\n assert file_url != f\"http://{domain}/media/{path}\"\n assert default_storage.exists(file_url.replace(f\"http://{domain}/media/\", \"\"))", "def test_file_can_be_uploaded_and_viewed_by_correct_users(self):\n \n project = self.testproject \n \n name1 = self.giverandomfilename(self.root)\n name2 = self.giverandomfilename(self.projectadmin)\n name3 = self.giverandomfilename(self.participant)\n name4 = self.giverandomfilename(self.participant2)\n \n resp1 = self._upload_test_file(self.root,self.testproject,name1)\n resp2 = self._upload_test_file(self.projectadmin,self.testproject,name2)\n resp3 = self._upload_test_file(self.participant,self.testproject,name3)\n resp4 = self._upload_test_file(self.participant2,self.testproject,name4)\n \n # root and projectadmin should see all files\n self.uploaded_files_are_all_shown_on_uploadpage([name1,name2,name3,name4],self.root)\n self.uploaded_files_are_all_shown_on_uploadpage([name1,name2,name3,name4],self.projectadmin)\n \n # participant1 sees only his or her own file\n self.uploaded_files_are_all_shown_on_uploadpage([name3],self.participant)\n self.uploaded_files_are_not_shown_on_uploadpage([name1,name2,name4],self.participant)\n \n # participant2 also sees only his or her own file\n self.uploaded_files_are_all_shown_on_uploadpage([name4],self.participant2)\n self.uploaded_files_are_not_shown_on_uploadpage([name1,name2,name3],self.participant2)", "def test_send_file(self):\n enc_file, add_data = self.send_file(self.nonce1, self.nonce2)\n self.assertNotEqual(len(add_data.keys()), 0)", "def test_mutation_file_upload(client):\n query = '''mutation M {\n updatePerson(id: 1,avatar: 
\"cat1.jpg\") {\n person {\n id name age avatar\n }\n }\n }\n '''\n data = {\n 'query': query,\n 'file': (open('files/cat1.jpg'), 'cat1.jpg'),\n }\n response = client.post(\n '/graphql', data=data,\n content_type='multipart/form-data',\n )\n\n expected_response = '{\"data\":{\"updatePerson\":{\"person\":{\"id\":1,\"name\":null,\"age\":34.0,\"avatar\":\"/files/cat1.jpg\"}}}}'\n assert response.data == expected_response", "async def post_multipart(self, part1, part_2, test):", "def test_rackspace_uploader_upload_wrong_file(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.upload_file.return_value = True\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n file = FileStorage(filename='test.docs')\r\n err_msg = \"Upload file should return False\"\r\n res = u.upload_file(file, container='user_3')\r\n assert res is False, err_msg", "def upload(self, filename, file_path):\n return", "def test_rackspace_uploader_upload_correct_file(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.upload_file.return_value=True\r\n mycf.get_object.side_effect = NoSuchObject\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n file = FileStorage(filename='test.jpg')\r\n err_msg = \"Upload file should return True\"\r\n assert u.upload_file(file, container='user_3') is True, err_msg\r\n calls = [call.get_container('user_3'),\r\n call.get_container().get_object('test.jpg')]\r\n mycf.assert_has_calls(calls, any_order=True)", "def test_import_upload(self):\r\n self._login_admin()\r\n\r\n # verify we get the form\r\n res = self.app.get('/admin/import')\r\n self.assertTrue(\r\n '<form' in res.body,\r\n 'Should have a form in the body for submitting the upload')\r\n\r\n res = self._upload()\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"302 Found\",\r\n msg='Import status is 302 redirect by home, ' + res.status)\r\n\r\n # now verify that we've got our record\r\n imp = ImportQueueMgr.get_ready()\r\n imp = imp[0]\r\n self.assertTrue(imp, 'We should have a record')\r\n self.assertTrue(imp.file_path.endswith('admin.delicious.html'))\r\n self.assertEqual(imp.status, 0, 'start out as default status of 0')", "def test_file_field():", "def test_upload_bad_file(self):\n url = image_upload_url(self.reteta.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.8374572", "0.80956906", "0.7675646", "0.7519246", "0.7453362", "0.745179", "0.7296041", "0.7186259", "0.71607894", "0.71505725", "0.7089428", "0.70805234", "0.7062851", "0.70404625", "0.70201254", "0.69955754", "0.6981875", "0.693843", "0.6902441", "0.68695647", "0.6860993", "0.6860614", "0.68309546", "0.68228346", "0.68151057", "0.6783717", "0.67753124", "0.6766412", "0.6762398", "0.67319894" ]
0.9006532
0
Test case for validate_credentials
def test_validate_credentials(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credentials(self):\r\n data = self._deep_clean('[email protected]')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def test_expired_credentials():\n pass", "def test_correct_credentials(self):\n with self.subTest(\"Valid credentials\"):\n valid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"my_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {valid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Invalid credentials\"):\n invalid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"not_the_correct_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {invalid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)", "def validate_credentials(self, data):\n try:\n boolean_param_list = []\n get_service_data = app.config.get('JWT_CONFIG').get('CREDENTIAL')\n token_identity_param = app.config.get('JWT_CONFIG').get('TOKEN_IDENTITY_PARAM')\n expires_delta = app.config.get('JWT_CONFIG').get('TOKEN_EXPIRY')\n expires_delta = eval(expires_delta) if isinstance(expires_delta, str) else expires_delta\n credentials = data.get('credentials')\n identity_credentials_keys = list(get_service_data.keys())\n for key in identity_credentials_keys:\n if get_service_data[key] != credentials[key]:\n boolean_param_list.append(False)\n else:\n boolean_param_list.append(True)\n\n if False in boolean_param_list:\n return {'msg': \"Incorrect Credentials\"}, 401\n else:\n access_token = self.auth_token_generate(\n identity_param_val=credentials[token_identity_param], expires_delta=expires_delta)\n return {'access_token': access_token}, 200\n except Exception as e:\n print(e)\n return {'msg': \"Incorrect Credentials\"}, 401", "def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()", "def test_validate_login_info(self):\n assert(PatientService().validate_login_info(self.valid_health_card_nb, self.password) > 0)\n assert(-1 == PatientService().validate_login_info(self.valid_health_card_nb, self.password + \"INVALID\"))", "def test_oms_credentials(*args, **kwargs):\n\treturn {'status':'success'}", "def test_invalid_password(self):\n pass", "def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')", "def test_authentication_success():\n d = Dexcom(USERNAME, PASSWORD)\n d._validate_account()\n d._validate_session_id()", "def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]", "def authenticate(credentials):", "def test_credential_boolean_parsing_failure():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": \"bogus\"}\n with pytest.raises(CredentialError):\n Credentials(init_dict)", "def test_invalid_credentials(self):\n self.user = {\"username\": \"testuserother\", \"password\": 
\"testpassword\"}\n response = self.app.post(\"/auth/login/\", data=self.user)\n self.assertEqual(response.status_code, 403)\n\n output = json.loads(response.data.decode('utf-8'))\n self.assertIn(\"Error: Invalid username and/or password.\",\n output[\"message\"])\n\n self.user = {\"username\": \"testuser\", \"password\": \"invalid\"}\n response = self.app.post(\"/auth/login/\", data=self.user)\n self.assertEqual(response.status_code, 403)\n\n output = json.loads(response.data.decode('utf-8'))\n self.assertIn(\"Error: Invalid username and/or password.\",\n output[\"message\"])", "def test_valid_keys(client):\n response=client.post(\"/signin\",data=dict(username=TestSignin.email, password=TestSignin.password), content_type=\"multipart/form-data\")\n data=json.loads(response.data)\n assert response.status_code==400\n assert data[\"error\"] == \"Please provide email and password as keys\"", "def test_authenticate_no_credentials(self):\n \n self.assertRaises(\n ValueError, \n self.authenticator.authenticate\n )", "def test_ApiWillAuthenticate_ValidCredentials_Successfully(self):\n api = Api(self.userId, self.testToken)\n self.assertTrue(api.connected())", "async def test_validate_login(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"test-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth valid\\n\"\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"test-user\", password=\"invalid-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"invalid-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"", "def validate(self, credentials):\n user = authenticate(**credentials)\n if user and user.is_active:\n return user\n raise serializers.ValidationError('Incorrect Credentials')", "def test_create_valid_user(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check that the object has actually been created properly.\n user = get_user_model().objects.get(**response.data)\n self.assertTrue(user.check_password(credentials['password']))\n\n # Check that the HTTP response does not include the password.\n self.assertNotIn('password', response.data)", "def test_authenticate_random_credentials(self):\n \n self.assertRaises(\n TypeError, \n self.authenticator.authenticate,\n foo='bar'\n )", "def test_credential_partial_loads():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": 0}\n creds = Credentials(init_dict)\n assert creds.url == \"http://example.com\"\n assert creds.token is None\n assert creds.org_key is None\n assert not creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None", "def test_init(self):\n self.assertEqual(self.new_credentials.account,\"Instagram\")\n self.assertEqual(self.new_credentials.username,\"bensongathu\")\n self.assertEqual(self.new_credentials.password,\"vcxz4321\")", "def test_valid_password_valid():\n assert 
valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def test_ApiWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n api = Api(self.userId, \"\")\n self.assertFalse(api.connected())", "def test_valid_credentials(self):\n self.tc_id = \"Ts_009\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n registration = Registrationwithoutemailid(self.driver)\n\n self.tc_step = \"Launch the url\"\n registration.launchUrl(self.url)\n\n self.tc_step = \"Enter the basic registration details\"\n registration.clickVendorLogin()\n registration.clickRegisterButton()\n registration.enterBasicRegistrationDetails(self.firstname,self.lastname,self.vendorname,self.fnsnumber,self.primaryemail,self.primaryphno,self.psw,self.cpsw,self.continfo)\n registration.basicRegButton()\n # self.assertEqual(registration.verifyReg(), \"Public Info\", \"Registration Failed\")", "def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")", "def test_creds_not_found():\n assert_equal(find_credentials({'foo': 'bar'}), (None, None))", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")" ]
[ "0.79102504", "0.72926986", "0.7287409", "0.7267114", "0.7098882", "0.70336324", "0.7020181", "0.7004465", "0.6962174", "0.6867665", "0.6861228", "0.6837545", "0.68326384", "0.6817637", "0.67701995", "0.6745321", "0.67394525", "0.6719857", "0.66962415", "0.6694112", "0.66937566", "0.6693401", "0.66898507", "0.6677334", "0.6655496", "0.6650515", "0.66221356", "0.66189873", "0.66071635", "0.6577579" ]
0.93079054
0
insert fictions into database
def to_db(self):
    bulk = conn_db().initialize_ordered_bulk_op()
    for fiction in self.fictions:
        bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})
    bulk.execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_insert(self, table_name: str, data: dict):\n \n query = 'INSERT OR IGNORE INTO {0} ({1}) VALUES ({2})'.format(\n table_name, ', '.join(data.keys()), ', '.join('?'*len(data)))\n self.db.execute(query, list(data.values()))", "def db_insert(db_in, table, dict_in):\n connection = db_in.connection.cursor()\n keys = []\n values = []\n for key, value in dict_in.items():\n keys.append(key)\n # Escape quotes\n if isinstance(value, str):\n values.append(\"'\" + value.replace(\"'\", \"''\") + \"'\")\n else:\n values.append(\"'\" + str(value) + \"'\")\n\n # Update entries for each key and value.\n for key in keys:\n # Attempt to add column, fail silently if it exists.\n try:\n connection.execute('ALTER TABLE %s ADD COLUMN %s' % (table.name, key))\n except sqlite3.OperationalError:\n pass\n\n connection.execute('INSERT INTO %s(%s) VALUES (%s);' % (table.name, \",\".join(keys), \",\".join(values)))\n return_entry = Entry(connection.lastrowid, db_in, table, dict_in)\n db_in.connection.commit()\n return return_entry", "def insert_values():\n pass", "def insert_into_db(self, database):\n\n # insert person\n keys = \"\"\n values = \"\"\n for key, value in self.person.items():\n # location\n if key == \"location\":\n # ensure location is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * FROM p21_cdm.location WHERE city='{value['city']}' \n AND zip='{value['zip']}') THEN INSERT INTO p21_cdm.location (city, zip) \n VALUES ('{value['city']}', '{value['zip']}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.person (location_id, {keys[:-1]}) \n VALUES((SELECT location_id \n FROM p21_cdm.location\n WHERE city='{self.person['location']['city']}' \n and zip='{self.person['location']['zip']}'), \n {values[:-1]})\"\"\")\n\n # insert visits\n for visit in self.visits:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n for key, value in visit.items():\n if key == \"care_site_name\":\n # ensure care site is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * \n FROM p21_cdm.care_site \n WHERE care_site_name='{value}') \n THEN INSERT INTO p21_cdm.care_site (care_site_name) \n VALUES ('{value}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.visit_occurrence (care_site_id, {keys[:-1]}) \n VALUES((SELECT care_site_id\n FROM p21_cdm.care_site\n WHERE care_site_name='{visit['care_site_name']}'),\n {values[:-1]}) \n RETURNING visit_occurrence_id\"\"\")\n\n # insert measurements, observations, conditions & procedures\n for data, tablename in [(self.measurements, \"measurement\"),\n (self.observations, \"observation\"),\n (self.conditions, \"condition_occurrence\"),\n (self.procedures, \"procedure_occurrence\")]:\n for entry in data:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n\n for key, value in entry.items():\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n entry[\"sql_id\"] = database.select(f\"\"\"INSERT INTO p21_cdm.{tablename}({keys[:-1]})\n VALUES({values[:-1]}) RETURNING {tablename}_id\"\"\")[0][0]\n\n # insert fact_relationships in both directions\n for table1, entry1, table2, entry2 in self.fact_relations:\n # 44818890 = Finding associated with (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n 
VALUES('{table1}','{entry1['sql_id']}','{table2}','{entry2['sql_id']}','44818890')\"\"\")\n # 44818792 = Associated with finding (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table2}','{entry2['sql_id']}','{table1}','{entry1['sql_id']}','44818792')\"\"\")\n\n # make transactions persistent\n database.commit()", "def insert_db():\n populate_tables()", "def insert_into_cit_db(dic, name):\n ndate = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n try:\n s = serialize_via_marshal(dic)\n write_message(\"size of \"+name+\" \"+str(len(s)))\n #check that this column really exists\n testres = run_sql(\"select object_name from rnkCITATIONDATA where object_name = %s\",\n (name,))\n if testres:\n run_sql(\"UPDATE rnkCITATIONDATA SET object_value = %s where object_name = %s\",\n (s, name))\n else:\n #there was no entry for name, let's force..\n run_sql(\"INSERT INTO rnkCITATIONDATA(object_name,object_value) values (%s,%s)\",\n (name,s))\n run_sql(\"UPDATE rnkCITATIONDATA SET last_updated = %s where object_name = %s\",\n (ndate,name))\n except:\n register_exception(prefix=\"could not write \"+name+\" into db\", alert_admin=True)", "def insert_movie_data(self, movie_people_dict):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.executemany(\n 'INSERT INTO movie_table VALUES(?,?);',\n movie_people_dict.items()\n )\n self._close_connection(conn)", "def insert_dict(self, dict_data):\n fields = ', '.join([f for f in dict_data])\n values = ', '.join([str(dict_data[f]) for f in dict_data])\n sql = (\"INSERT INTO snapshot_log (timestamp, \" + fields +\n \") VALUES (NOW(), \" + values + \" )\")\n\n cur = self.cursor()\n try:\n cur.execute(sql)\n #self.conn.commit()\n except sqlc.Error as e:\n print (\"Error #{0}: {1}\\nCouldn't insert\\nsql={2}\"\n .format(e.errno, e.msg, sql))\n except Exception as e:\n print (\"Error: {0}\\nCouldn't insert\\nsql={1}\"\n .format(e.message, sql))\n finally:\n self.close()", "def run_insert_example():\n table = \"actors\"\n insert_values = {\n 'id': 3,\n 'name': \"Matt\",\n 'last_name': \"Smith\",\n 'country': \"England\"}\n print querify.insert_from_dict(table, insert_values)\n\n insert_col_list = [\"id\", \"name\", \"last_name\", \"country\"]\n insert_val_list = [\n [1, \"Chris\", \"Eccleston\", \"England\"],\n [2, \"David\", \"Tennant\", \"Scotland\"],\n [3, \"Matt\", \"Smith\", \"England\"]]\n print querify.insert_from_list(table, insert_col_list, insert_val_list)", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def insert(self, value):\n # Build insert query\n into_sql = ''\n col_sql = ''\n val_sql = []\n for key, val in value.items():\n into_sql += ', {}'.format(key)\n col_sql += ', ?'\n val_sql.append(val)\n # Execute query\n self.execute(\"insert into {} ({}) values ({})\".format(self.name, into_sql[2:], col_sql[2:]), val_sql)", "async 
def insert(self, args: Dict[str, Any]):\n keys = \", \".join(args.keys())\n values = \", \".join([f\"${i + 1}\" for i in range(len(args))])\n\n conn: Connection\n async with self.db_pool.acquire() as conn:\n await conn.execute(\n f\"INSERT INTO {self.table_name} \"\n f\"({keys}) VALUES ({values})\",\n *args.values(),\n )", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def persist(data):\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(INSERT_SQL, (data[\"name\"], data[\"gender\"], data[\"age\"]))\n conn.commit()\n cursor.close()", "def insertKWARGS(dic,idnum):\n for c in dic.keys():\n try: db.addCol(c,'varchar','keyword') # If this is the first time this keyword has appeared, add the column\n except: pass \n cols,qMarks = 'keyword_job,'+','.join(dic.keys()),'?'+',?'*len(dic) # Column names in dictionary\n command = 'INSERT into keyword (%s) VALUES (%s) '%(cols,qMarks) # SQL insert command\n binds = [idnum]+dic.values() # Values in dictionary\n db.sqlexecute(command,binds)", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def insert(self, items=''):\n cur = self.conn.cursor()\n\n format_args = {'table': self.__name__,\n 'items': ', '.join(items.keys()),\n 'values': ', '.join([':'+key for key in items.keys()])}\n \n insert_query = \"\"\"\n INSERT INTO {table} ({items}) VALUES ({values})\n \"\"\".format(**format_args)\n\n cur.execute(insert_query, items)", "def insert_dict(self, data, table):\n logging.info(f'Inserting dictionary data into `{table}`...')\n logging.debug(f'Data:\\n{data}')\n\n try:\n column_names = []\n params = []\n\n for column_name, value in data.items():\n column_names.append(f'`{column_name}`')\n params.append(value)\n logging.info(f\"column_nameeeeeeeeeeeeeeeeeeee - {column_name}\")\n logging.info(f\"Valueeeeeeeeeee - {value}\")\n\n logging.info(f\"Length of paraaaaaaaaaaams - {len(params)}\")\n logging.debug(f'Column names: {column_names}')\n logging.debug(f'Params: {params}')\n\n columns_string = ', '.join(column_names)\n param_placeholders = ', '.join(['%s'] * len(column_names))\n\n query = f'INSERT INTO {table} ({columns_string}) VALUES ({param_placeholders})'\n\n return self.execute(query, params=params)\n except:\n logging.exception('Error inserting data.')\n return False", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def insert_data(self, table_name, data):\n for data_point in data:\n query = \"INSERT INTO %s(%s) VALUES (%s)\"\n\n fields = \", \".join(data_point.keys())\n values = \", \".join([self.pack_data(value) for value in data_point.values()])\n self.cursor.execute(query % (table_name, fields, values))\n self.db_connection.commit()", "def insertFromDict(table, dict):\n sql = 'INSERT INTO ' + table\n sql += ' ('\n sql += ', '.join(dict)\n sql += ') VALUES ('\n sql += ', '.join(map(dictValuePad, dict))\n sql += ');'\n return sql", "def save_to_db(self, data, db_operations):\n self.from_dict(data)\n self._id = 
str(db_operations.insert_one(self.to_dict()).inserted_id)", "def test_insert_many_dict(self):\n table = 'test_insert'\n columns = ['col1', 'col2', 'col3']\n\n in_vals = [{'col1': 1, 'col2': 2, 'col3': 3},\n {'col1': 4, 'col2': 5, 'col3': 6},\n {'col1': 7, 'col2': 8, 'col3': 9}\n ]\n out_vals = [tuple([row[col] for col in columns]) for row in in_vals]\n\n with self.dbh.table_recreate(table, columns, 'integer'):\n try:\n self.dbh.insert_many(table, columns, in_vals)\n res = self.dbh.select_all(table, columns)\n except Exception:\n self.dbh.rollback()\n raise\n\n self.assertEqual(res, out_vals)", "def dbinsert(db,table,**assignments):\n cols = \"\"\n vals = \"\"\n values = []\n for k,v in assignments.items():\n if cols!=\"\": cols += \",\"\n cols += k\n if vals!=\"\": vals += \",\"\n vals += \"?\"\n values.append(v)\n cmd = \"insert or replace into \"+table+\" ( \"+cols+\" ) values ( \"+vals+\" ) \"\n params = list(values)\n # print cmd,params\n cur = db.cursor()\n cur.execute(cmd,params)\n cur.close()\n del cur", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)", "def populate_table(self, data):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO film (title, film_id, year, director, cast, rating, poster_url) \n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cur.execute(sql, data)\n db.commit()\n except:\n print(\"An error occurred when saving the data!\")\n\n db.close()", "def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)", "def savedict(self, obj, table):\n if not isinstance(obj, dict): return False\n\n keys = ['`%s`' % key for key in obj.keys()]\n values = [None if value == '' else value for value in obj.values()]\n\n sql = 'REPLACE INTO %s (%s) VALUES (%s)' % (table, ','.join(keys), ','.join(['%s'] * len(values)))\n self.execute(sql, values)", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()" ]
[ "0.7035051", "0.6983024", "0.6933141", "0.69023114", "0.68670267", "0.6790983", "0.6777337", "0.6773737", "0.673298", "0.65766793", "0.6556748", "0.64888746", "0.648687", "0.6472251", "0.6463803", "0.64130276", "0.64077246", "0.6368387", "0.63239247", "0.6321916", "0.63211185", "0.63073504", "0.6304172", "0.62844175", "0.6264861", "0.6262935", "0.6246363", "0.6246346", "0.6227484", "0.6217377" ]
0.70036274
1
get fictions from db
def from_db(self, if_print=True):
    for fiction in conn_db().find():
        if if_print:
            print(fiction)
        self.fictions.append(QdFictionInfo(fiction))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDict(name):\n df = Dictionary.objects.select_related().get(name=name)\n return df", "def get_all():\n all = {}\n for k, v in DB.MUSIC.iteritems():\n all[k] = v.__dict__\n for k, v in DB.VIDEOS.iteritems():\n all[k] = v.__dict__\n for k, v in DB.IMAGES.iteritems():\n all[k] = v.__dict__\n\n return all", "def read_relationship_map():\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM relationshipmap\")\n relation_dictionary = {}\n for row in c:\n relation_dictionary[row[\"key\"]] = (row[\"key\"], row[\"value\"], row[\"relationshiptype\"])\n conn.close()\n return relation_dictionary\n except:\n return {}", "def db_values(self, db):", "def getFlightDict():\n table = 'flights'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n flight = airlineClasses.Flight()\n flight.id = row[0]\n flight.flightnum = row[1]\n flight.departCity = row[2]\n flight.arriveCity = row[3]\n flight.departTime = row[4]\n flight.departDay = row[5]\n flight.arriveTime = row[6]\n flight.arriveDay = row[7]\n flight.cost = row[8]\n flight.code = row[9]\n d[flight.id] = flight\n \n curs.close()\n connection.close()\n return d", "def GetAvailableDicts(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectDictTables'])\r\n\t\t\tDicts = self.DB_Cursor.fetchall()\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to return dictionaries: %s\"%detail)\r\n\t\treturn Dicts", "def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result", "def get_query(query):\n global database\n res = database.conn.execute(query)\n out = res.fetchall()\n return [dict(zip(i.keys(), i)) for i in out]", "def get_list():\n hash_map_list = model.hash_table.find()\n data = dict(success=True, hash_map_list=hash_map_list)\n return data", "def retrieve_from_db(self):\n pass", "def get_dict_from_db(key, fields=[]):\n dataStr = get_from_db(key=key)\n dataObj = json.loads(dataStr)\n if not fields:\n return dataObj\n else:\n return {field: dataObj[field] for field in fields}", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n 
# print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def to_dict_query(self) -> list:\n return [row.to_dict() for row in self.all()]", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def get_all(self):\n return self.db", "def get_file_contents(db_cursor):\n\n db_cursor.execute(\"\"\"SELECT * FROM data\"\"\")\n db_rows = db_cursor.fetchall()\n return {row[0]: row[1] for row in db_rows if row != []}", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "def mapdata():\n return getmapdata(db, MyTable)", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def load_user_map_from_db():\n user_map = {}\n\n try:\n users = get_users_from_table()\n for user in users:\n user_dict = {\n \"user_id\" : user[0],\n \"username\" : user[1],\n \"id_last_message_sent\" : user[2],\n \"id_last_message_stickered\" : user[3],\n \"count_since_last_stickered\" : user[4],\n \"is_new_user\" : False\n }\n us = user_store.UserStore(data_dict=user_dict)\n user_map[us.get_user_id()] = us\n print(\"user_map loaded\")\n except IOError:\n print(\"Database load failed. Loading empty user_map.\")\n \n return user_map", "def _get_critter_db() -> Dict[CritterType, List[CritterImage]]:\n with open(os.path.join('critters', 'names.json')) as fp:\n critter_data = json.load(fp)\n\n critter_db = collections.defaultdict(list)\n for critter_name, icon_name, critter_type_str in critter_data:\n critter_type = CritterType.from_str(critter_type_str)\n critter = CritterImage(critter_name, critter_type, icon_name)\n critter_db[critter_type].append(critter)\n return critter_db", "def all_measurements_lookup(client):\n dbs_dict = db_lookup(client)\n m_list_dict = []\n for db in dbs_dict:\n m_list_dict.append({db['name']: measurements_lookup(client, db['name'])})\n # print(\"def all_measurements_lookup 1: \", m_list_dict[:10])\n return m_list_dict", "def get_db_items(self, key):\n return self._extension_data['db_items'][key]", "def get_dicts(self, clean=False):\n return list(self.iter_dicts(clean=clean))", "def get_db():\n with open(db_file) as f:\n db = json.load(f)\n return db", "def readDB():\n if not os.path.exists(filenameDB):\n return { }\n \n with open(filenameDB, \"r\") as csvfile:\n rows = csv.reader(csvfile)\n if rows:\n db = { }\n for r in rows:\n if len(r)==2 and isinstance(r[0],str) and isinstance(r[1],str):\n db[r[1]] = r[0]\n return db\n return { }", "def get_all(cls):\n\t\treturn [el._to_dict() for el in Book.query.all()]", "def findall():\n\n dataset = {\n \"curso\": [],\n \"materia\": [],\n \"professor\": [],\n \"horas\": [],\n \"ids\": []\n }\n request_data_get = cursos_collections.find()\n\n for result in request_data_get:\n dataset['curso'].append(result[\"curso\"])\n dataset['materia'].append(result[\"materia\"])\n dataset['professor'].append(result[\"professor\"])\n dataset['horas'].append(result[\"horas\"])\n dataset['ids'].append(str(result[\"_id\"]))\n\n return dataset", "def getFeatureDicts(self):\n pass", "def load_db(db_file):\n db = {}\n logging.info('loading weighted vectors from {0}'.format(db_file))\n with open(db_file, 'r') as f:\n for line in f:\n j = json.loads(line)\n db.update(j)\n return db" ]
[ "0.6867659", "0.6352403", "0.61903954", "0.6165739", "0.6163699", "0.61169934", "0.60999084", "0.60981524", "0.60294837", "0.6003415", "0.5990331", "0.5963921", "0.59552884", "0.59165853", "0.5881678", "0.5866133", "0.58506846", "0.58490765", "0.5799774", "0.5777243", "0.5760112", "0.5711625", "0.5709083", "0.57090604", "0.57083815", "0.5703912", "0.569958", "0.56941897", "0.5657814", "0.56554323" ]
0.6969962
0
print all fiction brief info
def print(self):
    for fiction in self.fictions:
        print(fiction.__dict__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def display(self):\r\n\t\tfor key, value in self.__dict__.items():\r\n\t\t\tprint(key.upper(), value, sep=': ')\r\n\r\n\t\tprint(\"\")", "def printDicts():\n for k in key:\n print k, key[k]\n \n for f in freq:\n print f, freq[f]\n \n for e in english:\n print e, english[e]", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def print_verb_dict(verb):\n for keys in verb:\n print(f'{keys}: {verb[keys]}')", "def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def print_dict(data):\n print data", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def printsection(section):\n print('===')\n for key in section.keys():\n print(\"Key: %s\" % key)\n for item in section[key]:\n print(' %s' % item)", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def printDict(self):\n print str(self)", "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "def print_all_items_in_dict(all_items):\n if config.output.csv:\n print_all_items_in_dict_for_csv(all_items)\n else:\n print_all_items_in_dict_for_human(all_items)", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()", "def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def display(self):\n for value, prob in 
self.items():\n print(value, prob)", "def print_car(car):\n for key, value in car.items():\n print(f\"{key}: {value}\")", "def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))", "def display_bag_info(bag_name):\n\n \"\"\" Get the bag file summary info \"\"\"\n bag_info = yaml.load(subprocess.Popen(\n ['rosbag', 'info', '--yaml', bag_name], stdout=subprocess.PIPE).communicate()[0])\n\n \"\"\" Get the topics in the bag \"\"\"\n bag_topics = bag_info['topics']\n bag = rosbag.Bag(bag_name)\n\n \"\"\" For every topic in the bag, display its fields. Only do this once per topic \"\"\"\n for topic in bag_topics:\n for _, msg, _ in bag.read_messages(topics=topic['topic']):\n \"\"\" Recursively list the fields in each message \"\"\"\n print_topic_fields(topic['topic'], msg, 0)\n print('')\n break\n\n bag.close()\n\n sys.stdout.write(\"Found %u topics\\n\" % len(bag_topics))", "def printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))", "def printAll(self):\n for item in self.items:\n try:\n print(\"=========Post code: \" + item['code'] + \"=============\")\n print(item['caption']['text'])\n except TypeError:\n print(\"(Empty caption text)\")", "def printAllDictionaries( TM ):\n words = TM[0]\n wordlengths = TM[1]\n stems = TM[2]\n sentencelengths = TM[3]\n\n print(\"\\nWords:\\n\", words)\n print(\"\\nWord lengths:\\n\", wordlengths)\n print(\"\\nStems:\\n\", stems)\n print(\"\\nSentence lengths:\\n\", sentencelengths)\n print(\"\\n\\n\")", "def show_info(self):\n print 'Querying the station for the configuration...'\n config = self.station.getConfig()\n for key in sorted(config):\n print '%s: %s' % (key, config[key])", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())" ]
[ "0.689096", "0.689096", "0.6777161", "0.6774963", "0.6760851", "0.67451143", "0.6715383", "0.65789264", "0.65494156", "0.65452164", "0.6516783", "0.6516783", "0.6511981", "0.64922225", "0.6485285", "0.6480634", "0.64678776", "0.6416992", "0.6409101", "0.6396959", "0.63939697", "0.6371346", "0.6345708", "0.6329264", "0.63230807", "0.63029987", "0.6297862", "0.6296401", "0.62795293", "0.62655395" ]
0.7619195
0
test if QdPageHandler is working
def test(): page_handler = QdHandler(order_id=5, style=2) page_handler.set_page_range(1, 2) page_handler.take_shortcut() page_handler.handle()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_page(self):\r\n raise NotImplementedError", "def _check_ready(self, _widget, __event=None, __page=0):\r\n\r\n if self.cmbHardware.get_active() > 0:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n\r\n return False", "def _verify_page(self):", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def setup_page(self):\n raise NotImplementedError", "def test_qtwebenginecore():\n from qtpy import QtWebEngineCore\n\n assert QtWebEngineCore.QWebEngineHttpRequest is not None", "def prepare(self):\n return HandlerReady()", "def is_browser_on_page(self):", "def _check_ready(self, _widget, __event=None, page=0):\r\n# WARNING: Refactor _check_ready; current McCabe Complexity metric = 12.\r\n if self.cmbSoftware.get_active() > 0:\r\n self.cmbDetectMethod.set_sensitive(True)\r\n self.txtTestProcedure.set_sensitive(True)\r\n self.txtTestCase.set_sensitive(True)\r\n self.txtExecutionTime.set_sensitive(True)\r\n else:\r\n self.cmbDetectMethod.set_sensitive(False)\r\n self.txtTestProcedure.set_sensitive(False)\r\n self.txtTestCase.set_sensitive(False)\r\n self.txtExecutionTime.set_sensitive(False)\r\n\r\n if page == 2 and self.cmbSoftware.get_active() <= 0:\r\n if(self.txtIncidentDate.get_text() != '' and\r\n self.cmbReportedBy.get_active_text() != '' and\r\n self.cmbCategory.get_active() > 0 and\r\n self.cmbHardware.get_active() > 0):\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n elif page == 2 and self.cmbSoftware.get_active() > 0:\r\n if(self.txtIncidentDate.get_text() != '' and\r\n self.cmbReportedBy.get_active_text() != '' and\r\n self.cmbCategory.get_active() > 0 and\r\n self.cmbHardware.get_active() > 0 and\r\n self.cmbDetectMethod.get_active() > 0 and\r\n self.txtExecutionTime.get_text() != ''):\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n elif page == 3:\r\n if(self.txtDescription.get_text() != '' and\r\n self.txtDetails.get_text(*self.txtDetails.get_bounds()) != ''):\r\n self.assistant.set_page_complete(self.fxdPageDescription, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageDescription,\r\n False)\r\n\r\n return False", "def start(self):\n print \"starting to crawler qsbk's page(Enter Q or q to quit)\"\n print\n self.enable = True\n self.load_page()\n # a variabel to control counts\n nowpage = 0\n while self.enable:\n if len(self.stories) > 0:\n # get a page stories\n page_stories = self.stories[0]\n nowpage += 1\n del self.stories[0]\n # print stories\n self.print_one_story(page_stories, nowpage)", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def test_questions_page(self):\n # import pdb\n # pdb.set_trace()\n\n result = self.client.get('/questions')\n self.assertIn('<h2>Submit A Question</h2>', result.data)\n\n print \"DONE WITH QUESTIONS PAGE CHECK\"", "def handler(self, *args, **kwargs):\n return True", "def test_client_load_pages_request(self):\n is_present = hasattr(self.httpbin_3, 'test_requests_patch_method')\n\n self.assertTrue(is_present)", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)", "def test_page(self):\n result = 
self.test_client.page\n\n assert result == 1", "def test():\n test_app()\n test_pagebrowser()", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()", "def create_page(self):", "def __setSoup( self, url = None, data = None, headers = {} ):\r\n if url:\r\n self.currenturi = url\r\n try:\r\n log.info(self.log_msg( 'for uri %s'%(self.currenturi) ))\r\n res = self._getHTML( data = data, headers=headers )\r\n if res:\r\n self.rawpage = res[ 'result' ]\r\n else:\r\n log.info(self.log_msg('self.rawpage not set.... so Sorry..'))\r\n return False\r\n self._setCurrentPage()\r\n return True\r\n except Exception, e:\r\n log.exception(self.log_msg('Page not for :%s'%url))\r\n raise e", "def test_models_edx_ui_textbook_pdf_page_scrolled_with_valid_statement(statement):\n assert statement.event_type == \"textbook.pdf.page.scrolled\"\n assert statement.name == \"textbook.pdf.page.scrolled\"", "def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()", "def testOnHelp(self):\n webbrowser.open = MagicMock()\n\n # invoke the tested method\n self.widget.onHelp()\n\n # see that webbrowser open was attempted\n webbrowser.open.assert_called_once()", "def _setSoup(self, url=None, data=None, headers={}):\n if url:\n self.currenturi = url\n try:\n log.info(self.log_msg( 'for uri %s' %(self.currenturi) ))\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('self.rawpage not set.... so Sorry..'))\n return False\n self._setCurrentPage()\n return True\n except Exception, e:\n log.exception(self.log_msg('Page not for :%s' %uri))\n raise e", "def __setSoup( self, url = None, data=None, headers={}):\n\n if url:\n self.currenturi = url\n try:\n log.info(self.log_msg( 'for uri %s'%(self.currenturi) ))\n res = self._getHTML( data = data, headers=headers )\n if res:\n self.rawpage = res[ 'result' ]\n else:\n log.info(self.log_msg('self.rawpage not set.... so Sorry..'))\n return False\n self._setCurrentPage()\n return True\n except Exception, e:\n log.exception(self.log_msg('Page not for :%s'%url))\n raise e", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)" ]
[ "0.6118105", "0.6037043", "0.6025137", "0.6019778", "0.5941908", "0.5930006", "0.5732747", "0.57003534", "0.5554986", "0.55528724", "0.5485152", "0.5485152", "0.5485152", "0.54748863", "0.5464814", "0.54115254", "0.5389929", "0.5387075", "0.5340214", "0.5308082", "0.5297789", "0.52791286", "0.52764285", "0.5266422", "0.5233207", "0.5229686", "0.52227175", "0.518307", "0.5150658", "0.5143773" ]
0.72151715
0
Checks whether the code is running in a build environment.
def in_build(self): return self.is_valid_platform() and not self['ENVIRONMENT']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkBuildStatus(self):\n pass", "def is_devel(self):\r\n\r\n return self.is_debug()", "def _IsBuildRunning(build_data):\n current_step = build_data.get('currentStep')\n if (current_step and current_step.get('isStarted') and\n current_step.get('results') is None):\n return True\n return False", "def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']", "def check_build(self, bld_num):\n # QQQ In future this should be replaced with a query to the\n # build database\n bld_dir = os.path.join(self.ver_dir, str(bld_num))\n for plat in self.plats.keys():\n if self.plats[plat]:\n # QQQ Assumes format of filename unique to couchbase-server\n files = glob.glob(\"{}/couchbase-server-enterprise?{}*{}*\".format(\n bld_dir, self.version, plat\n ))\n files = [x for x in files if not (x.endswith(\".md5\") or x.endswith(\".sha256\"))]\n if len(files) == 0:\n print (\"Platform {} is missing\".format(plat))\n return False\n return True", "def is_building(self):\n return self._is_name_type(self.BUILDING)", "def test_build_tools(self):\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)\n if self.status: self.status.Warning(\"By default build tools is Xilinx this can be changed in demo/nysa_platform.py\")\n if find_xilinx_path() is None:\n return False\n return True", "def exists(_env):\n detector = DetectCompiler()\n if detector['icx'] is None:\n return False\n return True", "def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")", "def _in_travis(): # pragma: no cover\n return 'TRAVIS' in os.environ", "def poetry_build(self) -> bool:\n return self.run_poetry([\"build\", \"-vvv\"])", "def req_build(container):\n try:\n return 'dockerfile' in self.kard.env.get_container(container)\n except KeyError:\n return False", "def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True", "def is_debug_environment():\n return find_loader('cli') is None", "def IsClangWinBuild(build_dir, target):\n if not IsWindows():\n return False\n\n gn_file = os.path.join(build_dir, target, 'args.gn')\n if not os.path.isfile(gn_file):\n print 'WARNING: Unable to find the args.gn file.'\n return False\n # Matches e.g. 
\"gn_arg = value\"\n gn_arg_re = re.compile(r'^(?P<flag>[^= ]+)\\s*=\\s*(?P<value>[^ \\n]+)$')\n for line in open(gn_file):\n m = gn_arg_re.match(line)\n if m and m.group('flag') == 'is_clang':\n return m.group('value') == 'true'\n return False", "def _check_dist_env() -> bool:\n env_required = (\n os.environ.get(\"MASTER_PORT\"),\n os.environ.get(\"MASTER_ADDR\"),\n os.environ.get(\"WORLD_SIZE\"),\n os.environ.get(\"RANK\"),\n )\n return all(env is not None for env in env_required)", "def in_travis():\n return os.getenv(IN_TRAVIS_ENV) == 'true'", "def _global_development_mode() -> bool:\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )", "def check(self):\n with working_dir(self.build_directory):\n self._if_ninja_target_execute(\"test\", parallel=False)", "def running_on_ci() -> bool:\r\n return os.environ.get(\"JANITOR_CI_MACHINE\") is not None", "def is_built(args, task_name: str, artifact_name: str) -> bool:\n if task_name not in args._artifacts:\n return False\n\n for a in args._artifacts[task_name]:\n if a.name == artifact_name and a.built:\n return True\n elif a.name == artifact_name and not a.built:\n return False\n return False", "def on_production(self):\n\n if not self.is_valid_platform() and not self.in_build():\n return False\n prod_branch = 'production' if self.on_dedicated() else 'master'\n return self['BRANCH'] == prod_branch", "def check_env():\n job_file = [\n x for x in sys.argv if x.endswith('py') and not x.endswith('__main__.py')]\n spark_submit = True if len(job_file) > 0 else False\n return spark_submit", "def in_travis_pr():\n # NOTE: We're a little extra cautious and make sure that the\n # PR environment variable is an integer.\n try:\n int(os.getenv(TRAVIS_PR_ENV, ''))\n return True\n except ValueError:\n return False", "def test_is_production_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"production\"})\n is_develop = is_development_env()\n self.assertFalse(is_develop)", "def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()", "def is_valid(project: Project) -> bool:\n return bool(compileall.compile_dir(project.root, quiet=1))", "def _cmake_needed():\n if \"NOX_INSTALL_CMAKE\" in os.environ:\n return True\n\n return shutil.which(\"cmake\") is None", "def is_to_be_built_or_is_installed(self, shutit_module_obj):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tif cfg[shutit_module_obj.module_id]['shutit.core.module.build']:\n\t\t\treturn True\n\t\treturn self.is_installed(shutit_module_obj)", "def is_running(self):\n data = self._poll()\n return data.get('building', False)" ]
[ "0.7263732", "0.6854827", "0.68405396", "0.67520434", "0.65567434", "0.6506505", "0.6453986", "0.6399513", "0.63529325", "0.6346068", "0.633142", "0.6297582", "0.629469", "0.6252214", "0.62071383", "0.6195085", "0.6168373", "0.61623913", "0.61273426", "0.607173", "0.60236305", "0.60074514", "0.5978897", "0.5962782", "0.5953746", "0.5940429", "0.59291285", "0.59032625", "0.5898392", "0.58638096" ]
0.7952058
0
Checks whether the code is running in a runtime environment.
def in_runtime(self): return self.is_valid_platform() and self['ENVIRONMENT']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_runtime_phase():\n return os.getenv('FAIRING_RUNTIME', None) != None", "def exists(_env):\n detector = DetectCompiler()\n if detector['icx'] is None:\n return False\n return True", "def is_debug_environment():\n return find_loader('cli') is None", "def check(self, runtime):\n return True", "def is_jvm_app(self):\r\n return False", "def running_under_virtualenv():\n return _running_under_venv() or _running_under_regular_virtualenv()", "def check_environment():\n if 'OS_USERNAME' not in os.environ:\n print \"Error gathering facts! Please ensure that the openstack\" +\\\n \" credentials of an admin user are set as environment\" + \\\n \" variables.\"\n sys.exit(-1)\n if not find_executable('nova'):\n return False\n if not find_executable('openstack'):\n return False\n if not find_executable('glance'):\n return False\n if not find_executable('cinder'):\n return False\n return True", "def isEnvironment(self, environment):\n # Deprecated!\n return hasattr(environment, 'execute')", "def on_appengine():\n runtime = os.environ.get('SERVER_SOFTWARE', '')\n return (runtime.startswith('Development/') or\n runtime.startswith('Google App Engine/'))", "def is_venv():\n return (hasattr(sys, 'real_prefix') or\n (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))", "def _running_under_venv():\n return sys.prefix != getattr(sys, \"base_prefix\", sys.prefix)", "def running_in_ipython() -> bool:\n try:\n from IPython import get_ipython\n\n return get_ipython() is not None\n except ImportError:\n return False", "def _global_development_mode() -> bool:\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )", "def is_env_active():\n\n if sys.prefix == sys.base_prefix:\n print(\"Virtual environment is not active, exiting...\\n\")\n sys.exit(1)\n\n print(\"Virtual environment is active, proceeding...\\n\")", "def in_build(self):\n\n return self.is_valid_platform() and not self['ENVIRONMENT']", "def exists(_env):\n return True", "def exists(_env):\n return True", "def is_system(self) -> bool:", "def _running_under_regular_virtualenv():\n # pypa/virtualenv case\n return hasattr(sys, \"real_prefix\")", "def _venv_status(self):\n return (hasattr(sys, 'real_prefix') or \n (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def _is_running_in_notebook():\n\n # apparently get_ipython is lost when this gets called from a callback of\n # an ipython widget. 
See https://github.com/jupyter/jupyter/issues/299\n try:\n from IPython import get_ipython\n except ImportError:\n return False\n\n try:\n shell = get_ipython().__class__.__name__\n # ZMQInteractiveShell is the standard Jupyter Kernel\n # Interpreter is used by pyiodide\n if shell in [\"ZMQInteractiveShell\", \"Interpreter\"]:\n return True\n elif shell == \"TerminalInteractiveShell\":\n return False\n else:\n return False\n except NameError:\n return False", "def isSciServerComputeEnvironment():\n if os.path.isfile(KeystoneTokenPath):\n return True\n else:\n return False", "def check_env():\n logger.debug(\"Checking enviroment\")\n if os.getuid() != 0:\n exit_on_error(\"twindb-register-storage.py must be run by root\")\n logger.debug(\"Enviroment is OK\")\n return True", "def is_supported():\n return not isinstance(_the_app, StubApp)", "def is_running(program):\n return program in get_running()", "def is_devel(self):\r\n\r\n return self.is_debug()", "def _is_rr_present() -> bool:\n\n # this is ugly but I couldn't find a better way to do it\n # feel free to refactor it\n globals_list_literal_str = gdb.execute(\"python print(list(globals().keys()))\", to_string=True)\n interpreter_globals = ast.literal_eval(globals_list_literal_str)\n\n return \"RRCmd\" in interpreter_globals and \"RRWhere\" in interpreter_globals", "def test_if_ipython():\n try:\n return __IPYTHON__\n except NameError:\n return False", "def check_env():\n job_file = [\n x for x in sys.argv if x.endswith('py') and not x.endswith('__main__.py')]\n spark_submit = True if len(job_file) > 0 else False\n return spark_submit" ]
[ "0.7346108", "0.68984044", "0.687593", "0.6638651", "0.6591511", "0.6528236", "0.6488509", "0.64428127", "0.6350121", "0.62396896", "0.62145877", "0.6193956", "0.6167281", "0.61381817", "0.61291146", "0.6126704", "0.6126704", "0.61229163", "0.6114886", "0.6100531", "0.6083196", "0.60711735", "0.6071082", "0.6055976", "0.6038058", "0.5998883", "0.5994052", "0.5991814", "0.59884286", "0.5976747" ]
0.7856098
0
Return the routes definition.
def routes(self): if self.in_build(): raise BuildTimeVariableAccessException( 'Routes are not available during the build phase.' ) if not self._routesDef: raise NotValidPlatformException( 'No routes are defined. Are you sure you are running on Platform.sh?' ) return self._routesDef
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRoutes(self):\n pass", "def routes(self):\n return self._routes", "def get_routes():\n output = [f'{\"S. No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes_info():\n routes = []\n for rule in app.url_map.iter_rules():\n try:\n if rule.endpoint != 'static':\n if hasattr(app.view_functions[rule.endpoint], 'import_name'):\n import_name = app.view_functions[rule.endpoint].import_name\n obj = import_string(import_name)\n routes.append({rule.rule: \"%s\\n%s\" % (\",\".join(list(rule.methods)), obj.__doc__)})\n else:\n routes.append({rule.rule: app.view_functions[rule.endpoint].__doc__})\n except Exception as exc:\n routes.append({rule.rule: \n \"(%s) INVALID ROUTE DEFINITION!!!\" % rule.endpoint})\n route_info = \"%s => %s\" % (rule.rule, rule.endpoint)\n app.logger.error(\"Invalid route: %s\" % route_info, exc_info=True)\n # func_list[rule.rule] = obj.__doc__\n\n return jsonify(code=200, data=routes)", "def routes(self) -> pulumi.Output[Sequence['outputs.RouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def available_routes():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"/api/v1.0/start/end\"\r\n )", "def routes(self) -> dict:\n return dict(self._routes)", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> pulumi.Output[Sequence['outputs.VirtualHubRouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def static_routes(self):\n return self._static_routes", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n 
self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover", "def get_routes():\n\n return Db().get_line_ids()", "def add_routes(self):\n pass", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def list_route(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_route\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/routes'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1RouteList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )", "def getRoutes(request):\n routes = {\n 'Item list': '/api/v1/items/',\n 'Item details': '/api/v1/item/<int:pk>/',\n\n 'JWT': '/api/v1/users/login/',\n }\n\n return Response(routes)", "def _get_route_map(self):\n return self.__route_map", "def build_routes_file(routes, name):\n top = dict()\n top[\"file-type\"] = \"routes\"\n top[\"name\"] = name\n top[\"routes\"] = routes\n 
return top", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)", "def routes(self, body):\n pass" ]
[ "0.7470069", "0.71484", "0.70910865", "0.70422316", "0.7021956", "0.6939264", "0.69363356", "0.69363356", "0.6816645", "0.6773322", "0.6649526", "0.6649526", "0.65506256", "0.64129853", "0.634352", "0.6330275", "0.6308447", "0.6307612", "0.6283916", "0.6283713", "0.62551844", "0.62408155", "0.6182533", "0.61742663", "0.6157267", "0.61464685", "0.6140919", "0.6136725", "0.61272967", "0.6117135" ]
0.7488494
0
Returns the primary route. The primary route is the one marked primary in `routes.yaml`, or else the first non-redirect route in that file if none are marked.
def get_primary_route(self): for (url, route) in self.routes().items(): if route["primary"]: return route raise KeyError("No primary route found. This isn't supposed to happen.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primary(self) -> Optional['outputs.GetTrafficPolicyDocumentRulePrimaryResult']:\n return pulumi.get(self, \"primary\")", "def route(self, route):\n return self._routes.get(route, None)", "def get_route(self, endpoint: str):\n for route in self.tree_routes:\n if route.get_endpoint_name() == endpoint:\n return route\n\n return None", "def primary_step(self) -> Optional[pulumi.Input['PrimaryStepArgs']]:\n return pulumi.get(self, \"primary_step\")", "def primary_step(self) -> 'outputs.PrimaryStepResponse':\n return pulumi.get(self, \"primary_step\")", "def _get_default_route(self):\n\t\twith open(\"/proc/net/route\") as fh:\n\t\t\tfor line in fh:\n\t\t\t\tfields = line.strip().split()\n\t\t\t\tif fields[1] != '00000000' or not int(fields[3], 16) & 2:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn socket.inet_ntoa(struct.pack(\"=L\", int(fields[2], 16)))\n\n\t\t\treturn \"1.2.3.4\"", "def primary(self):\n primary_k = self.__class__.get_primary()\n return getattr(self, primary_k)", "def route(self) -> Optional[RoadMap.Route]:\n return self._route", "def primary(self) -> str:\n return pulumi.get(self, \"primary\")", "def first_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[0]\n\t\telse:\n\t\t\treturn None", "def _find_route(self, route_path_or_name):\n for route in self.router.routes:\n if route.path == route_path_or_name or route.name == route_path_or_name:\n return route", "def primary(self) -> 'outputs.CSIPowerMaxRevProxySpecConfigLinkConfigPrimary':\n return pulumi.get(self, \"primary\")", "def firstPath(self, toNative=True):\n return self.paths(toNative=toNative)[0]", "def primary_step_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_step_id\")", "def first_path(self):\n\t\treturn self.args[1]", "def shortest_route(self, with_return=False):\n return min(self.__routes(with_return))", "def _get_lsp_config_path_select_primary(self):\n return self.__lsp_config_path_select_primary", "def route_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"route_key\")", "def get_primary(cls):\n if not hasattr(cls, '_primary'):\n for field in cls.fields():\n attr = object.__getattribute__(cls, field)\n if type(attr) is PrimaryField:\n cls._primary = field\n return field\n raise Exception('No PrimaryField set!')\n return cls._primary", "def get_primary_navigation(self):\n nav_data = self.get_navigation_data()\n return nav_data.get('primary_navigation', [])", "def get_route(self, srcif, daddr):\n routes = self.lookup_routes(daddr)\n # Rules go here\n if routes:\n routes = self.longest_prefix_match(routes)\n # 1. Highest Preference\n routes = self.get_highest_preference(routes)\n # 2. Self Origin\n routes = self.get_self_origin(routes)\n # 3. Shortest ASPath\n routes = self.get_shortest_as_path(routes)\n # 4. IGP > EGP > UNK\n routes = self.get_origin_routes(routes)\n # 5. 
Lowest IP Address\n routes = self.get_lowest_ip(routes)\n # Final check: enforce peering relationships\n routes = self.filter_relationships(srcif, routes)\n return routes[0] if len(routes) > 0 else None", "def get_route_by_id(self, route_id):\n route = self.admin_repository.get_route_by_id(route_id)\n if route:\n print('''Route Id: {}\\nRoute: {}\\n\n '''.format(route[0], route[1]))\n return route\n else:\n print(\"Invalid Route Id\")\n return False", "def get_shortest_route(routes):\n route = sorted(routes, key=lambda dist: dist[2]).pop(0)\n return route", "def primary_step_id(self) -> str:\n return pulumi.get(self, \"primary_step_id\")", "def get_primary(genesis_file: str,\n wallet_name: str = DEFAULT_CHAOS_WALLET_NAME,\n wallet_key: str = DEFAULT_CHAOS_WALLET_KEY,\n pool: str = DEFAULT_CHAOS_POOL,\n timeout: Union[str,int] = DEFAULT_CHAOS_LEDGER_TRANSACTION_TIMEOUT,\n ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE,\n compile_stats: bool = True) -> str:\n primary = None\n if compile_stats:\n detect_primary(genesis_file,\n wallet_name=wallet_name,\n wallet_key=wallet_key,\n pool=pool,\n timeout=timeout,\n ssh_config_file=ssh_config_file)\n\n output_dir = get_chaos_temp_dir()\n with open(\"{}/primaries\".format(output_dir), 'r') as primaries:\n primary_dict = json.load(primaries)\n primary = primary_dict.get(\"current_primary\", None)\n\n return primary", "def primary_preprocessor(self):\n preprocessor = None\n for preprocessor in self.preprocessors:\n if getattr(preprocessor, '_is_primary', False):\n return preprocessor\n return preprocessor", "def get_route(self, route_id):\n\n if not self._routesDef:\n raise NotValidPlatformException(\n 'No routes are defined. Are you sure you are running on Platform.sh?'\n )\n\n for (url, route) in self.routes().items():\n if route['id'] == route_id:\n route['url'] = url\n return route\n raise KeyError('No such route id found: {}'.format(route_id))", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def get_primary_address(self):\n return self._primary_address" ]
[ "0.6220994", "0.6125105", "0.61188316", "0.59157336", "0.5758306", "0.5748899", "0.5729264", "0.5714864", "0.57052", "0.56843084", "0.56525487", "0.5616904", "0.5484802", "0.54736876", "0.54260397", "0.54205555", "0.53271264", "0.53102225", "0.5299336", "0.5258867", "0.5228813", "0.5144246", "0.5140805", "0.5107456", "0.510605", "0.5100801", "0.5073383", "0.5066135", "0.5066135", "0.5064698" ]
0.83614963
0
Returns just those routes that point to a valid upstream. The method is similar to routes(), but filters out redirect routes that are rarely useful for app configuration. If desired it can also filter to just those routes whose upstream is a given application name. To retrieve routes that point to the current application where the code is being run, pass that application's name to this method.
def get_upstream_routes(self, app_name=None): if app_name: # On Dedicated, the upstream name sometimes is `app:http` instead of just `app`. # If no name is specified then don't bother checking. return { url: route for url, route in self.routes().items() if route["type"] == "upstream" and app_name == route["upstream"].split(":")[0] } else: return { url: route for url, route in self.routes().items() if route["type"] == "upstream" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def web_app_routing(self) -> Optional[pulumi.Input['ManagedClusterIngressProfileWebAppRoutingArgs']]:\n return pulumi.get(self, \"web_app_routing\")", "def get_self_origin(self, routes):\n outroutes = []\n all_non_self = True\n for route in routes:\n if route[SORG]:\n outroutes.append(route)\n all_non_self = False\n if all_non_self:\n return routes\n return outroutes", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def generate_app_routes(conf: T.Dict[T.Text, T.Any]) -> T.List[web.RouteDef]:\n app_routes = [\n web.get(\"/api/verify\", rest.verify),\n web.get(\"/api/interpolate\", rest.interpolate),\n ]\n if conf.get(\"graphiql\"):\n app_routes.append(graphql.get_view(graphiql=True))\n else:\n app_routes.append(graphql.get_view(graphiql=False))\n return app_routes", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes", "def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def getRoutes(self):\n pass", "def get_self_origin(self, routes):\n outroutes = []\n # find all routes with self originating routes\n for r in routes:\n if r[MESG][SORG] == True:\n outroutes.append(r)\n \n # need to return default routes if we have 0 self originating routes\n if outroutes:\n return outroutes\n else:\n return routes", "def static_routes(self):\n return self._static_routes", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]]:\n return pulumi.get(self, \"routes\")", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def get_self_origin(self, routes):\n outroutes = []\n if len(routes) == 0:\n return routes\n\n for route in routes:\n if route[\"msg\"][\"selfOrigin\"]:\n outroutes.append(route)\n \n if len(outroutes) == 0:\n outroutes = routes\n \n return outroutes", "def _get_app_endpoints():\n endpoints = {\n (r'/', handlers.HeartbeatRequestHandler),\n (r'/1/issue/retrieve', handlers.RetrieveRequestHandler),\n (r'/1/issue/search', handlers.SearchRequestHandler),\n (r'/1/issue/search/setup', handlers.SearchSetupRequestHandler),\n }\n\n log(\"Endpoint to handler mappings:\")\n for url, handler in sorted(endpoints, key=lambda ep: ep[0]):\n log(\"{0} ---> {1}\".format(url, handler))\n\n return endpoints", "def get_self_origin(self, routes):\n # TODO\n outroutes = {}\n\n for ip in routes.keys():\n if routes[ip][SORG]:\n outroutes[ip] = routes[ip]\n\n return outroutes", "def routes(self):\n return self._routes", "def list_routes():\n for rule 
in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))", "def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)", "def get_origin_routes(self, routes):\n outroutes = []\n current_best = \"UNK\"\n # iterate through routes in given list updating the current best if a better\n # option is discovered\n for route in routes:\n if route[ORIG] == current_best:\n outroutes.append(route)\n elif (route[ORIG] == \"EGP\" and current_best != \"IGP\") or route[ORIG] == \"IGP\":\n # if the current best is worse than EGP and the current is EGP,\n # update best and start a new list\n # if the current best is worse than IGP and the current is IGP,\n # update best and start a new list\n current_best = route[ORIG]\n outroutes = [route]\n\n return outroutes", "def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)", "def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList", "def filter_relationships(self, recRelation, routes, src, is_forward=False, is_update=False):\n dict_entry = \"src\" if is_update else \"peer\"\n outroutes = []\n if recRelation == CUST:\n if is_forward:\n return routes\n\n for val in routes:\n if val[dict_entry] != src: \n outroutes.append(val) \n\n return outroutes\n for val in routes: \n ip = val[dict_entry]\n relation = self.relations[ip] \n if relation == CUST: \n outroutes.append(val)\n \n return outroutes", "def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )", "def remote_route(self):\r\n proxy = self.environ.get('HTTP_X_FORWARDED_FOR')\r\n if proxy: return [ip.strip() for ip in proxy.split(',')]\r\n remote = self.environ.get('REMOTE_ADDR')\r\n return [remote] if remote else []", "def filter_relationships(self, srcif, routes):\n def filt(route):\n dst = route[DEST][:-1]\n dst += '2'\n return self.relations[srcif] == CUST or self.relations[dst] == CUST\n\n outroutes = routes.copy()\n outroutes = list(filter(filt, outroutes))\n return outroutes", "def routes(self) -> pulumi.Output[Sequence['outputs.VirtualHubRouteTableRoute']]:\n return pulumi.get(self, \"routes\")" ]
[ "0.55596536", "0.54866225", "0.54663724", "0.53465295", "0.53465295", "0.52618855", "0.5247886", "0.5211618", "0.51734406", "0.5159288", "0.51349944", "0.5096475", "0.5084896", "0.5080969", "0.5080969", "0.5061923", "0.50489724", "0.5036067", "0.50321364", "0.4956283", "0.49212748", "0.49109864", "0.4877113", "0.48539135", "0.48476523", "0.4822002", "0.4758339", "0.47473308", "0.4728487", "0.47274795" ]
0.7827612
0
Get route definition by route ID.
def get_route(self, route_id): if not self._routesDef: raise NotValidPlatformException( 'No routes are defined. Are you sure you are running on Platform.sh?' ) for (url, route) in self.routes().items(): if route['id'] == route_id: route['url'] = url return route raise KeyError('No such route id found: {}'.format(route_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_route_by_id(self, route_id):\n route = self.admin_repository.get_route_by_id(route_id)\n if route:\n print('''Route Id: {}\\nRoute: {}\\n\n '''.format(route[0], route[1]))\n return route\n else:\n print(\"Invalid Route Id\")\n return False", "def get_cab_route_by_id(self, id):\n cab_route = self.admin_repository.get_cab_route_by_id(id)\n if cab_route:\n print(\"Cab Number : {}\".format(cab_route[1]))\n print(\"Route Id : {}\".format(cab_route[2]))\n print(\"Stop Name : {}\".format(cab_route[3]))\n print(\"Stop stage : {}\".format(cab_route[4]))\n print(\"Timings : {}\".format(cab_route[5]))\n return cab_route\n else:\n print(\"Invalid Input\")\n return False", "def route(self, route):\n return self._routes.get(route, None)", "def get_route(self, endpoint: str):\n for route in self.tree_routes:\n if route.get_endpoint_name() == endpoint:\n return route\n\n return None", "def getGTFSRouteData(route_id):\n cur = get_cursor();\n SQLExec(cur,\"select * from gtf_routes where route_id=%(id)s\",{'id':str(route_id)});\n ret = cur.next();\n cur.close();\n return ret;", "def get_route_id(self):\n\n return self.route_id", "def func_def_node_from_id(self, id: int):\n assert(self.func_def_exists(id))\n return self.func_id_to_def[id]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteTableRouteArgs']]]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RouteTable':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouteTableState.__new__(_RouteTableState)\n\n __props__.__dict__[\"disable_bgp_route_propagation\"] = disable_bgp_route_propagation\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"routes\"] = routes\n __props__.__dict__[\"subnets\"] = subnets\n __props__.__dict__[\"tags\"] = tags\n return RouteTable(resource_name, opts=opts, __props__=__props__)", "def get_by_id(\n self,\n id, # type: str\n event_routes_get_by_id_options=None, # type: Optional[\"_models.EventRoutesGetByIdOptions\"]\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.DigitalTwinsEventRoute\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.DigitalTwinsEventRoute\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n\n api_version = kwargs.pop('api_version', \"2022-05-31\") # type: str\n\n _traceparent = None\n _tracestate = None\n if event_routes_get_by_id_options is not None:\n _traceparent = event_routes_get_by_id_options.traceparent\n _tracestate = event_routes_get_by_id_options.tracestate\n\n request = build_get_by_id_request(\n id=id,\n api_version=api_version,\n traceparent=_traceparent,\n tracestate=_tracestate,\n template_url=self.get_by_id.metadata['url'],\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access\n request,\n stream=False,\n **kwargs\n )\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize('DigitalTwinsEventRoute', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_restaurant(id):\r\n with current_app.app_context():\r\n if current_app.config[\"USE_MOCKS\"]:\r\n id -= 1 # restaurant IDs starting by 1\r\n if 0 <= id < len(restaurants):\r\n return restaurants[id]\r\n else:\r\n return None\r\n else:\r\n return get_from(current_app.config[\"REST_SERVICE_URL\"]+\"/restaurants/\"+str(id))", "def get_route(self, srcif, daddr):\n routes = self.lookup_routes(daddr)\n # Rules go here\n if routes:\n routes = self.longest_prefix_match(routes)\n # 1. Highest Preference\n routes = self.get_highest_preference(routes)\n # 2. Self Origin\n routes = self.get_self_origin(routes)\n # 3. Shortest ASPath\n routes = self.get_shortest_as_path(routes)\n # 4. IGP > EGP > UNK\n routes = self.get_origin_routes(routes)\n # 5. 
Lowest IP Address\n routes = self.get_lowest_ip(routes)\n # Final check: enforce peering relationships\n routes = self.filter_relationships(srcif, routes)\n return routes[0] if len(routes) > 0 else None", "def route(self) -> Optional[RoadMap.Route]:\n return self._route", "def test_get_route_by_id(self):\n\n post = {\n 'ip': 'test_ip',\n 'next_hop': 'test_nexthop',\n 'communities': 'test_commu'\n }\n route_id = self.database.add_route(post)\n post2 = self.database.get_route_by_id({'_id': route_id})\n self.database.delete_route({'_id': route_id})\n self.assertEqual(post2['ip'], post['ip'], 'insertion failed')\n self.assertEqual(post2['next_hop'], post['next_hop'],\n 'insertion failed')\n self.assertEqual(post2['communities'], post['communities'],\n 'insertion failed')", "def get_route(start_id, dest_id, csv):\n\n # route is not computed yet\n if csv[start_id][dest_id] is None:\n return -1\n\n route = [start_id]\n\n while route[-1] != dest_id:\n # append the next node on this route\n route.append(csv[route[-1]][dest_id])\n\n return route", "def GetRoom(self, id):\n try:\n return self._rooms[id]\n except:\n return None", "def find(self, route):\n curr = self.root\n for part in route:\n if part not in curr.children:\n return None\n curr = curr.children[part]\n return curr.handler", "def get_route_with_scariness_from_db(route_name):\n connection = administer_route_database.get_route_db_connection()\n route = administer_route_database.get_route_from_db(connection, route_name)\n return route", "def print_stops_for_route(route_id: str) -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n try:\n stops = mbta.get_stops_for_route(route_id)\n except MBTAEmptyResult:\n print(f\"Route '{route_id}' returned no results\")\n return\n title_text = f\"Stops for '{route_id}'\"\n print(f\"{title_text:=^80}\")\n for stop in stops:\n print(f\"ID: {stop['id']}, NAME: {stop['attributes']['name']}\")\n return", "def get(resource_name, id, opts=None, destination_cidr_block=None, destination_ipv6_cidr_block=None, destination_prefix_list_id=None, egress_only_gateway_id=None, gateway_id=None, instance_id=None, instance_owner_id=None, nat_gateway_id=None, network_interface_id=None, origin=None, route_table_id=None, state=None, transit_gateway_id=None, vpc_peering_connection_id=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"destination_cidr_block\"] = destination_cidr_block\n __props__[\"destination_ipv6_cidr_block\"] = destination_ipv6_cidr_block\n __props__[\"destination_prefix_list_id\"] = destination_prefix_list_id\n __props__[\"egress_only_gateway_id\"] = egress_only_gateway_id\n __props__[\"gateway_id\"] = gateway_id\n __props__[\"instance_id\"] = instance_id\n __props__[\"instance_owner_id\"] = instance_owner_id\n __props__[\"nat_gateway_id\"] = nat_gateway_id\n __props__[\"network_interface_id\"] = network_interface_id\n __props__[\"origin\"] = origin\n __props__[\"route_table_id\"] = route_table_id\n __props__[\"state\"] = state\n __props__[\"transit_gateway_id\"] = transit_gateway_id\n __props__[\"vpc_peering_connection_id\"] = vpc_peering_connection_id\n return Route(resource_name, opts=opts, __props__=__props__)", "def get_room_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n if self.rooms.has_key(id):\n return self.rooms[id]\n raise RuntimeError, \"Room not known\"", "def get_primary_route(self):\n for (url, route) in self.routes().items():\n if route[\"primary\"]:\n return route\n raise KeyError(\"No primary route found. 
This isn't supposed to happen.\")", "def _route_call(aircraft_id):\n url = construct_endpoint_url(endpoint)\n resp = requests.get(url, params={config_param(\"query_aircraft_id\"): aircraft_id})\n if resp.status_code == 200:\n return json.loads(resp.text)\n elif response.status == config_param(\"status_code_aircraft_has_no_route\"):\n return {config_param(\"query_aircraft_id\"): aircraft_id}\n else:\n raise requests.HTTPError(resp.text)", "def get_room(self, name=None, id=None):\n \n if(name):\n return self.rooms[name] if name in self.rooms else None\n if(id):\n return next((v for (k,v) in self.rooms.items() if v.id == id), None)\n return None", "def get(self,\n provider_id,\n route_id,\n ):\n return self._invoke('get',\n {\n 'provider_id': provider_id,\n 'route_id': route_id,\n })", "def rt_lookup(session, vpc_id, rt_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_route_tables(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [rt_name]}])\n\n if len(response['RouteTables']) == 0:\n return None\n else:\n return response['RouteTables'][0]['RouteTableId']", "def get_route(self, srcif, daddr):\n peer = None\n routes = self.lookup_routes(daddr)\n\n # Rules go here\n if routes:\n # 1. Highest Preference\n routes = self.get_highest_preference(routes)\n # 2. Self Origin\n routes = self.get_self_origin(routes)\n # 3. Shortest ASPath\n routes = self.get_shortest_as_path(routes)\n # 4. EGP > IGP > UNK\n routes = self.get_origin_routes(routes)\n # 5. Lowest IP Address\n routes = self.get_lowest_ip(routes)\n # Final check: enforce peering relationships\n routes = self.filter_relationships(srcif, routes)\n\n if len(routes) > 0 and routes[0][SRCE]:\n return self.sockets[routes[0][SRCE]]\n else:\n # No viable routes found\n return None", "def get(self, _id):\n if not self.root:\n raise RootNotSet\n node = self.id_map.get(_id)\n if not node:\n raise IDNotFound(_id)\n\n link = node.get('link')\n if link:\n link_node = self.id_map.get(_id)\n if not link_node:\n logger.error('link node not found!')\n raise IDNotFound(link_node)\n data = self.get(node['link'])\n data['link'] = data['id']\n data['id'] = link_node['id']\n return data\n\n if node.get('type') == 'group' or node.get('type') == None:\n return self._adapter._get_group(_id)\n elif node.get('type') == 'data':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'json':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'config':\n data = self._adapter._load_data(_id)\n data.pop('name', None)\n return data\n else:\n raise UnsupportedType", "def flowdetail_get(fd_id):\n return IMPL.flowdetail_get(fd_id)", "def getRoomById(self, id):\n for room in self.rooms:\n if room.id == id:\n return room\n\n return None", "def get_resolver_rule(ResolverRuleId=None):\n pass" ]
[ "0.7411168", "0.65180796", "0.6465488", "0.5850693", "0.5827747", "0.5811879", "0.5733956", "0.5733195", "0.5618994", "0.55915236", "0.5536313", "0.5516131", "0.5510666", "0.5475697", "0.5438743", "0.54354966", "0.5373029", "0.53698295", "0.5353349", "0.53527266", "0.53224456", "0.5310577", "0.53012085", "0.529009", "0.52755356", "0.5264229", "0.52591723", "0.5235681", "0.5229877", "0.51843584" ]
0.73067266
1
Returns the application definition dict. This is, approximately, the .platform.app.yaml file as a nested dict. However, it also has other information added by Platform.sh as part of the build and deploy process.
def application(self): if not self._applicationDef: raise NotValidPlatformException( 'No application definition is available. Are you sure you are running on Platform.sh?' ) return self._applicationDef
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_to_app_environment_infos(self, app):\n app['environment_infos'] = {}\n app['environment_infos']['security_groups'] = []\n for form_security_group in self.environment_infos.form.security_groups:\n if form_security_group.data:\n security_group = form_security_group.data\n app['environment_infos']['security_groups'].append(security_group)\n\n app['environment_infos']['subnet_ids'] = []\n for form_subnet_id in self.environment_infos.form.subnet_ids:\n if form_subnet_id.data:\n subnet_id = form_subnet_id.data\n app['environment_infos']['subnet_ids'].append(subnet_id)\n\n app['environment_infos']['instance_profile'] = self.environment_infos.form.instance_profile.data\n app['environment_infos']['key_name'] = self.environment_infos.form.key_name.data\n app['environment_infos']['public_ip_address'] = self.environment_infos.form.public_ip_address.data\n\n app['environment_infos']['root_block_device'] = {}\n if self.environment_infos.form.root_block_device_size.data:\n app['environment_infos']['root_block_device'][\n 'size'] = self.environment_infos.form.root_block_device_size.data\n else:\n # default value to prevent low disk space alerts\n block_min_size = ghost_app_schema['environment_infos']['schema']['root_block_device']['schema']['size']['min']\n app['environment_infos']['root_block_device']['size'] = block_min_size\n\n root_block_name = self.environment_infos.form.root_block_device_name.data\n app['environment_infos']['root_block_device']['name'] = root_block_name or ''\n\n app['environment_infos']['optional_volumes'] = []\n for form_opt_vol in self.environment_infos.form.optional_volumes:\n opt_vol = {}\n if form_opt_vol.device_name.data:\n opt_vol['device_name'] = form_opt_vol.device_name.data\n if form_opt_vol.volume_type.data:\n opt_vol['volume_type'] = form_opt_vol.volume_type.data\n if form_opt_vol.volume_size.data:\n opt_vol['volume_size'] = form_opt_vol.volume_size.data\n if form_opt_vol.iops.data:\n opt_vol['iops'] = form_opt_vol.iops.data\n if form_opt_vol.launch_block_device_mappings.data:\n opt_vol['launch_block_device_mappings'] = form_opt_vol.launch_block_device_mappings.data\n app['environment_infos']['optional_volumes'].append(opt_vol)\n\n app['environment_infos']['instance_tags'] = []\n for form_tag in self.environment_infos.form.instance_tags:\n tag = {}\n if form_tag.tag_name.data:\n tag['tag_name'] = form_tag.tag_name.data\n tag['tag_value'] = form_tag.tag_value.data\n app['environment_infos']['instance_tags'].append(tag)", "def map_to_app(self, app):\n app['build_infos'] = {}\n app['build_infos']['ssh_username'] = self.ssh_username.data\n app['build_infos']['source_ami'] = self.source_ami.data\n app['build_infos']['source_container_image'] = self.container.data\n app['build_infos']['subnet_id'] = self.subnet_id.data", "def getSetupDict( script, app_name, app_version ):\r\n \r\n CleanDir( 'build' )\r\n CleanDir( app_name )\r\n \r\n setupDict = {}\r\n\r\n manifestRes = manifest_template % dict(prog=app_name)\r\n \r\n wd = {}\r\n wd['script'] = script\r\n #wd['icon_resources'] = [(1, iconFile)]\r\n wd['other_resources'] = [(RT_MANIFEST, 1, manifestRes)]\r\n wd['description'] = \"%s application\" % app_name\r\n wd['dest_base'] = app_name\r\n wd['version'] = app_version\r\n wd['company_name'] = \"Ginstrom IT Solutions (GITS)\"\r\n wd['copyright'] = \"(C) 2006 Ginstrom IT Solutions (GITS)\"\r\n wd['name'] = \"%s Application v %s\" % (app_name,app_version)\r\n \r\n setupDict['windows'] = [wd]\r\n setupDict['zipfile'] = None\r\n setupDict['data_files'] = [(\".\", 
glob.glob(\"./*.txt\") + glob.glob( \"./*.db\" ) )]\r\n\r\n excludes = [\"pywin\", \"pywin.debugger\", \"pywin.debugger.dbgcon\",\r\n \"pywin.dialogs\", \"pywin.dialogs.list\", \"win32com.server\"]\r\n \r\n options = {\"optimize\":2,\r\n \"dist_dir\":app_name,\r\n \"excludes\":excludes}\r\n \r\n setupDict['options'] = {\"py2exe\":options}\r\n \r\n return setupDict", "def get_default_app_config():\n # 1. Set function variables\n file_location = str(APP_DIR) + '/application/static/app_config.yaml'\n\n # 2. Read the YAML file\n yaml_dict = read_yaml(file_location)\n\n # 3. Return a dictionary of the contents\n return yaml_dict", "def app_metadata(requests_mock):\n return {\n \"description\": \"A test app that does not really exist.\",\n \"title\": \"Test App\",\n \"version\": \"1.0.0\",\n \"authors\": \"AiiDAlab team\",\n \"state\": \"development\",\n }", "def get_application_configurations(self, application):\n # 1. Get the path of the app_config directory\n app_conf_dir = self.sys_conf['configs']['env'][self.env]['app_config_url']\n\n # 2. Get the path for the given application configuration file\n app_conf_dir += '/{file}.yaml'.format(file=application)\n\n # 3. Read the YAML file\n yaml_dict = read_yaml(app_conf_dir)\n\n # 4. Return the YAML file\n return yaml_dict", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def map_to_app(self, app):\n if self.name:\n app['name'] = self.name.data\n if self.env:\n app['env'] = self.env.data\n if self.role:\n app['role'] = self.role.data\n if self.description:\n app['description'] = self.description.data\n # app['provider'] = self.provider.data\n if self.assumed_account_id:\n app['assumed_account_id'] = self.assumed_account_id.data\n if self.assumed_role_name:\n app['assumed_role_name'] = self.assumed_role_name.data\n if self.assumed_region_name:\n app['assumed_region_name'] = self.assumed_region_name.data\n app['region'] = self.region.data\n app['instance_type'] = self.instance_type.data\n app['instance_monitoring'] = self.instance_monitoring.data\n app['vpc_id'] = self.vpc_id.data\n\n self.map_to_app_log_notifications(app)\n self.map_to_app_blue_green(app)\n self.map_to_app_autoscale(app)\n self.map_to_app_safedeployment(app)\n self.map_to_app_build_infos(app)\n self.map_to_app_resources(app)\n self.map_to_app_environment_infos(app)\n self.map_to_app_env_vars(app)\n self.map_to_app_lifecycle_hooks(app)\n self.map_to_app_features(app)\n self.map_to_app_modules(app)", "def serialize(self):\n return {\n 'app_id': self.id,\n 'name': self.name,\n 'app_info': [item.serialize for item in self.appinfo.all()]\n }", "def map_to_app(self, app):\n app['safe-deployment'] = {}\n app['safe-deployment']['load_balancer_type'] = self.lb_type.data\n app['safe-deployment']['wait_before_deploy'] = self.safe_deploy_wait_before.data\n app['safe-deployment']['wait_after_deploy'] = self.safe_deploy_wait_after.data\n if self.lb_type.data == \"haproxy\":\n app['safe-deployment']['app_tag_value'] = self.haproxy_app_tag.data.strip()\n app['safe-deployment']['ha_backend'] = self.haproxy_backend.data.strip()\n app['safe-deployment']['api_port'] = self.haproxy_api_port.data", "def _addAppYaml(self):\n if self.wc.exists(self._branchPath('app/app.yaml')):\n raise ObstructionError('app/app.yaml exists already')\n\n yaml_path = self._branchPath('app/app.yaml')\n self.wc.copy(yaml_path + '.template', yaml_path)\n\n yaml = io.fileToLines(self.wc.path(yaml_path))\n out = []\n for i, line in enumerate(yaml):\n stripped_line = line.strip()\n if 
'TODO' in stripped_line:\n continue\n elif stripped_line == '# application: FIXME':\n out.append('application: socghop')\n elif stripped_line.startswith('version:'):\n out.append(line.lstrip() + 'g0')\n out.append('# * initial Google fork of Melange ' + self.branch)\n else:\n out.append(line)\n io.linesToFile(self.wc.path(yaml_path), out)\n\n self.wc.commit('Create app.yaml with Google patch version g0 '\n 'in branch ' + self.branch)", "def get_app(self, app_name, ns_name):\n\n status, _ = self.helm_client.status(app_name, namespace=ns_name)\n values, _ = self.helm_client.get_values(app_name, namespace=ns_name)\n release_data = {\"status\": status, \"values\": values}\n\n schema_path = Path(\"%s/%s/values.schema.json\"\n % (self._get_ns_dir(ns_name), app_name))\n if schema_path.exists():\n schema = json.loads(schema_path.read_text())\n release_data[\"schema\"] = schema\n\n return release_data", "def load_appdata():\n try:\n with FNAME.open() as _in:\n appdata = json.load(_in)\n except FileNotFoundError:\n return None\n return appdata", "def render_application_template(self):\n self.pipeline_config['instance_links'] = self.retrieve_instance_links()\n jsondata = get_template(\n template_file='infrastructure/app_data.json.j2', appinfo=self.appinfo, pipeline_config=self.pipeline_config)\n return jsondata", "def generate_manifest_dict(self):\n\n annotations = dict()\n\n for build_project in self.projects.get('build', []):\n for annotation in build_project.get('annotation', []):\n annotations[annotation['name']] = annotation['value']\n\n product = annotations.get('PRODUCT', 'unknown')\n version = annotations.get('VERSION', 'unknown')\n bld_num = annotations.get('BLD_NUM', '9999')\n manifest_name = '{}-{}-{}'.format(product, version, bld_num)\n\n return {\n manifest_name: {\n 'remotes': self.remotes,\n 'defaults': self.defaults,\n 'projects': self.projects\n }\n }", "def app_registry_data(apps_yaml, categories_yaml):\n return app_registry.AppRegistryData(apps=apps_yaml, categories=categories_yaml)", "def get(self, **kwargs):\r\n # groups = kwargs.get('groups')\r\n return {\r\n 'app_fullname': main_config.app_name,\r\n 'app_name': main_config.package_name,\r\n 'app_version': main_config.app_version\r\n }", "def _get_app_info(self):\n info_plist = None\n\n for data in self.filelist:\n if re.match(self.info_plist_regex, data.filename):\n info_plist = data\n\n if not info_plist:\n self._raise_ipa_error()\n\n info_plist = self.read(info_plist)\n self.app_info = readPlistFromString(info_plist)\n\n return self.app_info", "def applicationsdetails():\n appdicts = db.hgetall('applications')\n finaldict = OrderedDict()\n for appname in sorted(appdicts):\n instances = json.loads(appdicts.get(appname))\n instance_map = OrderedDict()\n for key in sorted(instances):\n instance_map.__setitem__(key,instances.get(key))\n finaldict.__setitem__(appname,instance_map)\n return render_template('robots.html', appdicts=finaldict)", "def get_app_info(repo_name, config, settings, container):\n\n container[\"name\"] = settings[\"repositories\"][repo_name][\"name\"]\n container[\"commands\"] = settings[\"repositories\"][repo_name].get(\"commands\", {}).copy()\n container[\"repo_path\"] = os.path.join(os.path.realpath(\".\"), config[\"code\"][\"directory\"], repo_name)\n container[\"repo_name\"] = repo_name\n container[\"attached\"] = False\n container[\"process\"] = PROCESS_NOEXIST\n\n return container", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = 
{\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def get_predefined_application(application=None):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/predefined/application/entry[@name='{}']\".format(application),\n }\n\n return __proxy__[\"panos.call\"](query)", "def create_application_info_file(config):\n DEBUG(\"Collect application info\")\n\n app_info = config.get(\"ApplicationInfo\", {})\n\n # Load existing config file or create empty\n if \"BaseConfigFile\" in app_info:\n with open_file(app_info.pop(\"BaseConfigFile\"), config) as hdlr:\n info = json_load(hdlr)\n\n else:\n info = dict(ID=gen_guid(), Name=\"Application\", Description=\"\",\n Owner=\"-\", Active=\"1\", Serverversion=\"\",\n ScriptingLanguage=\"python\", Icon=\"\")\n\n # Update values from config\n for key, value in app_info.items():\n info[key] = value\n\n # Generate new GUID if it isn't exiisting\n if not info.get(\"ID\", \"\"):\n info[\"ID\"] = gen_guid()\n\n # Write data to file\n path = os.path.join(config[\"target\"][\"path\"], constants.INFO_FILE)\n\n DEBUG(\"Writing application info to '%s'\", path)\n\n with fopen(path, \"wb\") as hdlr:\n json_dump(info, hdlr)\n\n INFO(\"Application info successfully written to '%s'\", path)", "def map_to_app_lifecycle_hooks(self, app):\n app['lifecycle_hooks'] = {}\n form_lifecycle_hooks = self.lifecycle_hooks\n if form_lifecycle_hooks.pre_buildimage.data:\n app['lifecycle_hooks']['pre_buildimage'] = b64encode_utf8(\n form_lifecycle_hooks.pre_buildimage.data.replace('\\r\\n', '\\n'))\n else:\n app['lifecycle_hooks']['pre_buildimage'] = ''\n\n if form_lifecycle_hooks.post_buildimage.data:\n app['lifecycle_hooks']['post_buildimage'] = b64encode_utf8(\n form_lifecycle_hooks.post_buildimage.data.replace('\\r\\n', '\\n'))\n else:\n app['lifecycle_hooks']['post_buildimage'] = ''\n\n if form_lifecycle_hooks.pre_bootstrap.data:\n app['lifecycle_hooks']['pre_bootstrap'] = b64encode_utf8(\n form_lifecycle_hooks.pre_bootstrap.data.replace('\\r\\n', '\\n'))\n else:\n app['lifecycle_hooks']['pre_bootstrap'] = ''\n\n if form_lifecycle_hooks.post_bootstrap.data:\n app['lifecycle_hooks']['post_bootstrap'] = b64encode_utf8(\n form_lifecycle_hooks.post_bootstrap.data.replace('\\r\\n', '\\n'))\n else:\n app['lifecycle_hooks']['post_bootstrap'] = ''", "def test_one_application(self):\n applications = {\n Application(\n name='mysql-hybridcluster',\n image=Application(\n name='mysql-hybridcluster',\n image=DockerImage(repository='flocker/mysql',\n tag='v1.0.0'))\n )\n }\n result = configuration_to_yaml(applications)\n expected = {\n 'applications':\n {'mysql-hybridcluster': {'image': 'unknown', 'ports': []}},\n 'version': 1\n }\n self.assertEqual(safe_load(result), expected)", "def test_dict_of_applications(self):\n 
config = dict(\n version=1,\n applications={\n 'mysql-hybridcluster': dict(\n image='flocker/mysql:v1.0.0',\n volume={'mountpoint': b'/var/mysql/data'}\n ),\n 'site-hybridcluster': {\n 'image': 'flocker/wordpress:v1.0.0',\n 'ports': [dict(internal=80, external=8080)],\n }\n }\n )\n parser = Configuration()\n applications = parser._applications_from_configuration(config)\n expected_applications = {\n 'mysql-hybridcluster': Application(\n name='mysql-hybridcluster',\n image=DockerImage(repository='flocker/mysql', tag='v1.0.0'),\n ports=frozenset(),\n volume=AttachedVolume(\n name='mysql-hybridcluster',\n mountpoint=FilePath(b'/var/mysql/data'))),\n 'site-hybridcluster': Application(\n name='site-hybridcluster',\n image=DockerImage(repository='flocker/wordpress',\n tag='v1.0.0'),\n ports=frozenset([Port(internal_port=80,\n external_port=8080)]))\n }\n self.assertEqual(expected_applications, applications)", "def map_from_app(self, app):\n build_infos = app.get('build_infos', {})\n self.ssh_username.data = build_infos.get('ssh_username', '')\n self.source_ami.data = build_infos.get('source_ami', '')\n self.container.data = build_infos.get('source_container_image', '')\n self.subnet_id.data = build_infos.get('subnet_id', '')", "def app_settings():\n return {\n 'app_wksp_path': os.path.join(App.get_app_workspace().path, ''),\n 'threddsdatadir': App.get_custom_setting(\"thredds_path\"),\n 'threddsurl': App.get_custom_setting(\"thredds_url\"),\n 'logfile': os.path.join(App.get_app_workspace().path, 'workflow.log')\n }", "def map_from_app(self, app):\n\n # Populate form with app data\n self.name.data = app.get('name', '')\n self.env.data = app.get('env', '')\n self.role.data = app.get('role', '')\n self.description.data = app.get('description', '')\n\n # self.provider.data = app.get('provider', DEFAULT_PROVIDER)\n self.assumed_account_id.data = app.get('assumed_account_id', '')\n self.assumed_role_name.data = app.get('assumed_role_name', '')\n self.assumed_region_name.data = app.get('assumed_region_name', '')\n\n self.region.data = app.get('region', '')\n self.instance_type.data = app.get('instance_type', '')\n self.instance_monitoring.data = app.get('instance_monitoring', False)\n self.vpc_id.data = app.get('vpc_id', '')\n\n self.map_from_app_notifications(app)\n self.map_from_app_bluegreen(app)\n self.map_from_app_autoscale(app)\n self.map_from_app_safedeployment(app)\n self.map_from_app_build_infos(app)\n self.map_from_app_environment_infos(app)\n self.map_from_app_env_vars(app)\n self.map_from_app_lifecycle_hooks(app)\n self.map_from_app_features(app)\n self.map_from_app_modules(app)\n\n # TODO: handle resources app data", "def swagger():\n return jsonify(current_app.spec.to_dict())" ]
[ "0.65810484", "0.65566224", "0.6500841", "0.6466886", "0.6352461", "0.63500255", "0.6347475", "0.626704", "0.59420925", "0.59143335", "0.59136295", "0.5895054", "0.5882706", "0.5813377", "0.5798433", "0.5792468", "0.57485384", "0.574315", "0.57396924", "0.5672397", "0.565449", "0.5649284", "0.5639342", "0.56387293", "0.56312543", "0.5627883", "0.56163305", "0.56000185", "0.5598892", "0.558131" ]
0.6894871
0
Adds a credential formatter to the configuration. A credential formatter is responsible for formatting the credentials for a relationship in a way expected by a particular client library. For instance, it can take the credentials from Platform.sh for a MongoDB database and format them into a URL string expected by pymongo. Use the formatted_credentials() method to get the formatted version of a particular relationship.
def register_formatter(self, name, formatter):
    self._credentialFormatters[name] = formatter
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatted_credentials(self, relationship, formatter):\n if formatter not in self._credentialFormatters:\n raise NoCredentialFormatterFoundException(\n 'There is no credential formatter named {0} registered. Did you remember to call register_formatter()?'\n .format(formatter)\n )\n return self._credentialFormatters[formatter](self.credentials(relationship))", "def pymongo_formatter(credentials):\n return '{0}:{1}/{2}'.format(\n credentials['host'],\n credentials['port'],\n credentials['path']\n )", "def pysolr_formatter(credentials):\n\n return \"http://{0}:{1}/{2}\".format(credentials['ip'],\n credentials['port'],\n credentials['path'])", "def get_credentials_format(cls):\n raise NotImplementedError", "def initialize_formatter(config):\n if config.json: # pylint: disable=R1705\n return formatters.JsonFormatter()\n elif config.severity: # pylint: disable=R1705\n return formatters.SeverityFormatter(config.colored)\n return formatters.Formatter(config.colored)", "def formatter(formatter_name):\n\n def _formatter_decorator(f):\n def _formatter_wrapper(*wrapper_args, **wrapper_kwargs):\n ctx = wrapper_args[1]\n if not ctx.json and formatter_name in _formatter_functions:\n ctx.format_function = _formatter_functions[formatter_name]\n return f(*wrapper_args, **wrapper_kwargs)\n\n return _formatter_wrapper\n\n return _formatter_decorator", "def add_credential(self, authenticator_id, credential):\n pass", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def __init__(self, config: Union[str, Path, TextIOWrapper] = None):\n if not isinstance(config, TextIOWrapper):\n config = Path(config) if config else Path(self._DEFAULT_LOCATION)\n config = config.expanduser().absolute()\n with open(config, 'r') as fp:\n self._config = json.load(fp)\n else:\n self._config = json.load(config)\n self._store = self._config.get('credsStore', None)\n if self._store not in self._SUPPORTED_STORES:\n raise UnsupportedStore(f'Credential store \"{self._store}\" not supported')\n # TODO: Support the other methods besides secretservice when we can actually test with them\n self._cmd = ['docker-credential-secretservice', 'get']", "def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str", "def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n color_code = self.color(self.log_colors, record.levelname)\n if hasattr(record, 'ctx'):\n metadata = record.ctx.invocation_metadata()\n for item in metadata:\n if item.key == 'author_name':\n setattr(record, 'user', item.value)\n elif item.key == 'correlation_id':\n setattr(record, 'correlationId', item.value)\n\n for key, value in record.__dict__.items():\n #this allows to have numeric keys\n if (key not in RESERVED_ATTR_HASH\n and not (hasattr(key, \"startswith\")\n and key.startswith('_'))):\n message = append(color_code=color_code, message=message, 
key=key, value=value)\n return message", "def set_credentials_helper(cls, cred_helper):\n cls.credentials_helper = cred_helper", "def set_credentials(self, authenticator):\n pass", "def set_formatter(self, formatter):\n self.format = formatter", "def posgresql_dsn_formatter(credentials):\n\n return \"postgresql://{0}:{1}@{2}:{3}/{4}\".format(credentials[\"username\"],\n credentials[\"password\"],\n credentials[\"host\"],\n credentials[\"port\"],\n credentials[\"path\"])", "def add_formatter(config: Configurator, name: str, func: t.Callable) -> None:\n config.registry.settings.setdefault(\"pyramid_openapi3_formatters\", {})\n reg = config.registry.settings[\"pyramid_openapi3_formatters\"]\n reg[name] = func", "def add_credential(args):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n # next, load the new credential\n new_cred = read_new_credential(args.cred_file)\n\n # check for any conflicts between the new credential and an existing one\n conflicting_cred_idx = None\n for idx, cred in enumerate(creds):\n if cred.username == new_cred.username:\n if len(cred.hostname) > 0 and len(new_cred.hostname) > 0 \\\n and cred.hostname == new_cred.hostname:\n conflicting_cred_idx = idx\n elif len(cred.hostname) == 0 and len(new_cred.hostname) == 0:\n conflicting_cred_idx = idx\n if conflicting_cred_idx is not None:\n if args.force:\n creds[conflicting_cred_idx] = new_cred\n else:\n logger.error(\"Credential already exists; overwrite with --force\")\n return\n else:\n creds.append(new_cred)\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()", "def cfg_credentials(context):\n arguments = {\n '--config': context.config_file,\n 'authorize': False,\n 'account_summary': False\n }\n pychex_cli = PychexCli(arguments)\n pychex_cli.read_config()\n # Check that the values pulled from the read_config method match what we\n # know\n print(pychex_cli.username)\n assert pychex_cli.username == context.username\n assert pychex_cli.security_image_path == context.security_image_path\n assert pychex_cli.password == context.password\n # Check that the unencrypted values are not present\n with open(arguments['--config']) as cfg:\n cfg_txt = cfg.read()\n assert cfg_txt.find(context.username) == -1\n assert cfg_txt.find(context.security_image_path) == -1\n assert cfg_txt.find(context.password) == -1", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def add_formatter(self, fmt):\n if fmt and not isfunction(fmt):\n raise TypeError(\"custom format function must be a type of function\")\n\n if fmt and fmt.__code__.co_argcount < 2:\n raise TypeError(\"custom format function requires at least 2 arguments\")\n\n self.formatter = fmt", "def credential(self, value):\n credential = self.organization.get_credential_by_name_with_type_id(value,\n self.credential._data.get('credential_type'))\n if not credential:\n raise InvalidCredential(value)\n self._update_values('credential', credential.id)", "def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter", "def 
read_new_credential(csv_file=None):\n options = {}\n if csv_file is None:\n logger.info(\"Generating configuration with user-specified username + password\")\n username = input(\"Username: \")\n if len(username) == 0:\n raise RuntimeError(\"Username may not be empty\")\n password = getpass.getpass()\n if len(password) == 0:\n raise RuntimeError(\"Password may not be empty\")\n hostname = _validate_hostname(input(\"Hostname (may be empty): \"))\n token_endpoint = input(\"Token endpoint (empty if not applicable): \") or None\n else:\n if os.path.exists(csv_file):\n with open(csv_file, \"r\") as f:\n reader = csv.DictReader(f)\n cred = next(reader)\n username = cred[\"username\"]\n password = cred[\"password\"]\n hostname = cred[\"hostname\"] if \"hostname\" in cred else \"\"\n token_endpoint = cred.get(\"token_endpoint\")\n if \"mechanism\" in cred:\n options[\"method\"] = cred[\"mechanism\"].replace(\"-\", \"_\")\n if \"protocol\" in cred:\n options[\"ssl\"] = cred[\"protocol\"] != \"SASL_PLAINTEXT\"\n if \"ssl_ca_location\" in cred:\n options[\"ssl_ca_location\"] = cred[\"ssl_ca_location\"]\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), csv_file)\n return Auth(username, password, hostname, token_endpoint=token_endpoint, **options)", "def credentials(self, credentials):\n\n self._credentials = credentials", "def set_credentials(self, *args, **kwargs):\n pass", "def credential(self, credential):\n\n self._credential = credential", "def credential(self, credential):\n\n self._credential = credential", "def setFormat( self, fmt, style = '{' ):\n formatter = logging.Formatter( fmt, style = style )\n for handler in self.logger.handlers:\n handler.setFormatter( formatter )", "def configure(credentials, block_cache):\n if credentials:\n if isinstance(credentials, dict):\n credentials = json.dumps(credentials)\n placeholder = array_ops.placeholder(dtypes.string)\n op = gen_gcs_config_ops.gcs_configure_credentials(placeholder)\n session.run(op, feed_dict={placeholder: credentials})\n if block_cache:\n op = gen_gcs_config_ops.gcs_configure_block_cache(\n max_cache_size=block_cache.max_bytes,\n block_size=block_cache.block_size,\n max_staleness=block_cache.max_staleness)\n session.run(op)", "def formatter(cls, website):\n formatter = DataFormatter()\n\n if website in cls.author_extract:\n formatter.set_author_extract_func(cls.author_extract[website])\n\n return formatter" ]
[ "0.7578857", "0.5838164", "0.573238", "0.5614809", "0.5313772", "0.52097976", "0.5193559", "0.5052433", "0.49889576", "0.4981685", "0.49690244", "0.4948941", "0.49288505", "0.49056", "0.48717806", "0.48604256", "0.47441703", "0.46400562", "0.46297708", "0.4581356", "0.45790133", "0.45777065", "0.4538083", "0.4524448", "0.45222104", "0.45005825", "0.45005825", "0.44715348", "0.44624394", "0.445359" ]
0.71008307
1
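
A quick usage sketch for the register_formatter record above: a formatter is just a callable taking a credentials dict, registered under a name, and the method returns self so registrations can chain. The Config stand-in and the redis example below are assumptions for illustration, not part of the dataset record.

class Config:
    # Minimal stand-in exposing only the method from the record above.
    def __init__(self):
        self._credentialFormatters = {}

    def register_formatter(self, name, formatter):
        self._credentialFormatters[name] = formatter
        return self

def redis_url_formatter(credentials):
    # Invented formatter; the credential keys mirror the helpers in the negatives.
    return 'redis://{0}:{1}'.format(credentials['host'], credentials['port'])

config = Config().register_formatter('redis', redis_url_formatter)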
Returns credentials for the specified relationship as formatted by the specified formatter.
def formatted_credentials(self, relationship, formatter):
    if formatter not in self._credentialFormatters:
        raise NoCredentialFormatterFoundException(
            'There is no credential formatter named {0} registered. Did you remember to call register_formatter()?'
            .format(formatter)
        )
    return self._credentialFormatters[formatter](self.credentials(relationship))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_credentials_format(cls):\n raise NotImplementedError", "def credentials(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n relation_data = rel.data[rel.app]\n creds_json = relation_data.get(\"credentials\")\n return json.loads(creds_json) if creds_json is not None else ()", "def pysolr_formatter(credentials):\n\n return \"http://{0}:{1}/{2}\".format(credentials['ip'],\n credentials['port'],\n credentials['path'])", "def pymongo_formatter(credentials):\n return '{0}:{1}/{2}'.format(\n credentials['host'],\n credentials['port'],\n credentials['path']\n )", "def credentials(self, relationship, index=0):\n\n if not self._relationshipsDef:\n if self.in_build():\n raise BuildTimeVariableAccessException(\n 'Relationships are not available during the build phase.'\n )\n raise NotValidPlatformException(\n \"\"\"No relationships are defined. Are you sure you are on Platform.sh?\n If you're running on your local system you may need to create a tunnel\n to access your environment services. See https://docs.platform.sh/gettingstarted/local/tethered.html\"\"\"\n )\n\n if not self.has_relationship(relationship):\n raise KeyError(\n 'No relationship defined: {}. Check your .platform.app.yaml file.'\n .format(relationship))\n if index >= len(self._relationshipsDef):\n raise KeyError('No index {} defined for relationship: {}. '\n 'Check your .platform.app.yaml file.'.format(\n index, relationship))\n return self._relationshipsDef[relationship][index]", "def register_formatter(self, name, formatter):\n\n self._credentialFormatters[name] = formatter\n return self", "def display_credentials(cls):\n return cls.credentials_list", "def display_credentials(cls):\n return cls.credentials_list", "def get_credentials(self):\n return self.credentials", "def get_credentials(self, authenticator_id):\n pass", "def get_access_credentials_output(backend: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[Optional[str]]] = None,\n role: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccessCredentialsResult]:\n ...", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def display_credentials(cls):\n return cls.credential_list", "def display_credentials(cls):\n return cls.credential_list", "def display_credentials(cls):\n return cls.credential_list", "def display_credential():\n return CredentialsData.display_credentials()", "def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n color_code = self.color(self.log_colors, record.levelname)\n if hasattr(record, 'ctx'):\n metadata = record.ctx.invocation_metadata()\n for item in metadata:\n if item.key == 'author_name':\n setattr(record, 'user', item.value)\n elif item.key == 'correlation_id':\n setattr(record, 'correlationId', item.value)\n\n for key, value in record.__dict__.items():\n #this allows to have numeric keys\n if (key not in RESERVED_ATTR_HASH\n and not (hasattr(key, \"startswith\")\n and key.startswith('_'))):\n message = append(color_code=color_code, message=message, key=key, value=value)\n return message", "def 
display_credential(cls):\n return cls.credential_list", "def credentials(self) -> Mapping:", "def credential(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"credential\")", "def posgresql_dsn_formatter(credentials):\n\n return \"postgresql://{0}:{1}@{2}:{3}/{4}\".format(credentials[\"username\"],\n credentials[\"password\"],\n credentials[\"host\"],\n credentials[\"port\"],\n credentials[\"path\"])", "def credentials(self):\n if self.user and self.is_authenticated():\n return AuthCredentials(['authenticated'] + self.user.permissions)\n else:\n return AuthCredentials()", "def stringify(cls, relationship):\n\n\t\tif relationship == cls.STUDENT:\n\t\t\treturn \"STUDENT\"\n\t\tif relationship == cls.TEACHER:\n\t\t\treturn \"TEACHER\"\n\t\tif relationship == cls.BOTH:\n\t\t\treturn \"BOTH\"\n\t\treturn \"ERROR\"", "def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter", "def serialize_cred(context_obj, creds_obj):\n\treturn serialize_or_deserialize_cred(context_obj,creds_obj,SERIALIZE)", "def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")", "def __str__(self):\n return '{0}{1}'.format(self.user.username, self.provider)", "def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())", "def credentials(self):\n return self._credentials", "def get_storage_account_credential_output(credential_name: Optional[pulumi.Input[str]] = None,\n manager_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStorageAccountCredentialResult]:\n ..." ]
[ "0.6246171", "0.55523694", "0.5385444", "0.5299462", "0.5156621", "0.51410604", "0.49873176", "0.49873176", "0.49505085", "0.49480554", "0.49477988", "0.49305084", "0.4880686", "0.4880686", "0.4880686", "0.4846417", "0.47463864", "0.47260103", "0.46766758", "0.46693823", "0.46660128", "0.46387", "0.46155888", "0.45837334", "0.4573092", "0.45189148", "0.4507072", "0.45052946", "0.44918323", "0.44857714" ]
0.87677866
0
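
To see the round trip through formatted_credentials from the record above, here is a self-contained toy: the stub Config grows a raw-credentials lookup (real implementations resolve these from the platform environment), and the two methods from this section's records are pasted in. All names and values are invented.

class NoCredentialFormatterFoundException(Exception):
    pass

class Config:
    def __init__(self, relationships):
        self._relationships = relationships        # raw credentials keyed by relationship
        self._credentialFormatters = {}

    def credentials(self, relationship):
        return self._relationships[relationship]

    def register_formatter(self, name, formatter):
        self._credentialFormatters[name] = formatter
        return self

    def formatted_credentials(self, relationship, formatter):
        if formatter not in self._credentialFormatters:
            raise NoCredentialFormatterFoundException(
                'There is no credential formatter named {0} registered.'.format(formatter))
        return self._credentialFormatters[formatter](self.credentials(relationship))

config = Config({'database': {'host': 'db.internal', 'port': 27017, 'path': 'main'}})
config.register_formatter('pymongo', lambda c: '{0}:{1}/{2}'.format(c['host'], c['port'], c['path']))
print(config.formatted_credentials('database', 'pymongo'))   # -> db.internal:27017/main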
Returns formatted PostgreSQL credentials as a DSN.
def posgresql_dsn_formatter(credentials):
    return "postgresql://{0}:{1}@{2}:{3}/{4}".format(credentials["username"],
                                                     credentials["password"],
                                                     credentials["host"],
                                                     credentials["port"],
                                                     credentials["path"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pg_str(host, port, user, dbname, password):\n return 'PG:host={} port={} user={} dbname={} password={}'.format(\n host, port, user, dbname, password\n )", "def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self.host, dbname=self.dbname)\n\n return conn_string", "def get_connection_string(self):\n auth = ''\n if self._user:\n auth = self._user\n if self._password:\n auth = auth + ':' + self._password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self._hostname, dbname=self._dbname)\n\n return conn_string", "def get_database_dsn():\n return getattr(config, f\"POSTGRES_DSN_{config.SERVER_MODE}\")", "def get_db_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_NAME\"),\n self.get(\"DB_USER\"),\n self.get(\"DB_PASSWORD\"))", "def make_dsn(settings=django_settings):\r\n\r\n try:\r\n dsn = {\r\n 'mysql': 'mysql',\r\n 'postgresql': 'postgres',\r\n 'postgresql_psycopg2': 'postgres',\r\n 'sqlite3': 'sqlite',\r\n }[settings.DATABASE_ENGINE]\r\n except:\r\n dsn = settings.DATABASE_ENGINE\r\n dsn += '://'\r\n\r\n if settings.DATABASE_USER:\r\n dsn += settings.DATABASE_USER\r\n dsn += ':'\r\n\r\n if settings.DATABASE_PASSWORD:\r\n dsn += settings.DATABASE_PASSWORD\r\n dsn += '@'\r\n\r\n if settings.DATABASE_HOST:\r\n dsn += settings.DATABASE_HOST\r\n dsn += ':'\r\n\r\n if settings.DATABASE_PORT:\r\n dsn += '%s' % settings.DATABASE_PORT\r\n\r\n if settings.DATABASE_NAME:\r\n dsn += '/' + settings.DATABASE_NAME\r\n\r\n return dsn", "def connection_string(self):\n return \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % \\\n (os.environ['DB_USER'],\n os.environ['DB_PASSWORD'],\n os.environ['DB_HOST'],\n os.environ['DB_PORT'],\n self.database_name)", "def get_sqldb_dsn(vcap_services):\n parsed = json.loads(vcap_services)\n credentials = parsed[\"sqldb\"][0][\"credentials\"]\n user = credentials[\"username\"]\n password = credentials[\"password\"]\n host = credentials[\"hostname\"]\n port = credentials[\"port\"]\n dbname = credentials[\"db\"]\n dsn = \"\"\"DATABASE={};HOSTNAME={};PORT={};UID={};PWD={};\"\"\".format(dbname, host, port, user, password)\n return dsn", "def ogrString(self):\r\n return 'host={} user={} port={} dbname={} password={}'.format(self.host, self.user, self.port, self.database, self.password)", "def get_connection_string():\n connection_string = 'postgresql://' + config.GM_DB_USER + \":\" + config.GM_DB_PASSWORD + \"@\" \\\n + config.GM_DB_HOST + \":\" + config.GM_DB_PORT + \"/\" + config.GM_DB_NAME\n return connection_string", "def _get_connection_string(self, database):\r\n return ('ODBC;DNS=;DRIVER={Client Access ODBC Driver (32-bit)};'\r\n 'SYSTEM=%s;CMT=0;DBQ=%s;NAM=%d;DFT=5;DSP=1;TFT=0;TSP=0;DEC=0;'\r\n 'XDYNAMIC=0;UID=%s;PWD=%s;RECBLOCK=2;BLOCKSIZE=512;SCROLLABLE=0;'\r\n 'TRANSLATE=0;LAZYCLOSE=0;LIBVIEW=0;REMARKS=0;CONNTYPE=0;SORTTYPE=2;'\r\n 'SORTWEIGHT=1;LANGUAGEID=ENG;PREFETCH=0;DFTPKGLIB=' \r\n % (self._sys_name, database, self._delimiters[self._sql_delimiter], self._user, self._pwd))", "def get_db_landing_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_LANDING_NAME\"),\n self.get(\"DB_LANDING_USER\"),\n 
self.get(\"DB_LANDING_PASSWORD\"))", "def build_dsn(database):\n\t\tdsn = { 'database': database, 'host': config.get('database', 'host') }\n\t\ttry:\n\t\t\tssl = config.getboolean('database', 'ssl')\n\t\t\tif ssl:\n\t\t\t\tdsn['ssl'] = ssl\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\ttry:\n\t\t\tusername = config.get('database', 'username')\n\t\t\tdsn['user'] = username\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\ttry:\n\t\t\tpassword = config.get('database', 'password')\n\t\t\tdsn['password'] = password\n\t\texcept ConfigParser.Error:\n\t\t\tpass\n\t\treturn dsn", "def create_db_conn_string(db_config):\n db_config = {\n 'host': environ['DB_HOST'] if 'DB_HOST' in environ else db_config['Host'],\n 'name': environ['DB_NAME'] if 'DB_NAME' in environ else db_config['Name'],\n 'username': environ['DB_USERNAME'] if 'DB_USERNAME' in environ \\\n else db_config['User'],\n 'password': environ['DB_PASSWORD'] if 'DB_PASSWORD' in environ \\\n else db_config['Password']\n }\n\n return f'host={db_config[\"host\"]} user={db_config[\"username\"]} ' \\\n f'password={db_config[\"password\"]} dbname={db_config[\"name\"]}'", "def connection_string(self) -> str:\n if self.dialect == \"sqlite\":\n ret_connection_string = f\"{self.dialect}:///{self.database}\"\n else:\n escaped_password: str = urllib.parse.quote_plus(self.password)\n auth_section: str = f\"{self.username}:{escaped_password}\"\n address: str = f\"{self.host}:{self.port}\"\n ret_connection_string = f\"{self.dialect}://{auth_section}@{address}/{self.database}\"\n\n return ret_connection_string", "def _db_credentials(self):\n if self.password == \"\" or self.password == \"RUNTIME\":\n sys.stdout.write(PROMPT + \"Database password: \")\n sys.stdout.flush()\n self.password = getpass.getpass()\n elif self.password == \"ENV\":\n self.password = os.environ[\"sql_password\"]\n db_host = quote(self.hostname)\n db_name = quote(self.database)\n db_user = quote(self.username)\n db_password = quote_plus(self.password)\n if \"@\" in db_password:\n logging.warning(\n \"%sWARNING:%s Using the '@' symbol in your database password can cause login issues with SQL Alchemy.%s\"\n % (WARN + bold + R, W, WARN)\n )\n return db_host, db_name, db_user, db_password", "def session_info(self) -> str:\n info = [\n \"user\",\n \"dbname\",\n \"host\",\n \"port\",\n \"sslmode\",\n ]\n\n params = self.connection.get_dsn_parameters()\n\n return \" \".join(\n f\"{param}={params.get(param)}\"\n for param in info\n if params.get(param))", "def get_postgres_url(self, username, password, hostname, port, database):\n args = (username, password, hostname, port, database)\n return 'postgresql://%s:%s@%s:%d/%s' % args", "def get_postgredb_connection(properties):\n try:\n return postgreconnect(host=properties[0],\n database=properties[1],\n user=properties[2],\n password=properties[3])\n except postgreError as error:\n print(\"Error while connecting to PostgreSQL Database\", error)", "def _make_sql_url(hostname, database, **kwargs):\n url = hostname\n if kwargs.get(\"port\"):\n url = \"{}:{}\".format(url, kwargs[\"port\"])\n if kwargs.get(\"user\"):\n credentials = kwargs[\"user\"]\n if kwargs.get(\"password\"):\n credentials = \"{}:{}\".format(credentials, kwargs[\"password\"])\n url = \"{}@{}\".format(credentials, url)\n return \"postgresql://{}/{}\".format(url, database)", "def get_database_uri(\n database: Optional[str] = None,\n username: Optional[str] = None,\n password: Optional[str] = None,\n format: str =\n \"postgresql://{username}{':' if password else ''}{password}{'@' if 
credentials else ''}/{database}\",\n cred_file: str = \".dbcred\",\n save: bool = True\n) -> Optional[str]:\n\n if database is None:\n database = \"\"\n if username is None:\n username = \"\"\n if password is None:\n password = \"\"\n if format is None:\n format = \"\"\n\n db = \"\"\n un = \"\"\n pw = \"\"\n fm = \"\"\n\n if cred_file is not None and path.exists(cred_file):\n with open(cred_file, 'r') as f:\n db = f.readline().rstrip()\n if database == \"\":\n database = db\n \n un = f.readline().rstrip()\n if username == \"\":\n username = un\n\n pw = f.readline().rstrip()\n if password == \"\":\n password = pw\n \n fm = f.readline().rstrip()\n if format == \"\":\n format = fm\n\n if cred_file is not None and save:\n with open(cred_file, 'w') as f:\n db = db if database == \"\" else database\n f.write(db + \"\\n\")\n\n un = un if username == \"\" else username\n f.write(un + \"\\n\")\n\n pw = pw if password == \"\" else password\n f.write(pw + \"\\n\")\n\n fm = fm if format == \"\" else format\n f.write(fm + '\\n')\n\n if database == \"\" or format == \"\":\n return None\n\n loc = {\n \"credentials\": (not not username) or (not not password),\n \"username\": username,\n \"password\": password,\n \"database\": database\n }\n \n try:\n exec(f'x = f\"{format}\"', None, loc)\n return loc['x']\n except:\n return None", "def connect_to_postgres():\n postgres_creds = os.environ.get(\"POSTGRES_CREDS\")\n if not postgres_creds:\n postgres_creds = get_secret(\"POSTGRES_CREDS\")\n print(\"Using {} Postgres database credentials\".format(postgres_creds))\n postgres_url = os.environ.get(\"POSTGRES\")\n if not postgres_url:\n postgres_url = get_secret(\"POSTGRES\")\n return connect(postgres_url)", "def _get_driver_from_dsn(self, dsn):\n\n return dsn.split(':')[0]", "def connect(db: str=None) -> Connection:\n return pgsql.connect(\"postgres.internal\", db or \"sysadmins\")", "def _get_connect_string(backend,\n user=\"openstack_citest\",\n passwd=\"openstack_citest\",\n database=\"openstack_citest\"):\n if backend == \"mysql\":\n backend = \"mysql+mysqldb\"\n elif backend == \"postgres\":\n backend = \"postgresql+psycopg2\"\n\n return (\"%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s\"\n % {'backend': backend, 'user': user, 'passwd': passwd,\n 'database': database})", "def _format_database(self, request: DatabaseSecretRequest,\n secret: Secret) -> str:\n username, password = secret.value\n return f'{request.engine}://{username}:{password}@' \\\n f'{request.host}:{request.port}/{request.database}?' 
\\\n f'{request.params}'", "def read_dsn(\n section: str, dsn: str = \"\",\n):\n check_types([(\"dsn\", dsn, [str],), (\"section\", section, [str],)])\n confparser = ConfigParser()\n confparser.optionxform = str\n if not dsn:\n dsn = os.environ[\"ODBCINI\"]\n confparser.read(dsn)\n if confparser.has_section(section):\n options = confparser.items(section)\n conn_info = {\"port\": 5433, \"user\": \"dbadmin\"}\n for elem in options:\n if elem[0].lower() in (\"servername\", \"server\"):\n conn_info[\"host\"] = elem[1]\n elif elem[0].lower() == \"uid\":\n conn_info[\"user\"] = elem[1]\n elif elem[0].lower() == \"port\":\n try:\n conn_info[\"port\"] = int(elem[1])\n except:\n conn_info[\"port\"] = elem[1]\n elif elem[0].lower() == \"pwd\":\n conn_info[\"password\"] = elem[1]\n elif elem[0].lower() == \"kerberosservicename\":\n conn_info[\"kerberos_service_name\"] = elem[1]\n elif elem[0].lower() == \"kerberoshostname\":\n conn_info[\"kerberos_host_name\"] = elem[1]\n elif \"vp_test_\" in elem[0].lower():\n conn_info[elem[0].lower()[8:]] = elem[1]\n else:\n conn_info[elem[0].lower()] = elem[1]\n return conn_info\n else:\n raise NameError(\"The DSN Section '{}' doesn't exist.\".format(section))", "def pymongo_formatter(credentials):\n return '{0}:{1}/{2}'.format(\n credentials['host'],\n credentials['port'],\n credentials['path']\n )", "def dsn(self, dsn_type: t.Optional[DSNType] = None, **kwargs: t.Any) -> str:\n hostname = self._internet.hostname(**kwargs)\n scheme, port = self.validate_enum(dsn_type, DSNType)\n return f\"{scheme}://{hostname}:{port}\"", "def build_db_uri() -> str:\n\n return \"{DB_DRIVER}://{DB_USERNAME}:{DB_PASSWD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\".format(**{\n 'DB_DRIVER': os.environ.get('DB_DRIVER', ''),\n 'DB_HOST': os.environ.get('DB_HOST', ''),\n 'DB_PORT': os.environ.get('DB_PORT', ''),\n 'DB_NAME': os.environ.get('DB_NAME', ''),\n 'DB_USERNAME': os.environ.get('DB_USERNAME', ''),\n 'DB_PASSWD': os.environ.get('DB_PASSWD', '')\n })" ]
[ "0.67622787", "0.67376924", "0.67143553", "0.64170873", "0.632588", "0.63191694", "0.63169056", "0.6244294", "0.59022504", "0.5898417", "0.589464", "0.5890298", "0.5859012", "0.5762957", "0.5742873", "0.5584011", "0.5552785", "0.5513872", "0.549809", "0.54919595", "0.54588574", "0.54472953", "0.5426575", "0.54041106", "0.53801876", "0.5358468", "0.53342056", "0.53109545", "0.53012794", "0.53006774" ]
0.84027815
0
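
A worked example of the DSN formatter record above, with the function copied in so the snippet runs on its own; all credential values are invented.

def posgresql_dsn_formatter(credentials):   # copied from the record above
    return "postgresql://{0}:{1}@{2}:{3}/{4}".format(credentials["username"],
                                                     credentials["password"],
                                                     credentials["host"],
                                                     credentials["port"],
                                                     credentials["path"])

creds = {
    "username": "main",
    "password": "s3cret",
    "host": "db.internal",
    "port": 5432,
    "path": "main",
}
print(posgresql_dsn_formatter(creds))   # -> postgresql://main:s3cret@db.internal:5432/main

As the quote_plus call in one of the negatives hints, a production formatter would URL-escape the password before interpolating it into the DSN.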
Bibliographic reference to the portrayal catalogue cited.
def portrayalCatalogueCitation(self) -> Sequence[Citation]:
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reference(self):\n licence = self.context\n if IEnvironmentBase.providedBy(licence):\n return licence.reference\n to_shore = queryAdapter(licence, IShore)\n ref = '{} {}'.format(licence.reference, to_shore.display())\n return ref", "def get_reference(self):\t\t\n\t\treturn self._reference", "def reference(self):\n \n return self._reference", "def getReference(self):\n return _libsbml.Association_getReference(self)", "def get_cite(self, ):\n return self.attrs.get(self.AttributeNames.CITE, None)", "def reference(self):\n return self._reference", "def reference(self):\n return self._reference", "def get_publication_reference(self):\n return unicode(self.pk)", "def get_cite(self, ):\n if self.AttributeNames.CITE not in self.attrs:\n return None\n return str(self.attrs[self.AttributeNames.CITE])", "def alternativeMetadataReference(self) -> Sequence[Citation]:\n return None", "def cite(silent=False):\n if silent is False:\n print(__cite__)\n else:\n return __bibtex__", "def get_citation(self):\n etree = self.get_eml()\n try:\n citation_ipt = etree.find('./additionalMetadata/metadata/gbif/citation').text\n except AttributeError:\n citation_ipt = None\n if citation_ipt:\n now = datetime.datetime.now().date()\n pola3r = \"(Available: Polar 'Omics Links to Antarctic, Arctic and Alpine Research. Antarctic Biodiversity Portal. Scientific Committee for Antarctic Research. www.biodiversity.aq/pola3r. Accessed: {})\".format(\n now)\n citation = \"{} {}\".format(citation_ipt, pola3r)\n else:\n citation = None\n return citation", "def coauthor_link(self):\n return self._json['coredata'].get('link', [])[3].get('@href')", "def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid", "def us_citizen(self, instance):\r\n return instance.user.profile.us_citizen", "def compartment(self):\n return \"_links\"", "def ref(self):\n return self._ref", "def catalog(self) -> str:\n return pulumi.get(self, \"catalog\")", "def Reference(self, default={}):\n return HEP.ReferenceHEPObject(self.data.get('reference', default))", "def ref(self):\n\t\treturn self.bottle.ref", "def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")", "def reference_id(self) -> Optional[str]:\n return pulumi.get(self, \"reference_id\")", "def ref(self) -> str:\n return self._ref", "def getCompartmentReference(self):\n return _libsbml.MultiSimpleSpeciesReferencePlugin_getCompartmentReference(self)", "def get_reference(self):\n return self.resource.url", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"coco\":\n return \"http://cocodataset.org/#explore?id={}\".format(info[\"id\"])\n else:\n super(CocoDataset, self).image_reference(image_id)", "def get_reference(self):\n return self.PotTax_reference", "def reference_url(self):\n return self.get(\"reference_url\", decode=True)", "def catalog_id(self):\n return self._catalog_id", "def addCite(self, paper, tag=\"\"):\r\n for c in self.cites:\r\n if paper is c.paper2:\r\n return\r\n cit=Citation(self, paper, tag)\r\n self.cites.add(cit)\r\n paper.citesBack.add(cit)" ]
[ "0.60247076", "0.5914841", "0.5735147", "0.5714687", "0.57077384", "0.56788075", "0.56788075", "0.55938745", "0.5560254", "0.5541559", "0.5455756", "0.54168093", "0.53956026", "0.5360822", "0.5344456", "0.53180736", "0.5306963", "0.5287715", "0.5272013", "0.5257117", "0.5246071", "0.5192694", "0.517244", "0.5142158", "0.5136751", "0.51296806", "0.51126885", "0.50969553", "0.5077285", "0.5057458" ]
0.6322485
0
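
The document in the record above is an abstract accessor in the style of ISO 19115 metadata bindings; a concrete metadata class would override it to return actual citations. The sketch below is illustrative only: Citation is a bare stand-in dataclass, and whether the real API exposes this as a plain method or a property is not visible from the record.

from dataclasses import dataclass
from typing import Sequence

@dataclass
class Citation:    # stand-in, not the real ISO 19115 Citation type
    title: str

class PortrayalCatalogueReference:
    def __init__(self, citations: Sequence[Citation]):
        self._citations = tuple(citations)

    @property
    def portrayalCatalogueCitation(self) -> Sequence[Citation]:
        # Bibliographic reference to the portrayal catalogue cited.
        return self._citations

ref = PortrayalCatalogueReference([Citation(title="Portrayal Catalogue v1")])
print(ref.portrayalCatalogueCitation)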
Citation for the standards to which the metadata conforms.
def metadataStandard(self) -> Sequence[Citation]:
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_citation(meta):\n pass", "def portrayalCatalogueCitation(self) -> Sequence[Citation]:\n pass", "def collect_citation_metadata(\n metadata: dict, references: List[pybtex.database.Entry]\n) -> dict:\n # Author list\n citation_authors = []\n for author_tier in [\"Core\", \"Developers\", \"Contributors\"]:\n for author in metadata[\"Authors\"][author_tier][\"List\"]:\n family_names, given_names = author[\"Name\"].split(\", \")\n citation_author = {\n \"family-names\": family_names,\n \"given-names\": given_names,\n }\n if \"Orcid\" in author:\n citation_author[\"orcid\"] = (\n \"https://orcid.org/\" + author[\"Orcid\"]\n )\n if \"Affiliations\" in author and len(author[\"Affiliations\"]) > 0:\n citation_author[\"affiliation\"] = \" and \".join(\n author[\"Affiliations\"]\n )\n citation_authors.append(citation_author)\n # References in CITATION.cff format\n citation_references = [to_cff_reference(entry) for entry in references]\n return {\n \"cff-version\": \"1.2.0\",\n \"message\": (\n \"Please cite SpECTRE in any publications that make use of its code\"\n \" or data. Cite the latest version that you use in your\"\n \" publication. The citation for this version is listed below.\"\n ),\n \"title\": metadata[\"Name\"],\n \"url\": metadata[\"Homepage\"],\n \"repository-code\": \"https://github.com/\" + metadata[\"GitHub\"],\n \"version\": metadata[\"Version\"],\n \"date-released\": metadata[\"PublicationDate\"],\n \"doi\": metadata[\"Doi\"],\n \"authors\": citation_authors,\n \"keywords\": metadata[\"Keywords\"],\n \"license\": metadata[\"License\"],\n \"references\": citation_references,\n }", "def alternativeMetadataReference(self) -> Sequence[Citation]:\n return None", "def to_citation(self, type):\n acs_authors = \"; \".join(self.format_authors(\"acs\"))\n # Some articles don't come with pages. :-(\n pages_with_endash = (self.pages.replace(\"-\", \"\\u2013\") if self.pages\n else \"\")\n # Actually, not using quote() generally gives results that work fine.\n # The only issue is that when using Markdown URLs with parentheses in\n # Jupyter notebooks, the conversion to HTML gets it wrong, thinking\n # that the URL ends at the first close parentheses in the URL. (In\n # the notebook itself, it is fine, only the conversion to HTML messes\n # up.) So we might as well escape them generally.\n doi_url = f\"https://doi.org/{urllib.parse.quote(self.doi)}\"\n\n # BibLaTeX\n if type in [\"bib\", \"b\"]:\n # Create (hopefully) unique identifier\n author_decoded = unidecode(self.authors[0][\"family\"])\n journal_initials = \"\".join(c for c in self.journal_short\n if c.isupper())\n ref_identifier = f\"{author_decoded}{self.year}{journal_initials}\"\n ref_identifier = \"\".join(ref_identifier.split()) # remove spaces\n # Author names in bib style\n author_names = \" and \".join(self.format_authors(\"bib\"))\n journal = self.journal_short.replace(\". \", \".\\\\ \")\n # Open and close braces\n # Truthfully we don't need this. 
However, including the doubled\n # curly braces in the f-string makes vim's indentation go crazy.\n open, close = \"{\", \"}\"\n # Make the citation\n s = (f\"@article{open}{ref_identifier},\\n\"\n f\" doi = {{{self.doi}}},\\n\"\n f\" author = {{{author_names}}},\\n\"\n f\" journal = {{{journal}}},\\n\"\n f\" title = {{{self.title}}},\\n\"\n f\" year = {{{self.year}}},\\n\")\n if self.volume is not None:\n s += f\" volume = {{{self.volume}}},\\n\"\n if self.issue is not None:\n s += f\" issue = {{{self.issue}}},\\n\"\n if self.pages is not None:\n s += f\" pages = {{{self.pages.replace('-', '--')}}},\\n\"\n s += close\n # Replace Unicode characters with their LaTeX equivalents\n for char in _g.unicodeLatexDict:\n s = s.replace(char, _g.unicodeLatexDict[char])\n return s\n\n # Just DOI\n if type in [\"doi\", \"d\"]:\n return self.doi\n\n # The rest all have a long vs short type.\n # Discern long vs short type\n long = False\n if type[0].upper() == type[0]:\n long = True\n type = type.lower()\n\n # reStructuredText\n if type in [\"rst\", \"r\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"*{self.volume}* ({self.issue}), \" if self.issue\n else f\"*{self.volume},* \")\n return (author_title\n + f\"*{self.journal_short}* **{self.year},** \"\n + vol_issue\n + f\"{pages_with_endash}. \"\n + f\"`DOI: {self.doi} <{doi_url}>`_\")\n\n # Markdown\n if type in [\"markdown\", \"m\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"*{self.volume}* ({self.issue}), \" if self.issue\n else f\"*{self.volume},* \")\n return (author_title\n + f\"*{self.journal_short}* **{self.year},** \"\n + vol_issue\n + f\"{pages_with_endash}. \"\n + f\"[DOI: {self.doi}]({doi_url})\")\n\n # Word\n elif type in [\"word\", \"w\"]:\n author_title = f\"{acs_authors} {self.title}. \" if long else \"\"\n vol_issue = (f\"{self.volume} ({self.issue}), \" if self.issue\n else f\"{self.volume}, \")\n return (author_title\n + f\"{self.journal_short} {self.year}, \"\n + vol_issue\n + f\"{pages_with_endash}.\")\n\n else:\n raise ValueError(\"Invalid citation type '{type}' given\")", "def get_citation(self):\n etree = self.get_eml()\n try:\n citation_ipt = etree.find('./additionalMetadata/metadata/gbif/citation').text\n except AttributeError:\n citation_ipt = None\n if citation_ipt:\n now = datetime.datetime.now().date()\n pola3r = \"(Available: Polar 'Omics Links to Antarctic, Arctic and Alpine Research. Antarctic Biodiversity Portal. Scientific Committee for Antarctic Research. www.biodiversity.aq/pola3r. Accessed: {})\".format(\n now)\n citation = \"{} {}\".format(citation_ipt, pola3r)\n else:\n citation = None\n return citation", "def get_standard_citatation(citation, cache, override):\n try:\n print('Trying metadata cache for {}...'.format(citation))\n metadata = citation_to_metadata(citation, cache, override)\n return metadata['standard_citation'], metadata['citation_id']\n except Exception as e:\n return None, None", "def _construct_standardized_metadata(self):\n return None", "def test_tax_court_citation_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n\n\n\n UNITED STATES TAX COURT\n\n\n\n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n\n\n\n Docket No. 5393-04. Filed May 31, 2006.\n\n\n\n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"1 T.C. 2018\",\n ),\n (\n \"\"\" T.C. Memo. 
2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Memo. 150\",\n ),\n (\n \"\"\" T.C. Summary Opinion 2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Summary Opinion 150\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n\n Docket No. 5706–12. Filed May 6, 2013.\n\n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n\n 200\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f (200) SHENK v. COMMISSIONER 201\n\n\n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 
24.\n\n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n\n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n\n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f 202 140 UNITED STATES TAX COURT REPORTS (200)\n\n\n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"140 T.C. 200\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n cite_string = \"%s %s %s\" % (\n results[\"Citation\"][\"volume\"],\n results[\"Citation\"][\"reporter\"],\n results[\"Citation\"][\"page\"],\n )\n\n self.assertEqual(cite_string, a)\n print \"✓\", cite_string", "def parse_citations(s: str):\n print(\"parse_citations(%s)\" % s)\n if pandas.isnull(s) or not s:\n return []\n terms = [e.strip() for e in s.split(\";\")]\n print(\"parse_citations: \" + str(terms))\n out = []\n for term in terms:\n if term.startswith(\"PMID\"):\n out.append({\"db\": \"PubMed\", \"id\": term})\n else:\n raise RuntimeError(\"Unknown citation format: \" + term)\n return out", "def create_citation(cit):\n if cit is not None:\n if cit['citation-type'] == \"BIBTEX\":\n return pybtex.database.parse_string(cit['citation-value'], \"bibtex\")\n return None", "def docstring(self):\n raw_description = self.qualifiers.get(\"description\")\n\n # short circuit if there isn't a description to process\n if not raw_description:\n _LOGGER.debug(\"No raw description found in MOF, substituting placeholder\")\n return \"No documentation in MOF\"\n\n # process the raw description, normalizing whitespace and special characters\n _LOGGER.debug(\"Normalizing raw description from MOF:\\n%s\", raw_description)\n normalized_lines = []\n for raw_line in raw_description:\n # split to normalize \\n in the entry\n normalized_line_elements = []\n for text in raw_line.split():\n # strip leading/trailing whitespace\n stripped_text = text.strip()\n # escape any special rst characters\n escaped_text = stripped_text.replace('*', '\\*')\n # add to normalized line elements\n normalized_line_elements.append(escaped_text)\n # create normalized line and save it\n normalized_line = \" \".join(normalized_line_elements)\n normalized_lines.append(normalized_line)\n\n # create and return the normalized 
line block\n normalized_description = \"\\n\".join(normalized_lines)\n _LOGGER.debug(\"Normalized description is:\\n%s\", normalized_description)\n return normalized_description", "def parentMetadata(self) -> Citation:\n return None", "def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail", "def newThesisCitation(self, **attrlinks):\n return ThesisCitation(self, **attrlinks)", "def make_knowledge_header(name: str,\n version: Optional[str] = None,\n description: Optional[str] = None,\n authors: Optional[str] = None,\n contact: Optional[str] = None,\n copyright: Optional[str] = None,\n licenses: Optional[str] = None,\n disclaimer: Optional[str] = None,\n namespace_url: Optional[Mapping[str, str]] = None,\n namespace_patterns: Optional[Mapping[str, str]] = None,\n annotation_url: Optional[Mapping[str, str]] = None,\n annotation_patterns: Optional[Mapping[str, str]] = None,\n annotation_list: Optional[Mapping[str, Set[str]]] = None,\n ) -> Iterable[str]:\n yield from make_document_metadata(\n name=name,\n contact=contact,\n description=description,\n authors=authors,\n version=version,\n copyright=copyright,\n licenses=licenses,\n disclaimer=disclaimer,\n )\n\n yield from make_document_namespaces(\n namespace_url=namespace_url,\n namespace_patterns=namespace_patterns,\n )\n\n yield from make_document_annotations(\n annotation_url=annotation_url,\n annotation_patterns=annotation_patterns,\n annotation_list=annotation_list,\n )\n\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80", "def _get_standards(self):\n standards = self._extract_methods_signatures()\n return {\n \"standard_\" + standard: \" AND \".join([\n \"(bytecode LIKE '%{}%')\".format(signature) for signature in signatures.values()\n ])\n for standard, signatures in standards.items()\n }", "def authorSign(g = None):\n if not g:\n return\n authors = g.find(\"Author\")\n ret = dict()\n for author in authors:\n ret[author['name']] = author['citation'].replace('\\n','').lower()\n return ret", "def internal_citations(text, initial_label=None,\n require_marker=False, title=None):\n if not initial_label:\n initial_label = Label()\n citations = []\n\n def multiple_citations(matches, comment):\n \"\"\"i.e. 
head :: tail\"\"\"\n for match, start, end in matches:\n label = initial_label\n for submatch in chain([match.head], match.tail):\n cit = ParagraphCitation(\n submatch.pos[0], submatch.pos[1],\n match_to_label(submatch.tokens, label, comment=comment),\n full_start=start,\n full_end=end,\n in_clause=True)\n label = cit.label # update the label to keep context\n citations.append(cit)\n\n def single_citations(matches, comment):\n for match, start, end in matches:\n full_start = start\n if match.marker is not '':\n # Remove the marker from the beginning of the string\n start = match.marker.pos[1]\n citations.append(ParagraphCitation(\n start, end, match_to_label(match, initial_label,\n comment=comment),\n full_start=full_start))\n\n single_citations(grammar.marker_comment.scanString(text), True)\n\n multiple_citations(grammar.multiple_non_comments.scanString(text), False)\n multiple_citations(grammar.multiple_appendix_section.scanString(text),\n False)\n multiple_citations(grammar.multiple_comments.scanString(text), True)\n multiple_citations(grammar.multiple_appendices.scanString(text), False)\n multiple_citations(grammar.multiple_period_sections.scanString(text),\n False)\n\n single_citations(grammar.marker_appendix.scanString(text), False)\n single_citations(grammar.appendix_with_section.scanString(text), False)\n single_citations(grammar.marker_paragraph.scanString(text), False)\n single_citations(grammar.mps_paragraph.scanString(text), False)\n single_citations(grammar.m_section_paragraph.scanString(text), False)\n if not require_marker:\n single_citations(grammar.section_paragraph.scanString(text), False)\n single_citations(grammar.part_section_paragraph.scanString(text),\n False)\n multiple_citations(\n grammar.multiple_section_paragraphs.scanString(text), False)\n\n # Some appendix citations are... 
complex\n for match, start, end in grammar.appendix_with_part.scanString(text):\n full_start = start\n if match.marker is not '':\n start = match.marker.pos[1]\n label = filter(lambda l: l != '.', list(match)[3:])\n label = dict(zip(['p1', 'p2', 'p3'], label))\n citations.append(ParagraphCitation(\n start, end, initial_label.copy(\n appendix=match.appendix, appendix_section=match.a1,\n **label), full_start=full_start))\n\n # Internal citations can sometimes be in the form XX CFR YY.ZZ\n for match, start, end in grammar.internal_cfr_p.scanString(text):\n # Check if this is a reference to the CFR title and part we are parsing\n if match.cfr_title == title and match[1] == initial_label.to_list()[0]:\n full_start = start\n if match.marker is not '':\n # Remove the marker from the beginning of the string\n start = match.marker.pos[1]\n citations.append(ParagraphCitation(\n start, end, match_to_label(match, initial_label),\n full_start=full_start))\n else:\n continue\n\n # And sometimes there are several of them\n for match, start, end in grammar.multiple_cfr_p.scanString(text):\n label = initial_label\n if match.head.cfr_title == title:\n for submatch in chain([match.head], match.tail):\n if submatch.part == initial_label.to_list()[0]:\n cit = ParagraphCitation(\n submatch.pos[0], submatch.pos[1],\n match_to_label(submatch.tokens, label),\n full_start=start,\n full_end=end,\n in_clause=True)\n label = cit.label # update the label to keep context\n citations.append(cit)\n else:\n continue\n\n # Remove any sub-citations\n final_citations = []\n for cit in citations:\n if not any(cit in other for other in citations):\n final_citations.append(cit)\n\n return final_citations", "def test_citations_and_toc_2(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS + [\"+citations\"],\n PANDOC_ARGS=PANDOC_ARGS\n + [\n \"--table-of-contents\",\n \"--citeproc\",\n \"--csl=https://www.zotero.org/styles/ieee-with-url\",\n \"--metadata=link-citations:false\",\n \"--metadata=reference-section-title:References\",\n ],\n FORMATTED_FIELDS=FORMATTED_FIELDS,\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_citation.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n self.maxDiff = None\n\n self.assertEqual(\n (\n '<h2 id=\"string-theory\">String Theory</h2>\\n'\n \"<p>But this foundational principle of science has\"\n \" now been called into question by\"\n ' <a href=\"https://www.britannica.com/science/'\n 'string-theory\">String Theory</a>,'\n \" which is a relative newcomer to theoretical physics, but one\"\n \" that has captured the common imagination, judging by\"\n \" the popular explanations that abound on the Web\"\n ' <span class=\"citation\" data-cites=\"mann2019 wood2019'\n ' jones2020\">[1]–[3]</span>.'\n \" And whether string theory is or is not science, Popper\"\n \" notwithstanding, is an issue that is still up for debate\"\n \" <span\"\n ' class=\"citation\" data-cites=\"siegel2015 castelvecchi2016'\n ' alves2017 francis2019\">[4]–[7]</span>.</p>\\n'\n '<h1 class=\"unnumbered\" id=\"bibliography\">References</h1>\\n'\n '<div id=\"refs\" class=\"references csl-bib-body\"'\n ' role=\"doc-bibliography\">\\n'\n '<div id=\"ref-mann2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[1]'\n ' </div><div class=\"csl-right-inline\">A. Mann,'\n \" <span>“<span>What Is String Theory?</span>”</span>\"\n \" 20-Mar-2019. 
[Online].\"\n ' Available: <a href=\"https://www.livescience.com/'\n '65033-what-is-string-theory.html\">'\n \"https://www.livescience.com/\"\n \"65033-what-is-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-wood2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[2] </div>'\n '<div class=\"csl-right-inline\">'\n \"C. Wood, <span>“<span>What Is String Theory?</span>.\"\n \" Reference article:\"\n \" A simplified explanation and brief history of string\"\n \" theory,”</span> 11-Jul-2019.\"\n ' [Online]. Available: <a href=\"https://www.space.com/'\n '17594-string-theory.html\">'\n \"https://www.space.com/17594-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-jones2020\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[3]'\n ' </div><div class=\"csl-right-inline\">'\n 'A. Z. Jones, <span>“<span class=\"nocase\">The Basics of String'\n \" Theory</span>,”</span> 02-Mar-2019. [Online]. Available:\"\n ' <a href=\"https://www.thoughtco.com/'\n 'what-is-string-theory-2699363\">'\n \"https://www.thoughtco.com/what-is-string-theory-2699363</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-siegel2015\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[4]'\n ' </div><div class=\"csl-right-inline\">'\n \"E. Siegel, <span>“<span>Why String Theory Is Not A Scientific\"\n \" Theory</span>,”</span> 23-Dec-2015. [Online]. Available:\"\n \" <a\"\n ' href=\"https://www.forbes.com/sites/'\n \"startswithabang/2015/12/23/\"\n 'why-string-theory-is-not-science/\">https://www.forbes.com/'\n \"sites/startswithabang/2015/12/23/\"\n \"why-string-theory-is-not-science/</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-castelvecchi2016\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[5]'\n ' </div><div class=\"csl-right-inline\">'\n 'D. Castelvecchi, <span>“<span class=\"nocase\">'\n \"Feuding physicists turn\"\n \" to philosophy for help</span>. String theory is at the\"\n \" heart of a debate over the integrity of the scientific\"\n \" method itself,”</span> 05-Jan-2016. [Online]. Available:\"\n ' <a href=\"https://www.nature.com/news/'\n 'feuding-physicists-turn-to-philosophy-for-help-1.19076\">'\n \"https://www.nature.com/news/\"\n \"feuding-physicists-turn-to-philosophy-for-help-1.19076</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-alves2017\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[6] </div>'\n '<div class=\"csl-right-inline\">'\n 'R. A. Batista and J. Primack, <span>“<span class=\"nocase\">'\n \"Is String theory falsifiable?</span>. Can a theory that isn’t\"\n \" completely testable still be useful to physics?”</span>\"\n \" [Online].\"\n ' Available: <a href=\"https://metafact.io/factchecks/'\n '30-is-string-theory-falsifiable\">'\n \"https://metafact.io/factchecks/\"\n \"30-is-string-theory-falsifiable</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-francis2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[7]'\n ' </div><div class=\"csl-right-inline\">'\n 'M. R. Francis, <span>“<span class=\"nocase\">Falsifiability and'\n \" physics</span>. 
Can a theory that isn’t completely testable\"\n \" still be useful to physics?”</span> 23-Apr-2019.\"\n \" [Online]. Available:\"\n ' <a href=\"https://www.scientificamerican.com/'\n 'article/is-string-theory-science/\">'\n \"https://www.scientificamerican.com/article/is-\"\n \"string-theory-science/</a>. [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n \"</div>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"Valid Content With Citation\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n (\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#string-theory\">String Theory</a></li>\\n'\n '<li><a href=\"#bibliography\">References</a></li>\\n'\n \"</ul>\\n</nav>\\n\"\n ),\n str(metadata[\"toc\"]),\n )", "def test_citations_and_toc(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS + [\"+citations\"],\n PANDOC_ARGS=PANDOC_ARGS\n + [\n \"--toc\",\n \"-C\",\n \"--csl=https://www.zotero.org/styles/ieee-with-url\",\n \"--metadata=link-citations:false\",\n \"--metadata=reference-section-title:References\",\n ],\n FORMATTED_FIELDS=FORMATTED_FIELDS,\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_citation.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n self.maxDiff = None\n\n self.assertEqual(\n (\n '<h2 id=\"string-theory\">String Theory</h2>\\n'\n \"<p>But this foundational principle of science has\"\n \" now been called into question by\"\n ' <a href=\"https://www.britannica.com/science/'\n 'string-theory\">String Theory</a>,'\n \" which is a relative newcomer to theoretical physics, but one\"\n \" that has captured the common imagination, judging by\"\n \" the popular explanations that abound on the Web\"\n ' <span class=\"citation\" data-cites=\"mann2019 wood2019'\n ' jones2020\">[1]–[3]</span>.'\n \" And whether string theory is or is not science, Popper\"\n \" notwithstanding, is an issue that is still up for debate\"\n \" <span\"\n ' class=\"citation\" data-cites=\"siegel2015 castelvecchi2016'\n ' alves2017 francis2019\">[4]–[7]</span>.</p>\\n'\n '<h1 class=\"unnumbered\" id=\"bibliography\">References</h1>\\n'\n '<div id=\"refs\" class=\"references csl-bib-body\"'\n ' role=\"doc-bibliography\">\\n'\n '<div id=\"ref-mann2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[1]'\n ' </div><div class=\"csl-right-inline\">A. Mann,'\n \" <span>“<span>What Is String Theory?</span>”</span>\"\n \" 20-Mar-2019. [Online].\"\n ' Available: <a href=\"https://www.livescience.com/'\n '65033-what-is-string-theory.html\">'\n \"https://www.livescience.com/\"\n \"65033-what-is-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-wood2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[2] </div>'\n '<div class=\"csl-right-inline\">'\n \"C. Wood, <span>“<span>What Is String Theory?</span>.\"\n \" Reference article:\"\n \" A simplified explanation and brief history of string\"\n \" theory,”</span> 11-Jul-2019.\"\n ' [Online]. 
Available: <a href=\"https://www.space.com/'\n '17594-string-theory.html\">'\n \"https://www.space.com/17594-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-jones2020\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[3]'\n ' </div><div class=\"csl-right-inline\">'\n 'A. Z. Jones, <span>“<span class=\"nocase\">The Basics of String'\n \" Theory</span>,”</span> 02-Mar-2019. [Online]. Available:\"\n ' <a href=\"https://www.thoughtco.com/'\n 'what-is-string-theory-2699363\">'\n \"https://www.thoughtco.com/what-is-string-theory-2699363</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-siegel2015\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[4]'\n ' </div><div class=\"csl-right-inline\">'\n \"E. Siegel, <span>“<span>Why String Theory Is Not A Scientific\"\n \" Theory</span>,”</span> 23-Dec-2015. [Online]. Available:\"\n \" <a\"\n ' href=\"https://www.forbes.com/sites/'\n \"startswithabang/2015/12/23/\"\n 'why-string-theory-is-not-science/\">https://www.forbes.com/'\n \"sites/startswithabang/2015/12/23/\"\n \"why-string-theory-is-not-science/</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-castelvecchi2016\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[5]'\n ' </div><div class=\"csl-right-inline\">'\n 'D. Castelvecchi, <span>“<span class=\"nocase\">'\n \"Feuding physicists turn\"\n \" to philosophy for help</span>. String theory is at the\"\n \" heart of a debate over the integrity of the scientific\"\n \" method itself,”</span> 05-Jan-2016. [Online]. Available:\"\n ' <a href=\"https://www.nature.com/news/'\n 'feuding-physicists-turn-to-philosophy-for-help-1.19076\">'\n \"https://www.nature.com/news/\"\n \"feuding-physicists-turn-to-philosophy-for-help-1.19076</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-alves2017\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[6] </div>'\n '<div class=\"csl-right-inline\">'\n 'R. A. Batista and J. Primack, <span>“<span class=\"nocase\">'\n \"Is String theory falsifiable?</span>. Can a theory that isn’t\"\n \" completely testable still be useful to physics?”</span>\"\n \" [Online].\"\n ' Available: <a href=\"https://metafact.io/factchecks/'\n '30-is-string-theory-falsifiable\">'\n \"https://metafact.io/factchecks/\"\n \"30-is-string-theory-falsifiable</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-francis2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[7]'\n ' </div><div class=\"csl-right-inline\">'\n 'M. R. Francis, <span>“<span class=\"nocase\">Falsifiability and'\n \" physics</span>. Can a theory that isn’t completely testable\"\n \" still be useful to physics?”</span> 23-Apr-2019.\"\n \" [Online]. Available:\"\n ' <a href=\"https://www.scientificamerican.com/'\n 'article/is-string-theory-science/\">'\n \"https://www.scientificamerican.com/article/is-\"\n \"string-theory-science/</a>. 
[Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n \"</div>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"Valid Content With Citation\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n (\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#string-theory\">String Theory</a></li>\\n'\n '<li><a href=\"#bibliography\">References</a></li>\\n'\n \"</ul>\\n</nav>\\n\"\n ),\n str(metadata[\"toc\"]),\n )\n self.assertEqual(\n (\n \"<p>But this foundational principle of science has now been\"\n \" called into question by\"\n ' <a href=\"https://www.britannica.com/science/string-theory\">'\n \"String Theory</a>.</p>\\n\"\n ),\n str(metadata[\"summary\"]),\n )", "def test_html_metadata_dc_case() -> None:\n\n snippet = \"\"\"\n <html>\n <head>\n <meta name=\"DC.Citation.Issue\" content=\"123\"/>\n </head>\n <body>Hi.</body>\n </html>\"\"\"\n\n meta = html_extract_biblio(\"http://example.org\", HTMLParser(snippet))\n assert meta is not None\n assert meta.issue == \"123\"", "def citeproc(style, reference_list):\n pass", "def get_clinical_strat(params: Parameters) -> Stratification:\n clinical_strat = Stratification(\"clinical\", CLINICAL_STRATA, INFECTIOUS_COMPARTMENTS)\n clinical_params = params.clinical_stratification\n country = params.country\n pop = params.population\n\n \"\"\"\n Infectiousness adjustments for clinical stratification\n \"\"\"\n # Add infectiousness reduction multiplier for all non-symptomatic infectious people.\n # These people are less infectious because of biology.\n non_sympt_adjust = Overwrite(clinical_params.non_sympt_infect_multiplier)\n clinical_strat.add_infectiousness_adjustments(\n Compartment.LATE_EXPOSED,\n {\n Clinical.NON_SYMPT: non_sympt_adjust,\n Clinical.SYMPT_NON_HOSPITAL: None,\n Clinical.SYMPT_ISOLATE: None,\n Clinical.HOSPITAL_NON_ICU: None,\n Clinical.ICU: None,\n },\n )\n clinical_strat.add_infectiousness_adjustments(\n Compartment.EARLY_ACTIVE,\n {\n Clinical.NON_SYMPT: non_sympt_adjust,\n Clinical.SYMPT_NON_HOSPITAL: None,\n Clinical.SYMPT_ISOLATE: None,\n Clinical.HOSPITAL_NON_ICU: None,\n Clinical.ICU: None,\n },\n )\n # Add infectiousness reduction for people who are late active and in isolation or hospital/icu.\n # These people are less infectious because of physical distancing/isolation/PPE precautions.\n late_infect_multiplier = clinical_params.late_infect_multiplier\n clinical_strat.add_infectiousness_adjustments(\n Compartment.LATE_ACTIVE,\n {\n Clinical.NON_SYMPT: non_sympt_adjust,\n Clinical.SYMPT_ISOLATE: Overwrite(late_infect_multiplier[Clinical.SYMPT_ISOLATE]),\n Clinical.SYMPT_NON_HOSPITAL: None,\n Clinical.HOSPITAL_NON_ICU: Overwrite(late_infect_multiplier[Clinical.HOSPITAL_NON_ICU]),\n Clinical.ICU: Overwrite(late_infect_multiplier[Clinical.ICU]),\n },\n )\n\n \"\"\"\n Adjust infection death rates for hospital patients (ICU and non-ICU)\n \"\"\"\n symptomatic_adjuster = params.clinical_stratification.props.symptomatic.multiplier\n hospital_adjuster = params.clinical_stratification.props.hospital.multiplier\n ifr_adjuster = params.infection_fatality.multiplier\n\n # Get all the adjustments in the same way as we will do if the immunity stratification is implemented\n entry_adjustments, death_adjs, progress_adjs, recovery_adjs, _, _ = get_all_adjs(\n clinical_params,\n country,\n pop,\n params.infection_fatality.props,\n params.sojourn,\n params.testing_to_detection,\n 
params.case_detection,\n ifr_adjuster,\n symptomatic_adjuster,\n hospital_adjuster,\n )\n\n # Assign all the adjustments to the model\n for agegroup in AGEGROUP_STRATA:\n source = {\"agegroup\": agegroup}\n clinical_strat.add_flow_adjustments(\n \"infect_onset\", entry_adjustments[agegroup], source_strata=source\n )\n clinical_strat.add_flow_adjustments(\n \"infect_death\", death_adjs[agegroup], source_strata=source\n )\n clinical_strat.add_flow_adjustments(\n \"progress\",\n progress_adjs,\n source_strata=source,\n )\n clinical_strat.add_flow_adjustments(\n \"recovery\",\n recovery_adjs[agegroup],\n source_strata=source,\n )\n\n return clinical_strat", "def get_meta_information() -> Dict:\n return {'name': 'NAS-Bench-201',\n 'references': ['Xuanyi Dong, Yi Yang',\n 'NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search',\n 'https://openreview.net/forum?id=HJxyZkBKDr',\n 'https://github.com/D-X-Y/AutoDL-Projects'],\n }", "def test_citations_and_toc_with_valid_defaults(self):\n\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH,\n \"valid_defaults_with_toc_and_citations.yaml\",\n )\n ]\n\n settings = get_settings(\n PANDOC_DEFAULT_FILES=pandoc_default_files,\n FORMATTED_FIELDS=FORMATTED_FIELDS,\n )\n pandoc_reader = PandocReader(settings)\n\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_citation.md\"\n )\n output, metadata = pandoc_reader.read(source_path)\n self.maxDiff = None # pylint: disable=invalid-name\n\n self.assertEqual(\n (\n '<h2 id=\"string-theory\">String Theory</h2>\\n'\n \"<p>But this foundational principle of science has\"\n \" now been called into question by\"\n ' <a href=\"https://www.britannica.com/science/'\n 'string-theory\">String Theory</a>,'\n \" which is a relative newcomer to theoretical physics, but one\"\n \" that has captured the common imagination, judging by\"\n \" the popular explanations that abound on the Web\"\n ' <span class=\"citation\" data-cites=\"mann2019 wood2019'\n ' jones2020\">[1]–[3]</span>.'\n \" And whether string theory is or is not science, Popper\"\n \" notwithstanding, is an issue that is still up for debate\"\n \" <span\"\n ' class=\"citation\" data-cites=\"siegel2015 castelvecchi2016'\n ' alves2017 francis2019\">[4]–[7]</span>.</p>\\n'\n '<h1 class=\"unnumbered\" id=\"bibliography\">References</h1>\\n'\n '<div id=\"refs\" class=\"references csl-bib-body\"'\n ' role=\"doc-bibliography\">\\n'\n '<div id=\"ref-mann2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[1]'\n ' </div><div class=\"csl-right-inline\">A. Mann,'\n \" <span>“<span>What Is String Theory?</span>”</span>\"\n \" 20-Mar-2019. [Online].\"\n ' Available: <a href=\"https://www.livescience.com/'\n '65033-what-is-string-theory.html\">'\n \"https://www.livescience.com/\"\n \"65033-what-is-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-wood2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[2] </div>'\n '<div class=\"csl-right-inline\">'\n \"C. Wood, <span>“<span>What Is String Theory?</span>.\"\n \" Reference article:\"\n \" A simplified explanation and brief history of string\"\n \" theory,”</span> 11-Jul-2019.\"\n ' [Online]. 
Available: <a href=\"https://www.space.com/'\n '17594-string-theory.html\">'\n \"https://www.space.com/17594-string-theory.html</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-jones2020\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[3]'\n ' </div><div class=\"csl-right-inline\">'\n 'A. Z. Jones, <span>“<span class=\"nocase\">The Basics of String'\n \" Theory</span>,”</span> 02-Mar-2019. [Online]. Available:\"\n ' <a href=\"https://www.thoughtco.com/'\n 'what-is-string-theory-2699363\">'\n \"https://www.thoughtco.com/what-is-string-theory-2699363</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-siegel2015\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[4]'\n ' </div><div class=\"csl-right-inline\">'\n \"E. Siegel, <span>“<span>Why String Theory Is Not A Scientific\"\n \" Theory</span>,”</span> 23-Dec-2015. [Online]. Available:\"\n \" <a\"\n ' href=\"https://www.forbes.com/sites/'\n \"startswithabang/2015/12/23/\"\n 'why-string-theory-is-not-science/\">https://www.forbes.com/'\n \"sites/startswithabang/2015/12/23/\"\n \"why-string-theory-is-not-science/</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-castelvecchi2016\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[5]'\n ' </div><div class=\"csl-right-inline\">'\n 'D. Castelvecchi, <span>“<span class=\"nocase\">'\n \"Feuding physicists turn\"\n \" to philosophy for help</span>. String theory is at the\"\n \" heart of a debate over the integrity of the scientific\"\n \" method itself,”</span> 05-Jan-2016. [Online]. Available:\"\n ' <a href=\"https://www.nature.com/news/'\n 'feuding-physicists-turn-to-philosophy-for-help-1.19076\">'\n \"https://www.nature.com/news/\"\n \"feuding-physicists-turn-to-philosophy-for-help-1.19076</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-alves2017\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[6] </div>'\n '<div class=\"csl-right-inline\">'\n 'R. A. Batista and J. Primack, <span>“<span class=\"nocase\">'\n \"Is String theory falsifiable?</span>. Can a theory that isn’t\"\n \" completely testable still be useful to physics?”</span>\"\n \" [Online].\"\n ' Available: <a href=\"https://metafact.io/factchecks/'\n '30-is-string-theory-falsifiable\">'\n \"https://metafact.io/factchecks/\"\n \"30-is-string-theory-falsifiable</a>.\"\n \" [Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n '<div id=\"ref-francis2019\" class=\"csl-entry\"'\n ' role=\"doc-biblioentry\">\\n'\n '<div class=\"csl-left-margin\">[7]'\n ' </div><div class=\"csl-right-inline\">'\n 'M. R. Francis, <span>“<span class=\"nocase\">Falsifiability and'\n \" physics</span>. Can a theory that isn’t completely testable\"\n \" still be useful to physics?”</span> 23-Apr-2019.\"\n \" [Online]. Available:\"\n ' <a href=\"https://www.scientificamerican.com/'\n 'article/is-string-theory-science/\">'\n \"https://www.scientificamerican.com/article/is-\"\n \"string-theory-science/</a>. 
[Accessed: 12-Nov-2020]</div>\\n\"\n \"</div>\\n\"\n \"</div>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"Valid Content With Citation\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))\n self.assertEqual(\n (\n '<nav class=\"toc\" role=\"doc-toc\">\\n'\n \"<ul>\\n\"\n '<li><a href=\"#string-theory\">String Theory</a></li>\\n'\n '<li><a href=\"#bibliography\">References</a></li>\\n'\n \"</ul>\\n</nav>\\n\"\n ),\n str(metadata[\"toc\"]),\n )", "def fill(self):\n if self.source == 'citations':\n url = self._scholarly.URLS(\"CITATIONPUB\").format(self.id_citations)\n soup = self._scholarly._get_soup(\n self._scholarly.URLS('HOST').format(url))\n self.bib['title'] = soup.find('div', id='gsc_vcd_title').text\n\n if soup.find('a', class_='gsc_vcd_title_link'):\n self.bib['url'] = soup.find(\n 'a', class_='gsc_vcd_title_link')['href']\n\n for item in soup.find_all('div', class_='gs_scl'):\n key = item.find(class_='gsc_vcd_field').text\n val = item.find(class_='gsc_vcd_value')\n if key == 'Authors':\n self.bib['author'] = ' and '.join(self.get_authorlist(val))\n elif key == 'Journal':\n self.bib['journal'] = val.text\n elif key == 'Volume':\n self.bib['volume'] = val.text\n elif key == 'Issue':\n self.bib['number'] = val.text\n elif key == 'Pages':\n self.bib['pages'] = val.text\n elif key == 'Publisher':\n self.bib['publisher'] = val.text\n elif key == 'Publication date':\n self.bib['year'] = arrow.get(val.text).year\n elif key == 'Description':\n if val.text[0:8].lower() == 'abstract':\n val = val.text[9:].strip()\n self.bib['abstract'] = val\n elif key == 'Total citations':\n self.id_scholarcitedby = re.findall(\n self._scholarly.URLS('SCHOLARPUBRE'), val.a['href'])[0]\n\n # number of citation per year\n years = [int(y.text) for y in soup.find_all(class_='gsc_vcd_g_t')]\n cites = [int(c.text) for c in soup.find_all(class_='gsc_vcd_g_al')]\n self.cites_per_year = dict(zip(years, cites))\n\n if soup.find('div', class_='gsc_vcd_title_ggi'):\n self.bib['eprint'] = soup.find(\n 'div', class_='gsc_vcd_title_ggi').a['href']\n self._filled = True\n\n elif self.source == 'scholar':\n self.bib['add_to_lib'] = self.url_add_sclib\n\n try:\n bibtex = self._scholarly._get_soup(self.url_scholarbib)\n bibtex = bibtex.find('pre').string\n self.bib.update(bibtexparser.loads(bibtex).entries[0])\n self.bib['author_count'] = str(\n len(self.bib['author'].split('and')))\n self.bib['age'] = str(\n int(date.today().year) - int(self.bib['year']))\n except:\n # did not find year\n pass\n\n self._filled = True\n return self", "def test_check_canonical_styles(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n self.assertIn(font_metadata.style, self.CANONICAL_STYLE_VALUES)\n if self.is_italic(font_metadata):\n if font_metadata.style != 'italic':\n _ = \"%s: The font style is %s but it should be italic\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))\n else:\n if font_metadata.style != 'normal':\n _ = \"%s: The font style is %s but it should be normal\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))", "def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")", "def to_spec(self) -> dict[str, typing.Any]:\n spec = {\n \"name\": self.name,\n \"title\": self.title,\n \"comment\": self.comment,\n 
\"references\": self.references,\n \"institution\": self.institution,\n \"hierarchical\": self.hierarchical,\n \"last_update\": self.last_update.isoformat(),\n }\n if self.version is not None:\n spec[\"version\"] = self.version\n categories = {}\n for cat in self.values():\n code, cat_spec = cat.to_spec()\n categories[code] = cat_spec\n spec[\"categories\"] = categories\n\n return spec" ]
[ "0.6560424", "0.63728225", "0.6298377", "0.62302023", "0.6112516", "0.5896945", "0.5736574", "0.57289135", "0.5602861", "0.5523079", "0.55127895", "0.5486311", "0.5477174", "0.5460558", "0.5376419", "0.5359614", "0.5359592", "0.53399044", "0.52923423", "0.52919734", "0.5280967", "0.5261026", "0.5254781", "0.5236804", "0.5228463", "0.5206179", "0.5206015", "0.51969373", "0.51679313", "0.5112143" ]
0.7456293
0
Checks that the rules for each registered style see the checked program.
def test_all_styles() -> None: with tempfile.NamedTemporaryFile(suffix='.f90', mode='wt') as handle: print('module teapot\nend module teapot\n', file=handle) handle.seek(0) styles = [_StyleHarness(), _StyleHarness()] unit_under_test = CheckEngine(styles) unit_under_test.check(Path(handle.name)) assert [program.get_text() for program in styles[0].seen] \ == ['module teapot\nend module teapot\n\n'] assert [program.get_text() for program in styles[1].seen] \ == ['module teapot\nend module teapot\n\n']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def style(command, checkonly=False):\n black(command, checkonly=checkonly)\n isort(command, checkonly=checkonly)\n lint(command)\n # Only prints if doesn't exit from the above not failing out\n print(\n \"\"\"\nAll Style Checks Passed Successfully\n====================================\n\"\"\"\n )", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def warning_check(self, rule_to_check, valid):\r\n for warning in self.warning_functions:\r\n warning(rule_to_check, valid)", "def check_custom_rules(self):\n if self.custom_rules:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n passed_custom, override = self.custom_rules(self.results.linter.stats, self.fname)\n if not passed_custom:\n self.logging.warning(\"{} FAILED CUSTOM CHECKS\".format(self.fname))\n self.custom_failed.append(self.fname)\n return passed_custom, override\n return False, False", "def _check_styles(self, dataset):\n # list of column ids to exclude from plotting\n exclude_cols = [\"sample_name\", \"patient_id\", \"title\"]\n\n # check column styles\n if dataset['styles']['columns']['color'] == []:\n if dataset['metadata']['columns'] != '':\n # load metadata\n mdat = load_data(dataset['metadata']['columns'])\n\n # exclude known uninformative columns\n cols_to_drop = [x for x in exclude_cols if x in mdat.columns]\n\n if len(cols_to_drop) > 0:\n mdat = mdat.drop(cols_to_drop, axis=1)\n\n # set default columns to use for plotting\n dataset['styles']['columns']['color'] = mdat.columns[mdat.nunique() > 1].tolist()\n\n # check row styles\n if dataset['styles']['rows']['color'] == []:\n if dataset['metadata']['rows'] != '':\n mdat = load_data(dataset['metadata']['rows'])\n dataset['styles']['rows']['color'] = mdat.columns[mdat.nunique() > 1].tolist()", "def run_check(self, ctx: RunContext): # noqa\n params = ctx.get_params(\"pycodestyle\")\n options = ctx.options\n if options:\n params.setdefault(\"max_line_length\", options.max_line_length)\n\n if params:\n parser = get_parser()\n for option in parser.option_list:\n if option.dest and option.dest in params:\n value = params[option.dest]\n if isinstance(value, str):\n params[option.dest] = option.convert_value(option, value)\n\n style = StyleGuide(reporter=_PycodestyleReport, **params)\n options = style.options\n options.report.ctx = ctx # type: ignore\n checker = Checker(ctx.filename, lines=ctx.lines, options=options)\n checker.check_all()", "def _verify_rules(rules, source_type, item_name, item_link, step_links):\n global retval\n\n for rule_data in rules:\n try:\n rule = toto.models.matchrule.Matchrule.read(rule_data)\n rule.source_type = source_type\n log.doing(\"'%s' - '%s' - verify %s matchrule - %s\" \\\n % (layout_path, item_name, source_type, rule_data))\n rule.verify_rule(item_link, step_links)\n\n except toto.models.matchrule.RuleVerficationFailed, e:\n log.failing(\"'%s' - '%s' - verify %s matchrule - %s\" \\\n % (layout_path, item_name, source_type, e))\n retval = 1\n except Exception, e:\n log.error(\"in verify matchrule - %s\" % e)\n else:\n log.passing(\"'%s' - '%s' - verify %s matchrule\" \\\n % (layout_path, item_name, source_type))", "def check(self):\r\n for action in self._actions:\r\n action.check()", "def update_known_styles_state(app: sphinx.application.Sphinx) -> None:\n global _KNOWN_STYLES_IN_USE\n\n 
_KNOWN_STYLES_IN_USE = {\n \"light\": _get_light_style(app),\n \"dark\": _get_dark_style(app),\n }", "def run_check(self, path):\n\n result = pycodestyle.StyleGuide().check_files(paths=[path])\n\n if result.total_errors != 0:\n self.assertEqual(\n result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def check_all(c):", "def checks(self):\r\n return checks.Checks(self)", "def test_all_rules_are_tested():\n tested_rules = defaultdict(list)\n for test in TESTS:\n cls = test[0]\n rule = test[1]\n tested_rules[cls].append(rule)\n for cls in tested_rules.keys():\n if hasattr(cls, '_binary_rules'):\n rules = set(cls._binary_rules.keys())\n elif hasattr(cls, '_rules'):\n rules = set(cls._rules.keys())\n assert set(tested_rules[cls]) == rules", "def valid(self):\n\t\tfor k, v in self.rules.items():\n\t\t\tfor i in v:\n\t\t\t\tif any([self.valid_rule_1(i), self.valid_rule_2(i), self.valid_rule_3(k, i)]):\n\t\t\t\t\t# print(\"Got a pass\")\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# print(\"Got a fail\")\n\t\t\t\t\treturn False\n\t\t# print(\"CORRECT CFG\")\n\t\treturn True", "def check_color_scoping(self):\n for mapping in self.OrderedColorMappings:\n if mapping.token.text not in self.ColorDefinitions:\n raise Exception(\"%d:%d Color %s is never defined\" % (mapping.token.line, mapping.token.col, mapping.token.text))", "def check_rules(self):\n conditions = ('element', 'name', 'type')\n metric = set(['type', 'value'])\n \n elements = {}\n for rule in self.rules:\n if not isinstance(rule, dict):\n self.fail(msg='Rule format is not type dict: %s, type: %s'\n % (rule, type(rule)))\n if 'name' not in rule:\n self.fail(msg='Name is 
a required field for all rules')\n if 'match_condition' in rule:\n if not isinstance(rule['match_condition'], list):\n self.fail(msg='Match condition is expected to be a list')\n\n for match in rule['match_condition']:\n if match.get('type') == 'metric':\n if set(match.keys()) ^ metric:\n self.fail(msg='Metric definition can only have '\n 'values: %s, given: %s' % (list(metric), match))\n continue\n # Validate fields in condition\n for field in conditions:\n if field not in match:\n self.fail(msg='Match condition is missing a required '\n 'key: %r ,required: %s' % (match, list(conditions)))\n if field == 'element' and match.get(field) not in access_list:\n self.fail(msg='Match condition element is not valid: %s, '\n 'valid types: %s' % (match.get(field), list(access_list)))\n elif field == 'type' and match[field] not in match_conditions:\n self.fail(msg='Match condition type is not valid: %s, '\n 'valid types: %s' % (match[field], list(match_conditions)))\n \n element = match.get('element')\n # peer_address can only be type engine or external_bgp_peer\n if match['type'] == 'peer_address' and element not \\\n in ('engine', 'external_bgp_peer'):\n self.fail(msg='A peer address element can only be of type '\n 'engine or external_bgp_peer, provided definition: %s' % match)\n elif match['type'] == 'next_hop' and ('prefix_list' not in \\\n element and 'access_list' not in element):\n self.fail(msg='A next hop definition must be either an access '\n 'list or prefix list type, provided defintion: %s' % match)\n \n if 'engine' in element:\n element = 'single_fw,fw_cluster,virtual_fw'\n elements.setdefault(\n element, set([])).add(match.get('name'))\n \n return [elements] if elements else []", "def scan_system_css():\r\n pass", "def styles_formatting(styles):\n for index, style in enumerate(styles):\n try:\n parse_style(style)\n except EmptyLineError:\n continue\n except NotAValidCssClassError:\n raise Invalid(\"Style %i does not have a valid CSS class: %s\" % (index + 1, style))\n except:\n raise Invalid(\"Style %i is not correctly formatted: %s\" % (index + 1, style))\n return True", "def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results", "def validate(self):\r\n\r\n for sprite in self.sprites:\r\n sprite.validate()", "def hrules(self):\n ...", "def parseRules(self, filename):\n\n try:\n with open(filename) as fd:\n\n lineNo = 1\n \n for line in fd:\n if '#' in line:\n line = line.split('#', 1)[0]\n if line.strip() != '':\n try:\n rule = self.parseRule(line, lineNo)\n if rule != None:\n self._rules.append(rule)\n else:\n eprint(\"Error in line {0} of rule file '{1}' ignoring rule:\\n{2}\\n\".format(lineNo, filename, line), end='')\n \n except Exception as ex:\n eprint(\"Error in line {0} of rule file '{1}': {2} ignoring rule:\\n{3}\\n\".format(lineNo, filename, ex, line), end='')\n lineNo = lineNo + 1\n except:\n # error opening file\n eprint(\"Error in parseRules: traceback info: {0}\".format(traceback.format_exc()))", "def update_rules():\n update_all_rules()\n return \"OK\"", "def 
run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()", "def rules():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n \r\n document.add_heading('System Rules', level=1)\r\n document.add_heading('Rule Action Definition', level=2)\r\n paragraph = document.add_paragraph('The action on a rule is calculated by summing the selected permissions (e.g 3 = Create + Read.)')\r\n actions = [1, 2, 4, 8, 16, 32, 64, 128, 256]\r\n action_header = ['create', 'read', 'update', 'delete',\r\n 'export', 'publish', 'change owner',\r\n 'change role', 'export data']\r\n table = document.add_table(rows=2, cols=len(action_header))\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n for item in range(0, len(action_header)):\r\n row.cells[item].text = action_header[item]\r\n row = table.rows[1]\r\n row.cells[0].text = str(actions[0])\r\n row.cells[1].text = str(actions[1])\r\n row.cells[2].text = str(actions[2])\r\n row.cells[3].text = str(actions[3])\r\n row.cells[4].text = str(actions[4])\r\n row.cells[5].text = str(actions[5])\r\n row.cells[6].text = str(actions[6])\r\n row.cells[7].text = str(actions[7])\r\n row.cells[8].text = str(actions[8])\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('Default rules', level=2)\r\n systemrules = get_qlik_sense.get_systemrules('Default')\r\n num_of_systemrules = len(systemrules)\r\n table = document.add_table(rows=num_of_systemrules+1, cols=5)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'rule'\r\n row.cells[2].text = 'resource filter'\r\n row.cells[3].text = 'actions'\r\n row.cells[4].text = 'disabled'\r\n for rule in range(num_of_systemrules):\r\n row = table.rows[rule+1]\r\n row.cells[0].text = str(systemrules[rule][0])\r\n row.cells[1].text = str(systemrules[rule][1])\r\n row.cells[2].text = str(systemrules[rule][2])\r\n row.cells[3].text = str(systemrules[rule][3])\r\n row.cells[4].text = str(systemrules[rule][4])\r\n\r\n document.add_page_break()\r\n document.add_heading('Custom rules', level=2)\r\n\r\n systemrules = get_qlik_sense.get_systemrules('Custom')\r\n num_of_systemrules = len(systemrules)\r\n table = document.add_table(rows=num_of_systemrules+1, cols=5)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'rule'\r\n row.cells[2].text = 'resource filter'\r\n row.cells[3].text = 'actions'\r\n row.cells[4].text = 'disabled'\r\n\r\n for rule in range(num_of_systemrules):\r\n row = table.rows[rule+1]\r\n row.cells[0].text = str(systemrules[rule][0])\r\n row.cells[1].text = 
str(systemrules[rule][1])\r\n row.cells[2].text = str(systemrules[rule][2])\r\n row.cells[3].text = str(systemrules[rule][3])\r\n row.cells[4].text = str(systemrules[rule][4])\r\n\r\n document.add_page_break()\r\n\r\n document.add_heading('Read only rules', level=2)\r\n\r\n systemrules = get_qlik_sense.get_systemrules('ReadOnly')\r\n num_of_systemrules = len(systemrules)\r\n table = document.add_table(rows=num_of_systemrules+1, cols=5)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'rule'\r\n row.cells[2].text = 'resource filter'\r\n row.cells[3].text = 'actions'\r\n row.cells[4].text = 'disabled'\r\n\r\n for rule in range(num_of_systemrules):\r\n row = table.rows[rule+1]\r\n row.cells[0].text = str(systemrules[rule][0])\r\n row.cells[1].text = str(systemrules[rule][1])\r\n row.cells[2].text = str(systemrules[rule][2])\r\n row.cells[3].text = str(systemrules[rule][3])\r\n row.cells[4].text = str(systemrules[rule][4])\r\n \r\n\r\n\r\n document.add_page_break()", "def test_conformance_tests(self):\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['tests'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def _apply_commit_rules(rules, commit):\n all_violations = []\n for rule in rules:\n violations = rule.validate(commit)\n if violations:\n all_violations.extend(violations)\n return all_violations" ]
[ "0.63099915", "0.62471724", "0.61242145", "0.6110035", "0.6045199", "0.5876749", "0.5856414", "0.57824194", "0.573222", "0.57175714", "0.5628307", "0.5610072", "0.55852497", "0.55773956", "0.5566077", "0.5560245", "0.55477786", "0.5546691", "0.5544556", "0.55175143", "0.55173016", "0.5507233", "0.54638743", "0.5447961", "0.54420173", "0.5432004", "0.5424714", "0.5417555", "0.5407851", "0.54023063" ]
0.63328475
0
Recursive function that determines all valid combinations of numbers in the 'values' array that sum to the target number 'desiredNum'. It is a combinatorial search over subsets of 'values', written so that each combination of values is considered only once, which improves the running time. Because the search is combinatorial, the worst-case running time grows on the order of n! with respect to n (the size of the list of values), but it can also finish quickly when there are few or no possible solutions. Recursion is capped at the depth given by max_depth: if this value is set to 5, at most 5 numbers are used in any set that sums to desiredNum, and no more. A set may use fewer numbers, however.
def recursiveSums(desiredNum, values, depth=0, max_depth=5):
    depth += 1
    if depth > max_depth:
        return
    if len(values) == 1:
        if values[0] == desiredNum:
            return values[0]
    else:
        arr = []
        removals = []
        for i, value in enumerate(values):
            thisDesiredNum = desiredNum - value
            if thisDesiredNum == 0:
                arr.append(value)
            elif thisDesiredNum > 0:
                # quick fix prevents double counting here
                newValues = [l for l in values if l not in removals]
                newValues.pop(newValues.index(value))
                arr.append([value])
                if len(newValues) != 0 and sum(newValues) >= thisDesiredNum:
                    newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)
                    if newSums:
                        if isinstance(newSums, int):
                            arr.append([newSums])
                        else:
                            arr[-1].extend(newSums)
                if len(arr[-1]) == 0 or arr[-1] == [value]:
                    arr.pop()
            removals.append(value)  # remove unusable values
        iteratedValues = [value for value in values if value not in removals]
        if iteratedValues:
            arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))
        return arr
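# Illustrative usage (added for clarity; not part of the original snippet):
# with the structure above, recursiveSums(3, [1, 2, 3]) yields a nested
# result such as [[1, 2], 3], i.e. the sets {1, 2} and {3} that sum to 3.
# Flattening and cleaning the nested lists is left to the caller.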
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def challenge1(data, num_values=2, target_value=2020):\n data_array = [data for _ in range(num_values)]\n for values in product(*data_array):\n if np.sum(values) == target_value:\n print(\"Solution found.\")\n return np.product(values)", "def recursion(self, size, target, start):\n # Base case:\n if target == 0 and size == 0:\n return [[]]\n \n result = []\n for i in range(start, 10):\n if target - i >= 0 and size - 1 >= 0:\n all_but_i = self.recursion(size - 1, target - i, i + 1)\n # 1. all the combinations in `all_but_i` has size `size` - 1\n # 2. and sums up to `target` - `i`,\n # 3. and the minimum number in each combination is `i` + 1\n for combination in all_but_i:\n result.append([i] + combination)\n # inductive assumption maintained:\n \n # Adding back the number `i`,\n # 1. all the combinations in `result` has size `size`,\n # 2. and sums up to `target`\n # 3. and the minimum number in each combination is `i`\n return result", "def get_subsets(arr, n, value):\n result = []\n # return immediately if there is no possible subset in arr whose sum is equal to value\n if dp[n][value] == False:\n return\n \n queue = deque()\n queue.append(Pair(n, value, set()))\n\n while len(queue) > 0:\n pair = queue.popleft()\n if pair.i == 0 or pair.j == 0:\n result.append([arr[i] for i in pair.path_set])\n else:\n exclude = dp[pair.i - 1][pair.j]\n if exclude:\n queue.append(Pair(pair.i-1, pair.j, pair.path_set))\n\n if pair.j >= arr[pair.i-1]:\n include = dp[pair.i - 1][pair.j - arr[pair.i -1]]\n if include:\n b = pair.path_set.copy()\n b.add(pair.i - 1)\n queue.append(Pair(pair.i - 1, pair.j-arr[pair.i-1], b))\n \n return result", "def dynamic_programming_solution():\n num_combos = [1] * (TOTAL + 1)\n\n # ignore 1p coin since we are initializing num_combos with that value\n for coin in COINS[1:]:\n for total in range(TOTAL + 1):\n # if coin value is greater than total then num_combos cannot change\n if coin <= total: \n num_combos[total] += num_combos[total - coin]\n\n return num_combos[TOTAL]", "def combination_sum(candidates: list, target: int) -> list:\n path = [] # type: list[int]\n answer = [] # type: list[int]\n backtrack(candidates, path, answer, target, 0)\n return answer", "def solve_brute_force(n: int, W: int, weight: List[int], value: List[int]) -> int:\n mapped_items = [{\"w\": w, \"v\": v} for i, (w, v) in enumerate(zip(weight, value))]\n\n maximum_value: int = 0\n updated: bool = False\n for i in range(1, n + 1):\n if i > 1 and not updated:\n break\n\n updated = False\n for chosen_items in list(combinations(mapped_items, i)):\n sum_weight = 0\n sum_value = 0\n for item in chosen_items:\n sum_weight += item[\"w\"]\n sum_value += item[\"v\"]\n\n if sum_weight <= W and maximum_value < sum_value:\n updated = True\n maximum_value = sum_value\n return maximum_value", "def coding_problem_42(numbers, target):\n if target == 0:\n return []\n\n valid_numbers = [n for n in numbers if 0 < n <= target]\n for number in sorted(valid_numbers, reverse=True):\n\n remaining_numbers = copy(valid_numbers)\n remaining_numbers.remove(number)\n partial_sum = coding_problem_42(remaining_numbers, target - number)\n if partial_sum is not None:\n return [number] + partial_sum\n\n return None", "def all_change(cur: Currency, r: float, max_permutation=1000, only_cents=True) -> List[Union[int, List]]:\n\n # sub problem: combinations to return n < r with a set of coins s' subset of s\n # base case: combinations to return 0 = 1: empty list\n # characteristic equation: sum of combinations using that coin with 
combinations without using that coin.\n\n # introducing convenient notation working in cents\n m = cur.num_denominations()\n S = [_ for _ in cur.iter_denominations()] if only_cents else [_float2int(x) for x in cur.iter_denominations()]\n n = _float2int(r) if not only_cents else r\n\n # Construct table\n T = [[[0, []] for _ in range(m)] for _ in range(n + 1)]\n\n # Base case (value zero has just one solution: empty list)\n for i in range(m):\n T[0][i] = [1, [[0 for _ in range(m)]]]\n\n # Fill rest of the table entries\n for i in range(1, n + 1):\n for j in range(m):\n # Count of solutions including S[j]\n x = T[i - S[j]][j][0] if i - S[j] >= 0 else 0\n\n # get max_permutation permutation value\n if x > 0:\n k = 0\n while len(T[i][j][1]) < max_permutation and k < len(T[i - S[j]][j][1]):\n sol = T[i - S[j]][j][1][k]\n T[i][j][1].append(sol[:])\n T[i][j][1][-1][j] += 1\n k += 1\n\n # Count of solutions excluding S[j]\n y = T[i][j - 1][0] if j >= 1 else 0\n\n # get max_permutation permutation value\n if y > 0:\n k = 0\n while len(T[i][j][1]) < max_permutation and k < len(T[i][j - 1][1]):\n sol = T[i][j - 1][1][k]\n T[i][j][1].append(sol[:])\n k += 1\n\n # total count\n T[i][j][0] = x + y\n\n return T[-1][-1]", "def all_change_bottom_up(cur: Currency, r: float, max_permutation=1000, only_cents=True) -> List[Union[int, List]]:\n\n # sub problem: combinations to return n < r with a set of coins s' subset of s\n # base case: combinations to return 0 = 1: empty list\n # characteristic equation: sum of combinations using that coin with combinations without using that coin.\n #\n # Since it is expensive to save all solutions m times as many coins, the spatial complexity of m can be reduced by\n # using a bottom up method.\n\n # introducing convenient notation working in cents\n m = cur.num_denominations()\n n = _float2int(r) if not only_cents else r\n S = [_ for _ in cur.iter_denominations()] if only_cents else [_float2int(x) for x in cur.iter_denominations()]\n\n # Construct table\n T = [[0, []] for _ in range(n + 1)]\n\n # Base case (value zero has just one solution: empty list)\n T[0] = [1, [[0 for _ in range(m)]]]\n\n # Pick all coins one by one and update the T[] values after the index greater than or equal to the value of the\n # picked coin\n for i in range(0, m):\n for j in range(S[i], n + 1):\n # Get permutation number\n T[j][0] += T[j - S[i]][0]\n\n # get max_permutation permutation value\n k = 0\n while len(T[j][1]) < max_permutation and k < len(T[j - S[i]][1]):\n sol = T[j - S[i]][1][k]\n T[j][1].append(sol[:])\n T[j][1][-1][i] += 1\n k += 1\n\n return T[n]", "def find_combinations_of_coins(target, max_coin_amount):\n # Terminate conditions\n if max_coin_amount == 1:\n return 1\n if target == 0:\n return 1\n # If not terminating then partition by the max_coin_amount and reducing it to iteratively reduce the problem\n total_combinations = 0\n next_max_coin_amount = max([coin for coin in DENOMINATIONS if coin < max_coin_amount])\n for amount_of_max_coin in range(int(target / max_coin_amount) + 1):\n new_target = target - amount_of_max_coin * max_coin_amount\n total_combinations += find_combinations_of_coins(new_target, next_max_coin_amount)\n return total_combinations", "def combinations(graph, all_combs, all_costs, all_values, start, prev_cost, prev_value, prev_nodes):\n for ii in range(start, graph.size):\n # combination\n nodes = prev_nodes + [ii]\n all_combs.append(nodes)\n # cost\n cost = prev_cost + graph.node_weights[ii][0]\n all_costs.append(cost)\n # value\n value = prev_value + 
graph.node_weights[ii][1] - graph.node_weights[ii][0]\n for node in prev_nodes: # complementarity\n for adjacent in graph.graph[node]:\n if adjacent[0] == ii:\n value += adjacent[1]\n all_values.append(value)\n # recurse\n combinations(graph, all_combs, all_costs, all_values, ii+1, cost, value, nodes)", "def sum_in_list_dyn(number_list, total_value):\n memoization = [[False for i in range(total_value + 1)] for i in range(len(number_list) + 1)]\n\n for i in range(len(number_list) + 1):\n # We can always make 0 with the empty set.\n memoization[i][0] = True\n\n for i in range(1, len(number_list) + 1):\n for j in range(1, total_value + 1):\n if j < number_list[i - 1]:\n memoization[i][j] = memoization[i - 1][j]\n else:\n memoization[i][j] = memoization[i - 1][j] or memoization[i - 1][j - number_list[i - 1]]\n\n return memoization[len(number_list)][total_value]", "def solve(self, value, i=0, current=[]):\n if value == 0:\n temp = [i for i in current]\n temp1 = temp\n temp.sort()\n temp = tuple(temp)\n\n # Checks if the temporary sorted list is unique and if so append it\n # to the main combination sum list\n if temp not in self.unique:\n self.unique[temp] = 1\n self.fibonacciCombinationSum.append(temp1)\n pass\n elif value < 0:\n return\n else:\n pass\n\n for x in range(i, len(self.fibonacciTerms)):\n current.append(self.fibonacciTerms[x])\n self.solve(value - self.fibonacciTerms[x], i, current)\n current.pop(len(current) - 1)", "def minSums(numbers, num_sum):\n\n def generate_permutations(numbers):\n \"\"\" [string,] Given a string of numbers, generate all possible permutations\n of the numbers with plusses in between. \"1\" returns \"1\".\n \"11\" returns [\"11\", \"1+1\"], etc \"\"\"\n\n permutations = list()\n temp = list()\n # Generate all possible permutations of numbers and plusses, record\n # the number of plus signs as cost.\n for i, num in enumerate(numbers):\n # Base case, append the number and cost of 0\n if i == 0:\n permutations.append((num, 0))\n else:\n # Iterate through permutations, appending new items to temp.\n # Strings can be permutated two ways: string + char,\n # string + '+' + char\n for item in permutations:\n temp.append((item[0] + num, item[1]))\n temp.append((item[0] + '+' + num, item[1] + 1))\n # Now we move temp to permutations and clear out temp.\n permutations = temp\n temp = list()\n return permutations\n\n def clean_eval(exp):\n \"\"\" (int) Evaluate expression, ensuring we account for weirdness with\n leading zeros, etc. \"\"\"\n\n # Split expression using '+' as our split token\n number_string = exp.split(\"+\")\n total = int()\n\n # Cost each number string to int, cleaning up leading zeros, then total\n for num in number_string:\n total += int(num)\n\n return total\n\n # Create a dictionary of each permutations' sum and cost. Cost is defined as\n # the total number of additions necessary to generate the sum.\n value_costs = dict()\n\n # Iterate through permutations and populate values and their\n # respective costs into value_costs. 
If we find two equal values\n # with differing costs, use the lower cost.\n for perm in generate_permutations(numbers):\n value = clean_eval(perm[0])\n cost = perm[1]\n # Default the cost to 20 as an upper limit, given our number\n # will never cost more than 9 given 10 characters max.\n if value_costs.get(value, 20) > cost:\n value_costs[value] = cost\n\n return value_costs.get(num_sum, -1)", "def main():\n\n import sys\n sys.setrecursionlimit(10**7)\n from itertools import accumulate, combinations, permutations, product # https://docs.python.org/ja/3/library/itertools.html\n # accumulate() returns iterator! to get list: list(accumulate())\n from math import factorial, ceil, floor\n def factorize(n):\n \"\"\"return the factors of the Arg and count of each factor\n \n Args:\n n (long): number to be resolved into factors\n \n Returns:\n list of tuples: factorize(220) returns [(2, 2), (5, 1), (11, 1)]\n \"\"\"\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct\n def combinations_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result\n def combinations_with_replacement_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items allowing individual elements to be repeated more than once.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n if n < 0 or r < 0:\n raise Exception('combinations_with_replacement_count(n, r) not defined when n or r is negative')\n elif n == 0:\n return 1\n else:\n return combinations_count(n + r - 1, r)\n from bisect import bisect_left, bisect_right\n from collections import deque, Counter, defaultdict # https://docs.python.org/ja/3/library/collections.html#collections.deque\n from heapq import heapify, heappop, heappush, heappushpop, heapreplace,nlargest,nsmallest # https://docs.python.org/ja/3/library/heapq.html\n from copy import deepcopy, copy # https://docs.python.org/ja/3/library/copy.html\n from operator import itemgetter\n # ex1: List.sort(key=itemgetter(1))\n # ex2: sorted(tuples, key=itemgetter(1,2))\n from functools import reduce\n def chmin(x, y):\n \"\"\"change minimum\n if x > y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current minimum value\n y (long): potential minimum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x > y:\n x = y\n return (x, True)\n else:\n return (x, False)\n def chmax(x, y):\n \"\"\"change maximum\n if x < y, x = y and return (x, True).\n convenient when 
solving problems of dp[i]\n \n Args:\n x (long): current maximum value\n y (long): potential maximum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x < y:\n x = y\n return (x, True)\n else:\n return (x, False)\n\n from fractions import gcd # Deprecated since version 3.5: Use math.gcd() instead.\n def gcds(numbers):\n return reduce(gcd, numbers)\n def lcm(x, y):\n return (x * y) // gcd(x, y)\n def lcms(numbers):\n return reduce(lcm, numbers, 1)\n\n # first create factorial_list\n # fac_list = mod_factorial_list(n)\n INF = 10 ** 18\n MOD = 10 ** 9 + 7\n modpow = lambda a, n, p = MOD: pow(a, n, p) # Recursive function in python is slow!\n def modinv(a, p = MOD):\n # evaluate reciprocal using Fermat's little theorem:\n # a**(p-1) is identical to 1 (mod p) when a and p is coprime\n return modpow(a, p-2, p)\n def modinv_list(n, p = MOD):\n if n <= 1:\n return [0,1][:n+1]\n else:\n inv_t = [0,1]\n for i in range(2, n+1):\n inv_t += [inv_t[p % i] * (p - int(p / i)) % p]\n return inv_t\n def modfactorial_list(n, p = MOD):\n if n == 0:\n return [1]\n else:\n l = [0] * (n+1)\n tmp = 1\n for i in range(1, n+1):\n tmp = tmp * i % p\n l[i] = tmp\n return l\n def modcomb(n, k, fac_list = [], p = MOD):\n # fac_list = modfactorial_list(100)\n # print(modcomb(100, 5, modfactorial_list(100)))\n from math import factorial\n if n < 0 or k < 0 or n < k: return 0\n if n == 0 or k == 0: return 1\n if len(fac_list) <= n:\n a = factorial(n) % p\n b = factorial(k) % p\n c = factorial(n-k) % p\n else:\n a = fac_list[n]\n b = fac_list[k]\n c = fac_list[n-k]\n return (a * modpow(b, p-2, p) * modpow(c, p-2, p)) % p\n def modadd(a, b, p = MOD):\n return (a + b) % MOD\n def modsub(a, b, p = MOD):\n return (a - b) % p\n def modmul(a, b, p = MOD):\n return ((a % p) * (b % p)) % p\n def moddiv(a, b, p = MOD):\n return modmul(a, modpow(b, p-2, p))\n\n \"\"\" initialize variables and set inputs\n # initialize variables\n # to initialize list, use [0] * n\n # to initialize two dimentional array, use [[0] * N for _ in range(N)]\n # set inputs\n # open(0).read() is a convenient method:\n # ex) n, m, *x = map(int, open(0).read().split())\n # min(x[::2]) - max(x[1::2])\n # ex2) *x, = map(int, open(0).read().split())\n # don't forget to add comma after *x if only one variable is used\n # preprocessing\n # transpose = [x for x in zip(*data)]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [(1, 4, 7), (2, 5, 8), (3, 6, 9)]\n # flat = [flatten for inner in data for flatten in inner]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # calculate and output\n # output pattern\n # ex1) print(*l) => when l = [2, 5, 6], printed 2 5 6\n \"\"\"\n\n # functions used\n r = lambda: sys.stdin.readline().strip()\n r_int = lambda: int(r())\n R = lambda: list(map(int, r().split()))\n Rfloat = lambda: list(map(float, r().split()))\n Rtuple = lambda: tuple(map(int, r().split()))\n Rmap = lambda: map(int, r().split())\n\n \"\"\" how to treat input\n # single int: int(r())\n # single string: r()\n # single float: float(r())\n # line int: R()\n # line string: r().split()\n # line (str, int, int): [j if i == 0 else int(j) for i, j in enumerate(r().split())]\n # lines int: [R() for _ in range(n)]\n \"\"\"\n\n # main\n N, Q = R()\n STX = [R() for _ in range(N)]\n STX.sort(key=itemgetter(2))\n\n D = [int(r()) for _ in range(Q)]\n Stopped = [-1] * Q\n ans = [-1] * Q\n\n for s, t, x in STX:\n l = bisect_left(D, s-x)\n r = bisect_left(D,t-x)\n a = l\n while a < r:\n if Stopped[a] == -1:\n ans[a] = x\n 
Stopped[a] = r\n a += 1\n else:\n a = Stopped[a]\n\n for i in ans:\n print(i)\n\n \"\"\"memo: how to use defaultdict of list\n # initialize\n Dic = defaultdict(list)\n # append / extend\n Dic[x].append(y)\n # for\n for k, v in Dic.items():\n \"\"\"", "def search(values): \n # First, reduce the puzzle\n values = reduce_puzzle(values)\n if values is False:\n return False\n if all(len(values[s]) == 1 for s in boxes):\n return values\n # Choose one of the unfilled squares with the fewest possibilities\n min_possibility = sys.maxsize\n min_box = \"\"\n for box in boxes:\n if len(values[box]) > 1 and len(values[box]) < min_possibility:\n min_possibility = len(values[box])\n min_box = box\n # Use recursion to solve each one of the resulting sudokus, and \n # if one returns a value (not False), return that answer\n for digit in values[min_box]:\n new_values = values.copy()\n new_values[min_box] = digit\n attempt = search(new_values)\n if attempt:\n return attempt", "def search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt", "def knapsack_recursive_dp(array, values, weights, n, capacity):\n if n == 0 or capacity == 0:\n return 0\n if array[n][capacity] != None:\n return array[n][capacity]\n\n # if weight of n item > capacity\n # cant include it\n if (weights[n-1] > capacity):\n array[n][capacity] = knapsack_recursive(values, weights, n-1, capacity)\n return array[n][capacity]\n else:\n # max without or max with (value + knapsack(capacity - weight))\n array[n][capacity] = max(knapsack_recursive(values, weights, n-1, capacity), values[n-1] + knapsack_recursive(values, weights, n-1, capacity-weights[n-1]))\n return array[n][capacity]", "def findTargetSumWays(self, nums, S):\n ways = 0\n if len(nums) == 0:\n return ways\n\n multiple = [1, -1]\n stack = [[nums[0], 0], [-nums[0], 0]]\n max_num_index = len(nums) - 1\n while stack:\n cur_sum, cur_num_index = stack.pop()\n\n if cur_num_index == max_num_index:\n if cur_sum == S:\n ways += 1\n continue\n\n next_num_index = cur_num_index + 1\n for value in multiple:\n next_sum = cur_sum + nums[next_num_index] * value\n stack_new_item = (next_sum, next_num_index)\n\n if next_num_index == max_num_index:\n if next_sum == S:\n ways += 1\n continue\n\n stack.append(stack_new_item)\n\n return ways", "def solve_problem_recursive(self, amount, denominations):\n global number_of_variants\n number_of_variants = 0\n arraylist = []\n self.inner_solve_problem_recursive(amount, arraylist, 0, denominations)\n return number_of_variants", "def search(values):\n global assignments\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n\n # Check if this solution is unsolvable\n if values is False:\n return False\n\n # Check if we found a solutio, all boxes have one digit\n if all(len(values[s]) == 1 for s in boxes):\n return values\n # Choose one 
of the unfilled squares with the fewest possibilities\n min = 10\n minKey = None\n for v in values:\n if 1 < len(values[v]) < min:\n min = len(values[v])\n minKey = v\n\n for digit in values[minKey]:\n new_values = dict(values)\n assignments_bck = assignments.copy()\n new_values = assign_value(new_values, minKey, digit)\n new_values = search(new_values)\n if new_values != False:\n return new_values\n assignments = assignments_bck.copy()\n return False", "def calulate_total_permutations(value):\n if len(value) == len(set(value)):\n return factorial(len(value))\n\n count_map = {}\n for char in value:\n if char in count_map:\n count_map[char] += 1\n else:\n count_map[char] = 1\n\n base_factor = factorial(len(value))\n additional_factors = [factorial(x) for x in count_map.values()]\n result = 1\n for x in additional_factors:\n result = result * x\n return base_factor / result", "def knapsack_recursive(values, weights, n, capacity):\n if n == 0 or capacity == 0:\n return 0\n\n # if weight of n item > capacity\n # cant include it\n if (weights[n-1] > capacity):\n return knapsack_recursive(values, weights, n-1, capacity)\n else:\n # max without or max with (value + knapsack(capacity - weight))\n return max(knapsack_recursive(values, weights, n-1, capacity), values[n-1] + knapsack_recursive(values, weights, n-1, capacity-weights[n-1]))", "def diophantine_subset_sum(number_list, target, time_limit=TIME_LIMIT):\n started_at = time.time()\n\n # Sort numbers list.\n number_list = sorted(number_list)\n\n # Build sums list.\n sums_list = [number_list[0]]\n for n in range(1, len(number_list)):\n sums_list.append(number_list[n] + sums_list[n-1])\n\n # Sanity check target.\n if number_list[0] > target or target > sums_list[-1]:\n return []\n\n # Add first subset to subset stack.\n subset_stack = [(len(number_list)-1, target, ())]\n\n # Process subset stack.\n while subset_stack:\n # Enforce time constraint.\n runtime = time.time() - started_at\n if runtime > time_limit:\n raise SummerTimeoutError('No solution found in %d seconds.' 
% (time_limit))\n\n # Pop first subset off queue\n offset, subtarget, subset = subset_stack.pop()\n\n # Keeps only sums less than subset target.\n sumlist_offset = 0\n while sums_list[sumlist_offset] < subtarget and sumlist_offset < len(sums_list)-1:\n sumlist_offset += 1\n\n # If next sums list value matches subset target, we have a solution.\n if sums_list[sumlist_offset] == subtarget:\n return subset + tuple(number_list[0:sumlist_offset+1])\n\n # Keep only numbers in list less than subset target.\n while number_list[offset] > subtarget and offset > 0:\n offset = offset - 1\n\n # If next number in list matches subset target, we have a solution.\n if number_list[offset] == subtarget:\n return subset + tuple([number_list[offset]])\n\n # Add subsets to queue for any number list values falling between sums list\n # offset and numbers list offset\n step = (sumlist_offset <= offset) and 1 or -1\n for new_offset in range(sumlist_offset, offset+step, step):\n\n new_subset = subset + tuple([number_list[new_offset]])\n new_subtarget = subtarget - number_list[new_offset]\n\n if number_list[0] > new_subtarget:\n break\n\n subset_stack.append((new_offset-1, new_subtarget, new_subset))\n\n # Solution not found\n return []", "def no_sums_exist(numbers_to_check: List[int]) -> bool:\n\n # the specification does not state what to do where the input list is None or empty\n # assume that this meets the criteria\n if not numbers_to_check:\n return True\n\n # sort in place\n numbers_to_check.sort()\n # to help with early termination, select the maximum input number\n max_number = numbers_to_check[-1]\n\n list_length = len(numbers_to_check)\n for left_index in range(list_length - 1):\n # index of the number which could be a sum matching a pair\n # to allow for negative numbers we need to start checking from 0\n candidate_index = 0\n for right_index in range(left_index + 1, list_length):\n # Number pairs must be at distinct indexes and constitute half the possible combinations\n assert left_index < right_index, f\"Unexpected index pair {left_index}, {right_index}\"\n\n pair_sum = numbers_to_check[left_index] + numbers_to_check[right_index]\n # if the sum of this pair is greater than the largest number in the list then all subsequent sums\n # will also be larger so we can stop looking at pairs with this left index now\n if pair_sum > max_number:\n break\n # move the candidate index through the list\n # until the sum of the current pair is the same or larger than the candidate\n while candidate_index < list_length and pair_sum >= numbers_to_check[candidate_index]:\n if numbers_to_check[candidate_index] == pair_sum:\n return False\n candidate_index += 1\n return True", "def test_combinations(self):\r\n self.assertFloatEqual(combinations(5, 3), 10)\r\n self.assertFloatEqual(combinations(5, 2), 10)\r\n # only one way to pick no items or the same number of items\r\n self.assertFloatEqual(combinations(123456789, 0), 1)\r\n self.assertFloatEqual(combinations(123456789, 123456789), 1)\r\n # n ways to pick one item\r\n self.assertFloatEqual(combinations(123456789, 1), 123456789)\r\n # n(n-1)/2 ways to pick 2 items\r\n self.assertFloatEqual(\r\n combinations(\r\n 123456789,\r\n 2),\r\n 123456789 *\r\n 123456788 /\r\n 2)\r\n # check an arbitrary value in R\r\n self.assertFloatEqual(combinations(1234567, 12), 2.617073e64)", "def combinationSum2(self, candidates, target):\n if not candidates:\n return []\n\n candidates = sorted(candidates)\n c, f = [candidates[0]], [1]\n for i in xrange(1, len(candidates)):\n if 
candidates[i-1] == candidates[i]:\n f[-1] += 1\n else:\n c.append(candidates[i])\n f.append(1)\n\n sol_lst, sol = [], []\n self._solve(sol_lst, sol, c, f, len(c)-1, target)\n return sorted(sol_lst)", "def part2(data): # pylint: disable=line-too-long\n combinations_up_to = {0: 1}\n for d in sorted(data):\n val = 0\n for i in range(1, 3 + 1):\n if d - i in combinations_up_to:\n val += combinations_up_to[d - i]\n combinations_up_to[d] = val\n return combinations_up_to[max(data)]", "def find_solutions(target, max_coin=None):\n if target == 0:\n return\n if max_coin is None:\n max_coin = 200\n for coin in DENOMINATIONS:\n if coin > max_coin:\n continue\n if coin <= target:\n solution = [coin]\n new_target = target - coin\n if new_target:\n for x in find_solutions(target - coin, coin):\n yield solution + x\n else:\n yield solution", "def linear_combination(n):\n weighs = (1, 3, 9, 27)\n\n for factors in factors_set():\n sum = 0\n for i in range(len(factors)):\n sum += factors[i] * weighs[i]\n if sum == n:\n return factors" ]
[ "0.6217983", "0.57935846", "0.57762575", "0.574159", "0.56296545", "0.5558328", "0.55192983", "0.5499441", "0.546985", "0.5435398", "0.5432282", "0.5407435", "0.5388396", "0.5349023", "0.53418314", "0.5290699", "0.5256048", "0.5246714", "0.5219322", "0.5185486", "0.51835936", "0.51831174", "0.51682407", "0.5132348", "0.5099472", "0.50698006", "0.5058027", "0.5048252", "0.5010525", "0.4990845" ]
0.79681623
0
Creates a map from the output of 'recursiveSums' with the numbers in its parameter 'values' as the keys, using the helper function 'setFromValues'
def convertSumsToMap(arr, values):
    num_map = {}
    for item in values:
        num_map[item] = []
    for i in range(len(arr)):
        num_map[i+1] = setFromValues(arr[i])
    return num_map
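A minimal sketch of how convertSumsToMap might be called. setFromValues is not shown in this record, so the flattening helper below is an assumed stand-in, and the input shape (the nested list returned by recursiveSums) is likewise an assumption:

def setFromValues(entry):
    # assumed stand-in: flatten one nested recursiveSums result into a set
    flat = set()
    stack = [entry]
    while stack:
        item = stack.pop()
        if isinstance(item, list):
            stack.extend(item)
        elif item is not None:
            flat.add(item)
    return flat

values = [1, 2, 3]
sums = recursiveSums(4, values)           # nested combinations summing to 4
num_map = convertSumsToMap(sums, values)  # keys: the numbers in values, then 1..len(sums) mapped to sets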
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_values(values):\n return (sum(values))", "def __valuesToIndices(self, mappings, values):\n indices = np.empty(0, dtype=np.int_)\n\n for key, _ in mappings.items():\n # Lookup the index of the value of the values in the map.\n index = mappings[key](values[key])\n\n indices = np.hstack((indices, index))\n\n return indices", "def createMap(*values):\n\tresult = dict()\n\tfor i in range(0, len(values), 2):\n\t\tresult[values[i]] = values[i+1]\n\treturn result", "def daily_speed_sum_reduce(key, values):\n\tyield \"%s: %s, %s\\n\" % (key, sum([int(value) for value in values]), len(values))", "def recursiveSums(desiredNum, values, depth=0, max_depth=5):\n depth+=1\n if(depth>max_depth):\n return\n if(len(values)==1):\n if(values[0]==desiredNum):\n return values[0]\n else:\n arr = []\n removals = []\n for i, value in enumerate(values):\n thisDesiredNum = desiredNum-value\n if(thisDesiredNum==0):\n arr.append(value)\n elif(thisDesiredNum>0):\n #quick fix prevents double counting here\n newValues = [l for l in values if(l not in removals)]\n newValues.pop(newValues.index(value))\n arr.append([value])\n if(len(newValues)!=0 and sum(newValues)>=thisDesiredNum):\n newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)\n if(newSums):\n if(isinstance(newSums, int)):\n arr.append([newSums])\n else:\n arr[-1].extend(newSums)\n if(len(arr[-1])==0 or arr[-1]==[value]):\n arr.pop()\n removals.append(value)\n #remove unusable values\n iteratedValues = [value for value in values if(value not in removals)]\n if(iteratedValues):\n arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))\n return arr", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)", "def makeValMap(self,value = 'readcount'):\n self.valMap = np.zeros(len(self))\n self.valMap = self.valMap-1\n myTmp = []\n for x in range(0,len(self)):\n myTmp.append([])\n for i in self.children:\n for j in range(i.start,i.end+1):\n myTmp[j-self.start].append(i.__dict__[value])\n for nt in range(0,len(myTmp)):\n if len(myTmp[nt])>0:\n self.valMap[nt]=sum(myTmp[nt])/len(myTmp[nt])", "def mergeIndiciesToValuesByMap(valueList, valueIndexMap):\r\n if len(valueList) != len(valueIndexMap):\r\n raise Exception(\"Stats: Value list and value index map did not match.\")\r\n return {valueIndexMap[i]: valueList[i] for i in range(len(valueList))}", "def map(keys, values) -> MapValue:\n return ops.Map(keys, values).to_expr()", "def _buildTotalsDict(self, fromdt, todt):\r\n pairs = [(t, t.effectForPeriod(fromdt, todt)) for t in self.transactions]\r\n return dict(pairs)", "def get_values_from_dict(self, values):\n return dict()", "def aggregate_pairs(horizon_host, pairs, start, end, resolution):\n retval = {}\n for pair in pairs:\n name = pair[\"name\"]\n if name not in retval:\n retval[name] = (0, 0, 0)\n retval[name] = sum_tuples(retval[name], aggregate_pair(horizon_host, pair, start, end, resolution))\n return retval", "def transformResults(threadCounts, values, function):\n res = {}\n for bm in list(values.keys()):\n res[bm] = []\n for (nThreads, v) in zip(threadCounts, values[bm]):\n res[bm].append(None if v == None else function(v, nThreads))\n return res", "def values_to_dict(keys: tuple, values: list) -> dict:\n out = {}\n for i, key in enumerate(keys):\n out[key] = np.array([x[i] for x in values])\n return out", "def combiner(self, key, values):\n yield key, sum(values, ValueFormat(0, 0))", "def _partition(mapped_values):\n 
partitioned_data = collections.defaultdict(list)\n\n for key, value in mapped_values:\n partitioned_data[key].append(value)\n\n return partitioned_data.items()", "def reducer(self, key, values):\n yield key, suma_parejas(values)", "def partition(self, mapped_values):\r\n partitioned_data = collections.defaultdict(list)\r\n for key, value in mapped_values:\r\n partitioned_data[key].append(value)\r\n return partitioned_data.items()", "def __indicesToValues(self, mappings, indices):\n values = collections.OrderedDict()\n\n i = 0\n for key, _ in mappings.items():\n values[key] = mappings[key](indices[i])\n\n i = i + 1\n\n return values", "def summed(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield sum(v)", "def zipmap(keys, values):\n z = zip(keys, values)\n data = {k: v for k, v in z}\n return data", "def coordinate_Loc_Val(loc, values):\n assert loc is not None\n assert values is not None\n res = dict()\n for i in range(len(loc)):\n if loc[i] not in res: res[loc[i]] = values[i]\n else: res[loc[i]] += values[i]\n return list(res.keys()), [math.log(i+1) for i in list(res.values())]", "def make_dict(keys, values):\n\n return dict(zip(keys, values))", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def compute_empirical_distribution(values):\n distribution = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def get_values(self):\n\n variable_values = {}\n\n # Do first of all the first ones, as these are special cases.\n for edge in self.node_levels[0]:\n variable_values[edge] = self._random()\n\n # Now loop through the rest, excluding the first level\n for edges in self.node_levels[1:]:\n for edge in edges:\n parents = self.adj_inv[edge]\n partial_values = [self.weights[(parent, edge)]*variable_values[parent]\n for parent in parents]\n\n variable_values[edge] = sum(partial_values)\n\n return variable_values", "def sum_in_list_dyn(number_list, total_value):\n memoization = [[False for i in range(total_value + 1)] for i in range(len(number_list) + 1)]\n\n for i in range(len(number_list) + 1):\n # We can always make 0 with the empty set.\n memoization[i][0] = True\n\n for i in range(1, len(number_list) + 1):\n for j in range(1, total_value + 1):\n if j < number_list[i - 1]:\n memoization[i][j] = memoization[i - 1][j]\n else:\n memoization[i][j] = memoization[i - 1][j] or memoization[i - 1][j - number_list[i - 1]]\n\n return memoization[len(number_list)][total_value]", "def dict_values(self, values):\n return _(_dict(self._, values))", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total" ]
[ "0.59756285", "0.58839905", "0.5788393", "0.57338154", "0.5684472", "0.56488043", "0.5573895", "0.54984564", "0.54971635", "0.54934275", "0.54745364", "0.54526776", "0.5443377", "0.5410857", "0.54092413", "0.5406314", "0.54035735", "0.53682464", "0.53666186", "0.5336206", "0.5309366", "0.5298578", "0.52500534", "0.52344036", "0.51977175", "0.5196533", "0.5192738", "0.5185265", "0.5182611", "0.5157945" ]
0.77104384
0
creates a new entry for each chromosome holding an array with counts for each afs bin; ceil(100.0/perc) bins are created over the allele frequency range [0.0, 1.0]
def create_af_histo(snp_dict, perc):
    for i in snp_dict.keys():
        hist = np.histogram(snp_dict[i]['afs'], bins=np.ceil(100.0/perc), range=(0.0, 1.0))
        # get rid of bins with afs == 0 in the spectrum
        vals = []
        bins = []
        for j, x in enumerate(hist[0]):
            if x > 0:
                if len(vals) == 0:
                    bins.append(hist[1][j])
                elif bins[-1] != hist[1][j]:
                    vals.append(0)
                    bins.append(hist[1][j])
                vals.append(x)
                bins.append(hist[1][j+1])
        snp_dict[i]['afs_hist'] = {'counts': np.array(vals), 'bins': np.array(bins)}
    return True
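A minimal sketch of the input this function expects, assuming each chromosome entry carries an 'afs' array of per-SNP allele frequencies; the chromosome names and values below are invented for illustration. Note that recent NumPy releases require an integer bin count, so int(np.ceil(100.0/perc)) may be needed:

import numpy as np

snp_dict = {
    '2L': {'afs': np.array([0.12, 0.12, 0.55, 0.90])},
    '2R': {'afs': np.array([0.05, 0.33, 0.33])},
}
create_af_histo(snp_dict, perc=10.0)  # ceil(100.0/10.0) = 10 bins over [0.0, 1.0]
# each chromosome now holds snp_dict[chrom]['afs_hist'] with 'counts' and 'bins' arrays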
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)", "def new(num_buckets=256):\n aMap=[]", "def to_bins(filein, fileout, window, numbins, chr=None, generator=None):\n if not generator:\n generator = hg38_generator()\n bam = pysam.AlignmentFile(filein, 'rb')\n cm = []\n for row in generator: # iterate over each chromosome\n if chr is None or (chr is not None and row[0] in chr): # checks for chr #\n count = int(int(row[1]) / window) # number of windows\n res = int(window / numbins)\n chr_i = row[0]\n for i in range(count): # iterate over each window\n win_start = i * window\n win_finish = (i + 1) * window - 1\n cm_i = np.zeros(3 + numbins, dtype=object) # array to hold bin counts info\n cm_i[0] = chr_i\n cm_i[1] = win_start\n cm_i[2] = win_finish\n for j in range(numbins): # iterate over each bin\n bin_start = win_start + j * res\n bin_finish = win_start + (j + 1) * res - 1\n cm_i[j + 3] = bam.count(chr_i, bin_start, bin_finish)\n cm.append(cm_i)\n status_statement(i, count, 20, chr_i)\n np.savetxt(fileout + \".csv\", np.asarray(cm), fmt='%s', delimiter=',')\n bam.close()", "def _coverage(self, chr, limit, nbins):\n\n c = np.zeros(nbins, dtype=np.int)\n chr_start, chr_stop = self.refs[chr][1:]\n bin_size = float((limit[1] - limit[0]) / nbins)\n\n for i in range(chr_start, chr_stop):\n read_start = self.lines[i][3]\n read_len = len(self.lines[i][9])\n\n start_bin = int((read_start - limit[0]) / bin_size)\n stop_bin = int((read_start + read_len - limit[0]) / bin_size)\n\n # print start_bin, stop_bin\n c[start_bin:stop_bin + 1] += 1\n \n return c", "def __init__(self, bins):\n self.bins = bins", "def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)", "def createBinsByEntropy(self, data, structure, colName, numOfBins):\n splits = self.miningCalculator.getBestSplitsInDataByInfoGain(data, structure, colName, numOfBins-1)\n splits.sort()\n bins = {\"value<=\"+str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins-1):\n bins[str(splits[i-1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i-1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits)-1])] = (lambda x: x > splits[len(splits)-1])\n return bins", "def push_histogram(self, data):\n # Loop through bands of this tile\n for i, dat in enumerate(data):\n # Create histogram for new data with the same bins\n new_hist = numpy.histogram(dat['data'], bins=self.hist_bins[i])\n # Add counts of this tile to band metadata histogram\n self.hist_values[i] += new_hist[0]", "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n 
# Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def bin_sizing(self):\n\n self.log.info(\"Begin Re-Binning the Genome Space.\")\n new_list = []\n seg_num = 0\n\n for chrom in natsort.natsorted(self.seg_analyzer.chrom_list):\n self.log.debug(\"Binning Chromosome {0}\".format(chrom))\n\n # Some chromosomes have no segments.\n try:\n chrom_slice = \\\n self.seg_analyzer.seg_copy_array[self.seg_analyzer.seg_copy_array[:, 1] == chrom.encode()]\n seg_count = chrom_slice.shape[0]\n coord_start = int(chrom_slice[0, 2])\n except IndexError:\n continue\n\n for i in range((seg_count-1)):\n if (i+1) < seg_count and (i+1) % int(self.args.Combine_Segments) == 0:\n coord_stop = int(chrom_slice[i, 3])\n new_list.append([seg_num, chrom.encode(), coord_start, coord_stop])\n\n coord_start = int(chrom_slice[i+1, 2])\n seg_num += 1\n\n self.log.info(\"Genome Space Successfully Re-Binned.\")\n\n return numpy.array(new_list, dtype='object')", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def __init__(self):\n self.buckets = 1009\n self.table = [[] for _ in range(self.buckets)]", "def create_bin_indeces(snp_dict,sig_snp):\n for i in sig_snp.keys():\n dig=np.digitize(snp_dict[i]['afs'],sig_snp[i]['afs_hist']['bins'])\n # bin indeces are shifted +1 against histogram count indeces\n dig -= 1\n indx_bins=defaultdict(list)\n for j,x in enumerate(dig):\n indx_bins[x].append(j)\n for j in indx_bins.keys():\n indx_bins[j]=np.array(indx_bins[j])\n snp_dict[i]['bin_idx']=indx_bins\n return True", "def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins", "def test_bin_entries(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np:\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame(\n {'A': [0, 1, 2, 3, 4, 3, 2, 1, 1, 1], 'C': ['f1', 'f3', 'f4', 'f3', 'f4', 'f2', 'f2', 'f1', 'f3', 'f4']})\n df2 = pd.DataFrame(\n {'A': [2, 3, 4, 5, 7, 4, 6, 5, 7, 8], 'C': ['f7', 'f3', 'f5', 'f8', 'f9', 
'f2', 'f3', 'f6', 'f7', 'f7']})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist0 = hg.Categorize(unit('C'))\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=10, low=0.0, high=10., quantity=unit('A'))\n hist5 = hg.Bin(num=10, low=0.0, high=10., quantity=unit('A'))\n\n # fill them\n hist0.fill.numpy(df1)\n hist1.fill.numpy(df2)\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n hist4.fill.numpy(df1)\n hist5.fill.numpy(df2)\n\n labels0 = hist0.bin_labels()\n labels1 = hist1.bin_labels()\n centers2 = hist2.bin_centers()\n centers3 = hist3.bin_centers()\n centers = hist4.bin_centers()\n\n import numpy as np\n np.testing.assert_array_equal(hist0.bin_entries(), [2., 2., 3., 3.])\n np.testing.assert_array_equal(hist1.bin_entries(), [1., 2., 1., 1., 3., 1., 1.])\n np.testing.assert_array_equal(hist0.bin_entries(labels=labels1), [2., 3., 0., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist1.bin_entries(labels=labels0), [0., 1., 2., 0.])\n\n np.testing.assert_array_equal(hist2.bin_entries(), [1., 4., 2., 2., 1.])\n np.testing.assert_array_equal(hist3.bin_entries(), [1., 1., 2., 2., 1., 2., 1.])\n np.testing.assert_array_equal(hist4.bin_entries(), [1., 4., 2., 2., 1., 0., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist5.bin_entries(), [0., 0., 1., 1., 2., 2., 1., 2., 1., 0.])\n\n np.testing.assert_array_equal(hist2.bin_entries(xvalues=centers3), [2., 2., 1., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist3.bin_entries(xvalues=centers2), [0., 0., 1., 1., 2.])\n np.testing.assert_array_equal(hist2.bin_entries(xvalues=centers), [\n 1., 4., 2., 2., 1., 0., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist3.bin_entries(xvalues=centers), [\n 0., 0., 1., 1., 2., 2., 1., 2., 1., 0.])\n\n np.testing.assert_array_equal(hist2.bin_entries(low=2.1, high=11.9), [\n 2., 2., 1., 0., 0., 0., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist3.bin_entries(low=1.1, high=5.4), [0., 1., 1., 2., 2.])\n np.testing.assert_array_equal(hist4.bin_entries(low=2.1, high=11.9), [2., 2., 1., 0., 0., 0., 0., 0.])\n np.testing.assert_array_equal(hist5.bin_entries(low=1.1, high=5.4), [0., 1., 1., 2., 2.])", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def __init__(self, nBin, realHists, imagHists, normHists, indexHists, coma, integralReal = None, integralImag = None):\n\t\tself.bin3pi = nBin\n\t\tself.binCenter = 0.52 + .04*nBin\n\t\tif not len(realHists) == len(imagHists) or not len(imagHists) == len(normHists) or not len(normHists) == len(indexHists):\n\t\t\tprint \"Numbers of histogams do not match:\"\n\t\t\tprint \" real:\",len(realHists)\n\t\t\tprint \" imag:\",len(imagHists)\n\t\t\tprint \" norm:\",len(normHists)\n\t\t\tprint \" index:\",len(indexHists)\n\t\t\traise ValueError(\"Histogram size mismatch\")\n\t\tself.nSect = len(realHists)\n\t\tif self.nSect == 0:\n\t\t\traise ValueError(\"No histograms given.\")\n\t\tself.nBins = [ ]\n\t\tself.totalBins = 0\n\t\tself.sectors = [ ]\n\t\tfor s in range(self.nSect):\n\t\t\tbinMax = 0\n\t\t\tfor bin in range(realHists[s].GetNbinsY()):\n\t\t\t\tm2Pi = realHists[s].GetYaxis().GetBinCenter( bin+1)\n\t\t\t\tm3Pi = realHists[s].GetXaxis().GetBinCenter(nBin+1)\n\t\t\t\tif utils.isValidPhaseSpace(m3Pi, m2Pi):\n#\t\t\t\tif 
realHists[s].GetBinContent(nBin + 1, bin+1) != 0.:\n\t\t\t\t\tbinMax = bin\n\t\t\tself.nBins.append(binMax+1)\n\t\t\tself.totalBins += binMax+1\n\t\t\tself.sectors.append(realHists[s].GetTitle().split('_')[0])\n\t\tself.reals = np.zeros((self.totalBins))\n\t\tself.imags = np.zeros((self.totalBins))\n\t\tself.norms = np.zeros((self.totalBins))\n#\t#\tCMwrite(\"__init__\")\n\t\tself.coma = np.zeros((2*self.totalBins,2*self.totalBins))\n\t\tself.hasIntegralMatrix = False\n\t\tif integralReal and integralImag:\n\t\t\tself.hasIntegralMatrix = True\n\t\t\tself.integralMatrix = np.zeros((self.totalBins, self.totalBins), dtype = complex)\n\t\telif integralReal:\n\t\t\traise RuntimeError(\"Cannot handle real integral matrix only, need also imaginary\")\n\t\telif integralImag:\n\t\t\traise RuntimeError(\"Cannot handle imaginary integral matrix only, need also real\")\n\t\tself.binCenters = np.zeros((self.totalBins))\n\t\tself.numLim = 2.e-8\n\t\tself.ownPinv = True\n\t\tcount = 0\n\t\tfor s in range(self.nSect):\n\t\t\tfor bin in range(self.nBins[s]):\n\t\t\t\tself.reals[count] = realHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.imags[count] = imagHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.norms[count] = normHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.binCenters[count] = realHists[s].GetYaxis().GetBinCenter(bin + 1)\n\t\t\t\tcomaIndex = int(round(indexHists[s].GetBinContent(nBin + 1, bin + 1)))\n\t\t\t\tcount2 = 0\n\t\t\t\tfor s2 in range(self.nSect):\n\t\t\t\t\tfor bin2 in range(self.nBins[s2]):\n\t\t\t\t\t\tcomaIndex2 = int(round(indexHists[s2].GetBinContent(nBin + 1, bin2 + 1)))\n\t\t\t\t\t\tself.coma[2*count , 2*count2 ] = coma.GetBinContent(2*comaIndex+1, 2*comaIndex2+1)\n\t\t\t\t\t\tself.coma[2*count , 2*count2+1] = coma.GetBinContent(2*comaIndex+1, 2*comaIndex2+2)\n\t\t\t\t\t\tself.coma[2*count+1, 2*count2 ] = coma.GetBinContent(2*comaIndex+2, 2*comaIndex2+1)\n\t\t\t\t\t\tself.coma[2*count+1, 2*count2+1] = coma.GetBinContent(2*comaIndex+2, 2*comaIndex2+2)\n\t\t\t\t\t\tif self.hasIntegralMatrix:\n\t\t\t\t\t\t\tval = integralReal.GetBinContent(comaIndex+1, comaIndex2+1) + 1.j*integralImag.GetBinContent(comaIndex+1, comaIndex2+1)\n\t\t\t\t\t\t\tself.integralMatrix[count,count2] = val\n\t\t\t\t\t\tcount2 += 1\n\t\t\t\tcount +=1\n\t\tself.hasMassRange = False\n\t\tself.makeComaInv()\n\t\tself.borders = [0]\n\t\tfor i in range(self.nSect):\n\t\t\tself.borders.append(self.borders[-1] + self.nBins[i])\n\t\tself.nZero = 0\n\t\tself.zeroModes = [ ]\n\t\tself.zeroModeNumbers = [ ]\n\t\tself.zeroModeTitles = [ ]\n\t\tself.zeroEigenvalues = [ ]\n\t\tself.hasTheo = False\n\t\tself.chi2init = False\n\t\tself.zeroModesRemovedFromComa = False\n\t\tself.globalPhaseRemoved = False\n\t\tself.specialCOMAs = { }\n\t\tself.hasZMP = False\n\t\tself.zeroModeParameters = None\n\t\tself.hasRandomizedAmplitudes = False", "def compute_histogram(self):\n # compute distance between points \n distmatrix = np.sqrt(pdist(self.points))\n if not self.mean_dist:\n self.mean_dist = np.mean(distmatrix)\n distmatrix = distmatrix/self.mean_dist\n distmatrix = squareform(distmatrix)\n #compute angles between points\n angles = compute_angles(self.points)\n #quantize angles to a bin\n tbins = np.floor(angles / (2 * pi / self.nbins_theta))\n lg = np.logspace(self.r1, self.r2, num=5)\n #quantize radious to bins\n rbins = np.ones(angles.shape) * -1\n for r in lg:\n counts = (distmatrix < r) \n rbins = rbins + counts.astype(int) \n return rbins, tbins", "def __init__(self):\n self.buckets = 1009\n 
self.table = [{} for _ in range(self.buckets)]", "def get_gridpoint_histograms(self):\n\n ind_array = np.indices(self.results_array.shape)\n\n def results_array_histograms(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Num_zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n # hist, bin_edges = np.histogram(hist_arr, bins=20)\n colour_dict = {\"acceptor\": \"r\", \"donor\": \"b\", \"apolar\": \"y\"}\n hist_name = self.prot_name + '_' + self.probe + '_{}_{}_{}'.format(x, y, z)\n\n plt.figure(1)\n plt.hist(hist_arr, bins=20, color=colour_dict[self.probe])\n plt.figtext(0.6, 0.8, ('Number of zero values:' + str(num_zeros)))\n plt.title('Score distribution at point x:{}, y:{}, z:{}'.format(x, y, z))\n plt.xlabel('Fragment hotspot score')\n plt.ylabel('Frequency')\n plt.savefig(join(self.out_dir, hist_name))\n plt.close()\n\n print('Generating Histograms')\n vresults_array_histograms = np.vectorize(results_array_histograms)\n vresults_array_histograms(ind_array[0], ind_array[1], ind_array[2])", "def bins(self, value):\n self.num_bins = int(value)", "def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 
1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp", "def create_gc_bin_index(genome, fname, min_bin_size=100):\n logger.info(\"Creating index for genomic GC frequencies.\")\n g = Genome(genome)\n fasta = g.filename\n sizes = g.filename + \".sizes\" # props[\"sizes\"][\"sizes\"]\n\n with NamedTemporaryFile() as tmp:\n # pylint: disable=unexpected-keyword-arg\n pybedtools.BedTool().window_maker(g=sizes, w=min_bin_size).nucleotide_content(\n fi=fasta\n ).saveas(tmp.name)\n df = pd.read_csv(\n tmp.name,\n sep=\"\\t\",\n usecols=[0, 1, 2, 4, 9],\n dtype={\n \"#1_usercol\": \"string\",\n \"2_usercol\": np.int64,\n \"3_usercol\": np.int64,\n \"5_pct_gc\": np.float32,\n \"10_num_N\": np.int8,\n },\n )\n\n cols = [\n \"chrom\",\n \"start\",\n \"end\",\n f\"w{min_bin_size}\",\n f\"n{min_bin_size}\",\n ]\n for t in (2, 5):\n df[f\"w{min_bin_size * t}\"] = df.iloc[:, 3].rolling(t, min_periods=t).mean()\n df[f\"n{min_bin_size * t}\"] = df.iloc[:, 4].rolling(t, min_periods=t).sum()\n cols += [f\"w{min_bin_size * t}\", f\"n{min_bin_size * t}\"]\n\n df.columns = cols\n\n # Make really sure that column 'chrom' is a string\n df.dropna(subset=[\"chrom\"], inplace=True)\n df[\"chrom\"] = df[\"chrom\"].apply(str).astype(\"string\")\n\n df.reset_index()[cols].to_feather(fname)", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def htable(nbuckets):", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def disp_map(disp):\n map = np.array([\n [0, 0, 0, 114],\n 
[0, 0, 1, 185],\n [1, 0, 0, 114],\n [1, 0, 1, 174],\n [0, 1, 0, 114],\n [0, 1, 1, 185],\n [1, 1, 0, 114],\n [1, 1, 1, 0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0] - 1, map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] - 1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0] - 1] / cbins[cbins.shape[0] - 1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6, 1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size + 1, 1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s, 0:3] * np.tile(1 - disp, (1, 3)) + map[s + 1, 0:3] * np.tile(disp, (1, 3))\n\n return disp", "def __init__(self, x, bin_edges, Nsamp):\n raw_vals, bin_edges = np.histogram(x, bins=bin_edges, normed=False)\n self.bin_edges = bin_edges\n self.bin_widths = np.diff(self.bin_edges)\n self.bin_centers = 0.5*(self.bin_edges[:-1] + self.bin_edges[1:])\n \n P, low, high = np.array([BinomialErrors(v, Nsamp) for v in raw_vals]).T\n self.raw_vals = P\n self.raw_low = low\n self.raw_high = high\n self.complete_vals = None\n self.malm_vals = None\n return" ]
[ "0.6291447", "0.6064143", "0.6035484", "0.60193425", "0.59708476", "0.59208965", "0.59045327", "0.58342344", "0.5824317", "0.5811913", "0.5811913", "0.5807041", "0.5800794", "0.57974356", "0.57807654", "0.5760431", "0.57553273", "0.57453716", "0.5717335", "0.5683787", "0.56476355", "0.5646826", "0.56411916", "0.56298244", "0.5601383", "0.5585742", "0.556831", "0.5566041", "0.55616444", "0.5550836" ]
0.6163611
1
creates a dictionary of the bin index positions in the AF histogram for each chromosome, with np arrays of the indices of all SNPs in that frequency bin for sampling.
def create_bin_indeces(snp_dict, sig_snp):
    for i in sig_snp.keys():
        dig = np.digitize(snp_dict[i]['afs'], sig_snp[i]['afs_hist']['bins'])
        # bin indeces are shifted +1 against histogram count indeces
        dig -= 1
        indx_bins = defaultdict(list)
        for j, x in enumerate(dig):
            indx_bins[x].append(j)
        for j in indx_bins.keys():
            indx_bins[j] = np.array(indx_bins[j])
        snp_dict[i]['bin_idx'] = indx_bins
    return True
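A minimal sketch of how the two helpers might be chained for frequency-matched sampling, assuming sig_snp holds the significant SNPs whose AF spectrum (built by create_af_histo) defines the bins and snp_dict holds the SNPs to draw from; both dictionaries and the sampling loop are illustrative assumptions:

import numpy as np
from collections import defaultdict

create_af_histo(sig_snp, perc=5.0)     # build the target AF spectrum per chromosome
create_bin_indeces(snp_dict, sig_snp)  # map snp_dict SNPs into those bins

# draw one frequency-matched SNP per occupied bin
for chrom in sig_snp.keys():
    for b, idx in snp_dict[chrom]['bin_idx'].items():
        pick = np.random.choice(idx)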
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def create_af_histo(snp_dict,perc):\n for i in snp_dict.keys():\n hist=np.histogram(snp_dict[i]['afs'], bins=np.ceil(100.0/perc), range=(0.0,1.0))\n # get rid of bins with afs == 0 in the spectrum\n vals=[]\n bins=[]\n for j,x in enumerate(hist[0]):\n if x > 0:\n if len(vals) == 0:\n bins.append(hist[1][j])\n elif (bins[-1] != hist[1][j] ):\n vals.append(0)\n bins.append(hist[1][j])\n vals.append(x)\n bins.append(hist[1][j+1])\n snp_dict[i]['afs_hist'] = { 'counts':np.array(vals),'bins':np.array(bins) }\n return True", "def get_indices_convert_dict(fn):\n pdb_inp = pdb.input(file_name=fn)\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n \n newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms()))\n oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms()))\n \n return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]),\n 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}", "def get_canidate_histogram(self, *names, **params):\n num = dict([ (i, []) for i in self.SYMBOL_SET ])\n\n for position,col in self.get_canidate_list(names, **params).iteritems():\n for digit in col:\n num[digit].append(position)\n\n return num", "def calculate_histograms_from_assignments(self, funcs, bin_size=1):\n result = defaultdict(lambda: defaultdict(\n lambda: Histogram(bin_size)\n ))\n for assignment in self.get_assignment_reader():\n for name, func in funcs.iteritems():\n value = func(assignment)\n if value is None:\n continue\n result[name][assignment.source].add(value)\n return result", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def expanded_counts_map(self):\n if self.hpx._ipix is None:\n return self.counts\n\n output = np.zeros(\n (self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)\n for i in range(self.counts.shape[0]):\n output[i][self.hpx._ipix] = self.counts[i]\n return output", "def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins", "def window_histogram(f,pl, Bc, bins = []):\n\t\n\tif bins == []:\n\t\tbins = f.max()+1\n\tndim = f.ndim\n\tif ndim == 2:\n\t\tf = f.reshape(1,f.shape[0],f,shape[1])\n\t\n\tf = f.astype(np.int32)\n\t\n\toff = se2off(Bc)\n\tif ndim == 2:\n\t\toff = np.concatenate((np.zeros((off.shape[0],1), dtype = np.int32),off), axis = 1)\n\t\tpl = np.concatenate((np.zeros((pl.shape[0],1), dtype = np.int32),off), axis = 1)\n\t\n\thists = np.zeros((pl.shape[0],bins), dtype = np.int32)\n\t\n\twatershed_c.window_histogram_aux_c(f,off,pl,hists) \n\treturn hists", "def fft_bin_to_index(self, bins):\n 
idx = bins.copy()\n return idx", "def generate_formula_histogram(self):\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram", "def calc_pIdent_hist(infile, outfile):\n data_dict = {}\n\n print(f'Parsing file {infile}')\n parse_magic_blast(infile, data_dict)\n\n\n with open(outfile, 'w') as o:\n\n print(f'File parsed. Writing to {outfile}')\n header_order = sorted(data_dict.keys())\n header = 'pIdent\\t' + '\\t'.join(header_order) + '\\n'\n\n o.write(header)\n\n for i in reversed(range(70, 101)):\n buildLine = [str(i)]\n\n for j in header_order:\n buildLine.append(str(data_dict[j][i]))\n\n o.write('\\t'.join(buildLine) + '\\n')", "def get_histogram( self , statistic_file ):\n histogram_bin = {}\n label_count = {}\n with open(statistic_file, 'r') as statistic_file:\n statistic_list = json.load( statistic_file )\n for feature_type in statistic_list:\n if feature_type not in histogram_bin:\n histogram_bin[feature_type] = {}\n if feature_type not in label_count:\n label_count[feature_type] = defaultdict(int)\n for result_instance in statistic_list[feature_type]:\n versus_labels = []\n svm_values = []\n original_label = None\n for versus_label, label, svm_value in result_instance:\n if original_label == None:\n original_label = label\n elif original_label != label:\n print 'Weird instance'\n versus_labels.append(versus_label)\n svm_values.append(svm_value)\n \n label_count[feature_type][original_label] += 1\n label_group = []\n for versus_label in versus_labels:\n label_group.append(tuple(versus_label))\n \n if not (tuple(label_group) in histogram_bin[feature_type]):\n histogram_bin[feature_type][tuple(label_group)] = {}\n if not original_label in histogram_bin[feature_type][tuple(label_group)]:\n histogram_bin[feature_type][tuple(label_group)][original_label] = defaultdict(int)\n bin_indices = tuple([self.get_bin(value) for value in svm_values])\n histogram_bin[feature_type][tuple(label_group)][original_label][bin_indices]+=1\n return label_count, histogram_bin", "def vec2spec_dict(n_bins, vec, spectra):\n return {spec: vec[i * n_bins:(i + 1) * n_bins] for i, spec in enumerate(spectra)}", "def _fast_hist_2d(data, bin_edges):\n # Yes, I've tested this against histogramdd().\n xassign = np.digitize(data[:,0], bin_edges[1:-1]) \n yassign = np.digitize(data[:,1], bin_edges[1:-1])\n nbins = len(bin_edges) - 1\n flatcount = np.bincount(xassign + yassign * nbins, minlength=nbins*nbins)\n return flatcount.reshape((nbins, nbins))", "def get_gridpoint_histograms(self):\n\n ind_array = np.indices(self.results_array.shape)\n\n def results_array_histograms(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Num_zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n # hist, bin_edges = np.histogram(hist_arr, bins=20)\n colour_dict = {\"acceptor\": \"r\", \"donor\": \"b\", \"apolar\": \"y\"}\n hist_name = self.prot_name + '_' + self.probe + '_{}_{}_{}'.format(x, y, z)\n\n plt.figure(1)\n plt.hist(hist_arr, bins=20, color=colour_dict[self.probe])\n plt.figtext(0.6, 0.8, ('Number of zero values:' + str(num_zeros)))\n plt.title('Score distribution at point x:{}, y:{}, z:{}'.format(x, y, z))\n plt.xlabel('Fragment hotspot score')\n plt.ylabel('Frequency')\n plt.savefig(join(self.out_dir, hist_name))\n plt.close()\n\n print('Generating Histograms')\n 
vresults_array_histograms = np.vectorize(results_array_histograms)\n vresults_array_histograms(ind_array[0], ind_array[1], ind_array[2])", "def get_bin_seeds(X, bin_size, min_bin_freq=1):\n # Bin points\n pseudo_labels = np.round(X/bin_size)\n bin_seeds = {}\n for i in pseudo_labels:\n i = tuple(i)\n if i not in bin_seeds:\n bin_seeds[i] = 0\n else:\n bin_seeds[i] += 1\n return bin_seeds", "def get_histogram(self):\n\n values_array = np.array(self.values)\n for bin0 in range(self.bins[0].size):\n bin_inf0 = self.bins[0][bin0]\n try: bin_sup0 = self.bins[0][bin0 + 1]\n except IndexError: bin_sup0 = self.vmax[0]\n values = values_array[\n (values_array[:, 0] >= bin_inf0)\n *(values_array[:, 0] < bin_sup0)][:, 1]\n for bin1 in range(self.bins[1].size):\n bin_inf1 = self.bins[1][bin1]\n try: bin_sup1 = self.bins[1][bin1 + 1]\n except IndexError: bin_sup1 = self.vmax[1]\n self.hist[bin0*self.Nbins[1] + bin1, 2] = (\n np.sum((values >= bin_inf1)*(values < bin_sup1)))\n\n if np.sum(self.hist[:, 2]) > 0: # there are binned values\n self.hist[:, 2] /= np.sum(self.hist[:, 2])\n return self.hist", "def build_bins(snps, table):\n overlap_bins = collections.defaultdict(list)\n nonoverlap_bins = collections.defaultdict(list)\n for rsid, *key in table:\n k = tuple(key)\n if snps[rsid][1]:\n overlap_bins[k].append(rsid)\n else:\n nonoverlap_bins[k].append(rsid)\n return overlap_bins, nonoverlap_bins", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def histogramintegrals(self):\n return {}", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)", "def solution_histogram(s):\n d = dict()\n for c in s:\n d[c] = d.get(c,0) + 1\n return d", "def read_chr(fpath):\n\t# init dict and indices\n\tchrom_dicts={}\n\tstart=0\n\tindex=0\n\n\t# iterate through chromosome scores \n\tfor line in fileinput.input(fpath):\n\t\tx=line.split()\n\t\t\n\t\t# if chromosome skips some region, then normalize the previous window (<100 bp) and init new window \t\n\t\tif len(x)==4:\n\t\t\tif start in chrom_dicts:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\tstart=int(x[2].split(\"=\")[1])\n\t\t\tchrom_dicts[start]=0\n\t\t\tindex=0\n\n\t\t# if not a black region, then make news windows every 100 locations\n\t\tif len(x)==1:\n\t\t\tchrom_dicts[start]+=float(x[0])\n\t\t\tif 
index==100:\n\t\t\t\tchrom_dicts[start]/=index\n\t\t\t\tindex=0\n\t\t\t\tstart+=100\n\t\t\t\tchrom_dicts[start]=0\n\t\t\tindex+=1\n\t\n\t# track chromosomes that have been binned\n\tprint(\"%s %d\" % (fpath,len(chrom_dicts)))\n\treturn(chrom_dicts)", "def histogram(self):\n if np.size(self.stats['Counts']): # don't do anything to an empty list\n if np.size(self.bins) and not self.redo:\n return self.bins, self.occs, self.thresh\n elif np.size(self.bin_array) > 0: \n self.occs, self.bins = np.histogram(self.stats['Counts'], self.bin_array) # fixed bins. \n else:\n try:\n lo, hi = min(self.stats['Counts'])*0.97, max(self.stats['Counts'])*1.02\n # scale number of bins with number of files in histogram and with separation of peaks\n num_bins = int(15 + self.ind//100 + (abs(hi - abs(lo))/hi)**2*15) \n self.occs, self.bins = np.histogram(self.stats['Counts'], bins=np.linspace(lo, hi, num_bins+1)) # no bins provided by user\n except: \n self.occs, self.bins = np.histogram(self.stats['Counts'])\n else: self.occs, self.bins = np.zeros(10), np.arange(0,1.1,0.1)\n return self.bins, self.occs, self.thresh", "def bin_by(bin_quantity, bins):\n\n hist_item, bin_edges = np.histogram(bin_quantity[~np.isnan(bin_quantity)], bins=bins)\n \n bin_indx = np.zeros(bin_quantity.shape, np.float_)\n\n # Step through bins\n for i in range(len(bin_edges) - 1):\n \n # Index each bin by bin quantify value\n bin_indx[np.where((bin_quantity > bin_edges[i]) & (bin_quantity < bin_edges[i + 1]))] = i + 1\n \n return bin_indx, bin_edges", "def new_counts_dict():\n\n\tIN_FILES = [\"../_semtag_dataset_webanno_tfidf_inimigo.txt\",\"../_semtag_dataset_webanno_tfidf_publico.txt\" ]\n\n\ttxt = []\n\tfor in_file in IN_FILES:\n\t with codecs.open(in_file,\"r\",\"utf-8\") as fid:\n\t txt += fid.readlines()\n\t#words\n\twords = [w for m in txt for w in m.split()]\n\t#unique words\n\twords = list(set(words))\n\t#word index\n\twrd2idx = {w:-1 for w in words}\n\n\tset_trace()\n\t\n\twith open(COUNTS_DIC,\"w\") as fod:\n\t\tcPickle.dump(wrd2idx, fod, cPickle.HIGHEST_PROTOCOL)", "def ana_fixations_spatial_distribution(data_dict):\n counter = collections.Counter()\n for search in data_dict['searches'].viewvalues():\n for fixation in search['path'][1:]: # Exclude first fixation. It is always (0,0).\n counter[tuple(fixation)] += 1\n frequency_map = np.zeros((data_dict['senzory_map'].shape[0],1))\n for i in xrange(len(frequency_map)):\n frequency_map[i] = counter[tuple(data_dict['senzory_map'][i])]\n return np.hstack((data_dict['senzory_map'],frequency_map))" ]
[ "0.6426293", "0.63538736", "0.6245193", "0.5960386", "0.595216", "0.592225", "0.5840666", "0.58318233", "0.5806224", "0.5802384", "0.577433", "0.5734818", "0.5723627", "0.5722175", "0.5704723", "0.57046944", "0.5703294", "0.5689904", "0.5680895", "0.5678474", "0.5669681", "0.56625897", "0.56580657", "0.5635162", "0.5629598", "0.56127614", "0.5603815", "0.55956143", "0.5587014", "0.556781" ]
0.77047056
0
uses np.random.choice to sample SNP indices from the bin index structure according to a , around 100 times slower than the self-created way. The replace=False argument creates the performance hit
def create_random_sample(idx_bins,count_array): idxs=[] for i,x in enumerate(count_array): if x > 0: idxs.extend(np.random.choice(idx_bins[i],size=x,replace=False)) return idxs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def choice(array, size, replace):\n indices = np.random.choice(len(array), size=size, replace=replace)\n sample = array[indices]\n return sample", "def create_random_sample_alt(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend([ idx_bins[i][ind] for ind in unique_sample_of_int(len(idx_bins[i])-1,x) ] )\n return idxs", "def sample(self):\n sample = np.zeros(self.n, dtype=self.dtype)\n sample[self.np_random.choice(self.n, self.np_random.random_integers(low=self.low_limit, high=self.high_limit),\n replace=False)] = 1\n return sample", "def sampleWithReplacement(population, choiceSize):\n\n n = len(population)\n _random, _int = random.random, int # speed hack\n return [_int(_random()*n) for _ in itertools.repeat(None, choiceSize)]", "def random_subsample(neuron, num):\n\n I = np.arange(neuron.n_soma, neuron.n_node)\n np.random.shuffle(I)\n selected_index = I[0:num - 1]\n selected_index = np.union1d([0], selected_index)\n selected_index = selected_index.astype(int)\n selected_index = np.unique(np.sort(selected_index))\n\n return neuron_with_selected_nodes(neuron, selected_index)", "def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]", "def post(self, s):\n return np.random.choice(self.sample_list)", "def deterministic_sample(choices, n_to_sample, p): # pylint: disable=invalid-name\n\n sample_counts = np.ceil(n_to_sample * p).astype(int)\n\n n_to_remove = np.sum(sample_counts) - n_to_sample\n\n if n_to_remove == 0:\n return choices[counts_to_vector(sample_counts)]\n\n non_zero_mask = sample_counts > 0\n\n removal_indices = np.floor(np.linspace(0.0,\n np.sum(non_zero_mask),\n n_to_remove,\n endpoint=False)).astype(int)\n\n tmp = sample_counts[non_zero_mask]\n tmp[removal_indices] = tmp[removal_indices] - 1\n\n sample_counts[non_zero_mask] = tmp\n\n assert np.sum(sample_counts) == n_to_sample\n\n samples = choices[counts_to_vector(sample_counts)]\n\n return samples", "def sample(self, probs):\n all_abstain = (self.label_matrix == -1).sum(axis=1) == self.label_matrix.shape[1]\n self.is_in_pool = (self.ground_truth_labels == -1) & ~ all_abstain & (self.y_train != -1)\n self.valid_buckets = np.unique(self.unique_inverse[self.is_in_pool])\n self.is_valid_bucket = np.array([\n True if i in self.valid_buckets else False for i in range(len(self.unique_idx))])\n self.bucket_probs = probs.detach().numpy()[self.unique_idx]\n\n pick = random.uniform(0, 1)\n if pick < self.randomness:\n # Choose random bucket instead of following a specific query strategy\n chosen_bucket = np.random.choice(self.valid_buckets)\n else:\n chosen_bucket = np.random.choice(self.query())\n\n return random.choice(np.where((self.unique_inverse == chosen_bucket) & self.is_in_pool)[0])", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n repeat_ = num_expected // pos_inds.numel()\n return torch.cat((pos_inds.repeat(repeat_), self.random_choice(pos_inds, num_expected % pos_inds.numel())))\n else:\n return self.random_choice(pos_inds, num_expected)", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), 
np.random.rand())", "def draw_random_sample(choices, probabilities, n):\n values = np.array(range(len(choices)))\n probs = np.array(probabilities)\n bins = np.add.accumulate(probs)\n inds = values[np.digitize(random_sample(n), bins)]\n samples = []\n for i in inds:\n samples.append(deepcopy(choices[int(i)]))\n return samples", "def sample_with_replacement(a, size):\n return \"\".join([random.SystemRandom().choice(a) for x in range(size)])", "def _random_sample(self, pool_size, train_mask, sample_size):\n\n candidate_mask = -train_mask\n\n if 0 < self.sample_size < np.sum(candidate_mask):\n unlabelled_index = np.where(candidate_mask)[0]\n candidate_index = self.rng.choice(unlabelled_index, self.sample_size, replace=False)\n candidate_mask = np.zeros(pool_size, dtype=bool)\n candidate_mask[candidate_index] = True\n\n return candidate_mask", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]", "def sampleNo(xvar, yvar, N, avoididx):\n\n allidx = np.arange(0, len(xvar)*len(yvar)) # flattened array of all indices in mesh\n noidx = np.setxor1d(allidx, avoididx) #allidx - avoididx\n #noidx = np.array(list(set(allidx) - set(avoididx)))\n nosampleidx = np.random.choice(noidx, size=N,replace=False)\n newavoididx = np.sort(np.hstack((avoididx, nosampleidx)))\n rowidx,colidx = np.unravel_index(nosampleidx, (len(yvar), len(xvar)))\n samples = []\n for row,col in zip(rowidx, colidx):\n xp = xvar[col]\n yp = yvar[row]\n samples.append((xp, yp))\n\n return (samples, newavoididx)", "def neighborhood_sample(solution, ssize):\n n_runs = len(solution)\n n_clusters = len(solution[0])\n \n swaps_per_run = n_clusters * (n_clusters-1) // 2\n max_size = n_runs * swaps_per_run\n \n ssize = min(ssize, max_size)\n \n combinations = list(itertools.combinations(range(n_clusters), 2))\n selection = random.sample(range(max_size), ssize)\n \n nb = [0] * ssize\n \n for i_nb, i_sel in enumerate(selection):\n i_run = i_sel // swaps_per_run\n i_comb = i_sel % swaps_per_run\n \n i1, i2 = combinations[i_comb]\n swapped = solution[i_run][:]\n swapped[i1], swapped[i2] = swapped[i2], swapped[i1]\n nb[i_nb] = solution[:i_run] + [swapped] + solution[i_run+1:]\n \n return selection, nb", "def _sample_pairs_nbp(data, frac, size_cap=np.int(1e6)):\n sample_size = int(len(data) * (len(data) - 1) / 2 * frac)\n sample_size = min(sample_size, size_cap)\n pairs = np.empty((sample_size, 2))\n for i in numba.prange(sample_size):\n pair = np.random.choice(data, size=2, replace=False)\n pair.sort()\n pairs[i] = pair\n return pairs", "def draw_bs_sample(data):\n return np.random.choice(data, size=len(data))", "def sample(self, n):\n raise NotImplementedError", "def draw_bs_sample(data):\n return rg.choice(data, size=len(data))", "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def sample_uniform(instance, params):\n subpop = np.random.randint(params['N'])\n return sample_from_subpop(instance, params, subpop)", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n return pos_inds\n else:\n return self.random_choice(pos_inds, num_expected)", "def get_selected_subsamples(sample_func, clusters, trajs_dict, visit_profile, Nsample, false_rate=80):\n print('The desired false rate is 
%f'%(false_rate/Nsample))\n crter = 0\n done_first_round = False\n nclusters = len(clusters)\n \n print('Start the first selection until the number of potential profiles is more than Nsample')\n while crter < Nsample:\n i = np.random.choice(range(nclusters))\n if len(clusters[i]) > Nsample*5 or len(clusters[i]) < Nsample: continue\n # try sampling\n selected_spl, plist_spl = sample_func(trajs_dict, plist=None, usrs=clusters[i])\n # do the deterministic attack\n a2 = get_trick_mat(clusters[i] , selected_spl, visit_profile)\n nonzero_list = [np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(a2)] \n crter = np.sum(nonzero_list)\n \n print('Finish the first round selection, %d candidates are selected from cluster %d'%(crter, i))\n round_one_usrs = np.array(clusters[i])[nonzero_list]\n \n crter2 = 0; len_rone = len(round_one_usrs)\n print('Start the second selection until false rate %f'%(false_rate/Nsample))\n while crter2 < false_rate:\n final_selected_usrs = round_one_usrs[np.random.choice(len_rone, Nsample, replace=False)]\n tmp = get_trick_mat(final_selected_usrs, selected_spl, visit_profile)\n crter2 = np.sum([np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(tmp)])\n print('Final false rate for deterministic attack%f'%(crter2/Nsample))\n return selected_spl, final_selected_usrs, plist_spl", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def random_sampling(self, n_subset):\n t = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[INFO] {} - Random sampling with replacement ...\".format(t))\n subset_list = []\n training_set = self\n subset_size = math.ceil(training_set.n_samples / n_subset)\n # create subsets\n for i in range(n_subset):\n # run a permutation to mix all samples (sampling with replacement)\n self.permutation()\n # always draw the first samples\n start_idx = 0\n stop_idx = subset_size\n subset = deepcopy(training_set)\n subset.data = subset.data[start_idx:stop_idx][:]\n subset.labels = subset.labels[start_idx:stop_idx][:]\n subset.labels_onehot = subset.labels_onehot[start_idx:stop_idx][:]\n subset.n_samples = stop_idx - start_idx\n subset.true_distribution = subset._get_true_distribution()\n subset.set_batch_size(training_set.batch_size)\n subset_list.append(subset)\n print(\"\\tSubset shape {}\".format(subset.data.shape))\n return subset_list" ]
[ "0.658136", "0.6326986", "0.63266313", "0.62573075", "0.62280476", "0.621461", "0.6199276", "0.61707175", "0.6155235", "0.6087847", "0.6087402", "0.60716915", "0.60477567", "0.6047303", "0.6019673", "0.6011627", "0.60087085", "0.59920645", "0.59877354", "0.5935229", "0.59194636", "0.5912306", "0.59042937", "0.58819777", "0.5863008", "0.5852923", "0.5852749", "0.58490765", "0.5840986", "0.5835697" ]
0.68804896
0
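For context on the performance claim in the record above, here is a hypothetical micro-benchmark sketch (not part of the dataset) showing how to measure the replace=False cost it describes. It assumes only NumPy and the standard library; the pool size of 1_000_000 and the draw count of 100 are arbitrary choices, and the actual slowdown ratio depends on array sizes and the NumPy version.

import timeit
import numpy as np

pool = np.arange(1_000_000)

# replace=True draws directly; replace=False internally permutes the
# whole pool, which is where the overhead described above comes from.
with_replacement = timeit.timeit(
    lambda: np.random.choice(pool, size=100), number=100)
without_replacement = timeit.timeit(
    lambda: np.random.choice(pool, size=100, replace=False), number=100)

print(f"replace=True : {with_replacement:.4f}s")
print(f"replace=False: {without_replacement:.4f}s")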
uses unique_sample_of_int to sample SNP indices from the bin index structure according to a
def create_random_sample_alt(idx_bins,count_array): idxs=[] for i,x in enumerate(count_array): if x > 0: idxs.extend([ idx_bins[i][ind] for ind in unique_sample_of_int(len(idx_bins[i])-1,x) ] ) return idxs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_random_sample(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend(np.random.choice(idx_bins[i],size=x,replace=False))\n return idxs", "def unique_sample_of_int(max,size):\n idxs=set()\n num_left = size - len(idxs)\n while num_left > 0:\n idxs = idxs.union(set(np.random.random_integers(0,max,size=num_left)))\n num_left = size - len(idxs)\n return idxs", "def _generate_sample_indexes(random_state, n_samples, n_samples_bootstrap):\n # Obtain the random state\n random_state = check_random_state(random_state)\n\n # Obtain the indexes for the samples taking\n # into account the total number of samples\n # and the number of samples to be taken\n sample_indexes = random_state.randint(0, n_samples, n_samples_bootstrap)\n\n # Return them\n return sample_indexes", "def _generate_sample_indices(random_state, n_samples):\n random_instance = check_random_state(random_state)\n sample_indices = random_instance.randint(0, n_samples, n_samples)\n\n return sample_indices", "def random_subsample(neuron, num):\n\n I = np.arange(neuron.n_soma, neuron.n_node)\n np.random.shuffle(I)\n selected_index = I[0:num - 1]\n selected_index = np.union1d([0], selected_index)\n selected_index = selected_index.astype(int)\n selected_index = np.unique(np.sort(selected_index))\n\n return neuron_with_selected_nodes(neuron, selected_index)", "def binary_sample(x):\n return np.random.binomial(1, p=x)", "def sampleIndex(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return i\r\n i += 1", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def sample(self, seg_logit, seg_label):", "def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... 
\",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)", "def sample(self, n):\n raise NotImplementedError", "def create_bin_indeces(snp_dict,sig_snp):\n for i in sig_snp.keys():\n dig=np.digitize(snp_dict[i]['afs'],sig_snp[i]['afs_hist']['bins'])\n # bin indeces are shifted +1 against histogram count indeces\n dig -= 1\n indx_bins=defaultdict(list)\n for j,x in enumerate(dig):\n indx_bins[x].append(j)\n for j in indx_bins.keys():\n indx_bins[j]=np.array(indx_bins[j])\n snp_dict[i]['bin_idx']=indx_bins\n return True", "def generate_S_from_binning(t_binning):\n S= np.zeros(shape=(len(t_binning), len(np.unique(t_binning))))\n for i, t_ in enumerate(t_binning):\n S[i][t_]= 1\n return S", "def deterministic_sample(choices, n_to_sample, p): # pylint: disable=invalid-name\n\n sample_counts = np.ceil(n_to_sample * p).astype(int)\n\n n_to_remove = np.sum(sample_counts) - n_to_sample\n\n if n_to_remove == 0:\n return choices[counts_to_vector(sample_counts)]\n\n non_zero_mask = sample_counts > 0\n\n removal_indices = np.floor(np.linspace(0.0,\n np.sum(non_zero_mask),\n n_to_remove,\n endpoint=False)).astype(int)\n\n tmp = sample_counts[non_zero_mask]\n tmp[removal_indices] = tmp[removal_indices] - 1\n\n sample_counts[non_zero_mask] = tmp\n\n assert np.sum(sample_counts) == n_to_sample\n\n samples = choices[counts_to_vector(sample_counts)]\n\n return samples", "def generate_integer_tuples(self, means, rng=None, use_cuda=False, relative_range=None):\n\n b, k, c, rank = means.size()\n\n # ints is the same size as ind, but for every index-tuple in ind, we add an extra axis containing the 2^rank\n # integerized index-tuples we can make from that one real-valued index-tuple\n # ints = torch.cuda.FloatTensor(batchsize, n, 2 ** rank + additional, rank) if use_cuda else FloatTensor(batchsize, n, 2 ** rank, rank)\n\n \"\"\"\n Generate nearby tuples\n \"\"\"\n fm = self.floor_mask[None, None, None, :].expand(b, k, c, 2 ** rank, rank)\n\n neighbor_ints = means.data[:, :, :, None, :].expand(b, k, c, 2 ** rank, rank).contiguous()\n\n neighbor_ints[fm] = neighbor_ints[fm].floor()\n neighbor_ints[~fm] = neighbor_ints[~fm].ceil()\n\n neighbor_ints = neighbor_ints.long()\n\n \"\"\"\n Sample uniformly from all integer tuples\n \"\"\"\n\n sampled_ints = torch.cuda.FloatTensor(b, k, c, self.gadditional, rank) if use_cuda \\\n else torch.FloatTensor(b, k, c, self.gadditional, rank)\n\n sampled_ints.uniform_()\n sampled_ints *= (1.0 - EPSILON)\n\n rng = torch.cuda.FloatTensor(rng) if use_cuda else torch.FloatTensor(rng)\n rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(sampled_ints)\n\n sampled_ints = torch.floor(sampled_ints * rngxp).long()\n\n \"\"\"\n Sample uniformly from a small range around the given index tuple\n \"\"\"\n rr_ints = torch.cuda.FloatTensor(b, k, c, self.radditional, rank) if use_cuda \\\n else torch.FloatTensor(b, 
k, c, self.radditional, rank)\n\n rr_ints.uniform_()\n rr_ints *= (1.0 - EPSILON)\n\n rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints) # bounds of the tensor\n rrng = torch.cuda.FloatTensor(relative_range) if use_cuda \\\n else torch.FloatTensor(relative_range) # bounds of the range from which to sample\n\n rrng = rrng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)\n\n # print(means.size())\n mns_expand = means.round().unsqueeze(3).expand_as(rr_ints)\n\n # upper and lower bounds\n lower = mns_expand - rrng * 0.5\n upper = mns_expand + rrng * 0.5\n\n # check for any ranges that are out of bounds\n idxs = lower < 0.0\n lower[idxs] = 0.0\n\n idxs = upper > rngxp\n lower[idxs] = rngxp[idxs] - rrng[idxs]\n\n # print('means', means.round().long())\n # print('lower', lower)\n\n rr_ints = (rr_ints * rrng + lower).long()\n\n all = torch.cat([neighbor_ints, sampled_ints, rr_ints] , dim=3)\n\n return all.view(b, k, -1, rank) # combine all indices sampled within a chunk", "def sampler_to_samples(sampler, nburnin=0):\n ndim = sampler.chain.shape[-1]\n return sampler.chain[:, nburnin:, :].reshape((-1, ndim))", "def sample(self, observation):\n raise NotImplementedError", "def sample(self, nsamples):\n return self.dist.sample(nsamples)", "def sample(self,p0=None,nsamp=None): \r\n raise NotImplementedError('Need to implement sample function')", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n for t in xrange(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.argmax(p)#np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def sample_n_unique(sampling_f, n):\n res = []\n while len(res) < n:\n candidate = sampling_f()\n if candidate not in res:\n res.append(candidate)\n return res", "def _sample_pairs_nbp(data, frac, size_cap=np.int(1e6)):\n sample_size = int(len(data) * (len(data) - 1) / 2 * frac)\n sample_size = min(sample_size, size_cap)\n pairs = np.empty((sample_size, 2))\n for i in numba.prange(sample_size):\n pair = np.random.choice(data, size=2, replace=False)\n pair.sort()\n pairs[i] = pair\n return pairs", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size,1))\n x[seed_ix] = 1\n ixes = []\n for t in xrange(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size,1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def sample_from_prior(self, n_samples):\n pass", "def sample(self):\n sample = np.zeros(self.n, dtype=self.dtype)\n sample[self.np_random.choice(self.n, self.np_random.random_integers(low=self.low_limit, high=self.high_limit),\n replace=False)] = 1\n return sample", "def stratified_sample(self, batch_size, rng):\n if self._total_priority() == 0.0:\n raise Exception('Cannot sample from an empty sum tree.')\n\n indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size),\n batch_size, self.depth)\n return np.minimum(indices - self.low_idx, self.highest_set)", "def bootstrap_idxs(n, rng: np.random.Generator = None):\n if rng is None or type(rng) is not np.random.Generator:\n rng = np.random.default_rng(rng)\n in_bag = rng.integers(low=0, high=n, size=n)\n out_bag = np.array(list(set(range(n)) - set(in_bag)))\n return in_bag, out_bag", "def _uniform_user_sampling(self, n_samples):\n #print(\"Generating %s random 
training samples\\n\" % str(n_samples))\n \n sgd_users = np.random.choice(list(self.train_users),size=n_samples)\n sgd_ni = np.random.choice(list(self.train_objects),size=(n_samples*2))#*2 \n i = 0\n sgd_pos_items, sgd_neg_items = [], []\n for sgd_user in sgd_users:\n pos_item = np.random.choice(self.data[sgd_user].indices)\n \n neg_item = sgd_ni[i]\n while neg_item in self.data[sgd_user].indices:\n i = i+1\n neg_item = sgd_ni[i]\n \n sgd_pos_items.append(pos_item)\n sgd_neg_items.append(neg_item)\n i = i+1\n\n return sgd_users, sgd_pos_items, sgd_neg_items" ]
[ "0.6870971", "0.62472403", "0.620097", "0.6074846", "0.58651906", "0.5833519", "0.5804097", "0.5745069", "0.5745069", "0.5739732", "0.5676855", "0.56738466", "0.5668472", "0.5629761", "0.5609549", "0.5605954", "0.5604341", "0.559622", "0.5593045", "0.5586711", "0.5555854", "0.5552733", "0.553344", "0.553044", "0.552809", "0.5513562", "0.55027723", "0.5491044", "0.5485588", "0.54738617" ]
0.69555473
0
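To make the last two records self-contained, here is a minimal runnable sketch of the rejection-sampling alternative they contrast with np.random.choice(replace=False). The helper mirrors the unique_sample_of_int negative shown above, except that the deprecated np.random.random_integers call is replaced with the equivalent np.random.randint(0, max_value + 1); everything else follows the document string of the third record.

import numpy as np

def unique_sample_of_int(max_value, size):
    # Draw until `size` distinct integers in [0, max_value] are collected.
    # Assumes size <= max_value + 1, otherwise the loop never terminates.
    idxs = set()
    num_left = size - len(idxs)
    while num_left > 0:
        idxs |= set(np.random.randint(0, max_value + 1, size=num_left))
        num_left = size - len(idxs)
    return idxs

def create_random_sample_alt(idx_bins, count_array):
    # Sample count_array[i] distinct SNP indices from each non-empty bin,
    # avoiding the permutation cost of np.random.choice(replace=False).
    idxs = []
    for i, x in enumerate(count_array):
        if x > 0:
            idxs.extend(idx_bins[i][ind]
                        for ind in unique_sample_of_int(len(idx_bins[i]) - 1, x))
    return idxs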