query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: sequence of length 30
negative_scores: sequence of length 30
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
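Each row below is laid out field by field in the schema order: query, document, metadata, negatives, negative_scores, document_score, document_rank. As a minimal sketch of how such a dump could be consumed, assuming the rows are stored in a local JSONL file with exactly these field names (the file path and the use of the datasets library are assumptions, not part of this dump):

from datasets import load_dataset

# Placeholder path: point this at wherever the rows are stored as JSON lines.
dataset = load_dataset("json", data_files="code_retrieval_triplets.jsonl", split="train")

for row in dataset.select(range(2)):
    # Each row pairs a natural-language query with one positive code document
    # and 30 mined negatives, each negative carrying a retrieval score.
    print(row["query"])
    print(row["document_score"], row["document_rank"])
    print(len(row["negatives"]), len(row["negative_scores"]))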
An instructor should be able to initiate a jitsi live.
def test_api_video_instructor_initiate_jitsi_live(self):
    video = factories.VideoFactory(
        id="27a23f52-3379-46a2-94fa-697b59cfe3c7",
        playlist__title="foo bar",
        playlist__lti_id="course-v1:ufr+mathematics+00001",
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}

    # initiate a live video,
    # It should generate a key file with the Unix timestamp of the present time
    now = datetime(2018, 8, 8, tzinfo=pytz.utc)
    live_info = {
        "medialive": {
            "input": {
                "id": "medialive_input_1",
                "endpoints": ["https://live_endpoint1", "https://live_endpoint2"],
            },
            "channel": {"id": "medialive_channel_1"},
        },
        "mediapackage": {
            "id": "mediapackage_channel_1",
            "endpoints": {
                "hls": {
                    "id": "endpoint1",
                    "url": "https://channel_endpoint1/live.m3u8",
                },
            },
        },
    }
    with mock.patch.object(timezone, "now", return_value=now), mock.patch.object(
        api, "create_live_stream", return_value=live_info
    ):
        response = self.client.post(
            f"/api/videos/{video.id}/initiate-live/",
            {"type": "jitsi"},
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)

    self.assertEqual(
        content,
        {
            "description": video.description,
            "id": str(video.id),
            "title": video.title,
            "active_stamp": None,
            "is_ready_to_show": True,
            "show_download": True,
            "upload_state": "pending",
            "thumbnail": None,
            "timed_text_tracks": [],
            "urls": {
                "manifests": {
                    "hls": "https://channel_endpoint1/live.m3u8",
                },
                "mp4": {},
                "thumbnails": {},
            },
            "should_use_subtitle_as_transcript": False,
            "has_transcript": False,
            "playlist": {
                "id": str(video.playlist.id),
                "title": "foo bar",
                "lti_id": "course-v1:ufr+mathematics+00001",
            },
            "live_state": "creating",
            "live_info": {
                "medialive": {
                    "input": {
                        "endpoints": [
                            "https://live_endpoint1",
                            "https://live_endpoint2",
                        ],
                    }
                },
                "jitsi": {
                    "domain": "meet.jit.si",
                    "external_api_url": "https://meet.jit.si/external_api.js",
                    "config_overwrite": {},
                    "interface_config_overwrite": {},
                },
            },
            "live_type": "jitsi",
            "xmpp": None,
        },
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)", "def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()", "def run_experiment():\n pass", "def start():", "def start():", "def start():", "def start():", "def launch(self):", "def test_api_video_instructor_start_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=IDLE,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n jwt_token.payload[\"user\"] = {\"id\": \"56255f3807599c377bf0e5bf072359fd\"}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"), mock.patch.object(\n api, \"create_room\"\n ) as mock_create_room, mock.patch(\n \"marsha.core.serializers.xmpp_utils.generate_jwt\"\n ) as mock_jwt_encode:\n mock_jwt_encode.return_value = \"xmpp_jwt\"\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n mock_create_room.assert_called_once_with(video.id)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": \"pending\",\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n 
\"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": \"starting\",\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": {\n \"bosh_url\": \"https://xmpp-server.com/http-bind?token=xmpp_jwt\",\n \"websocket_url\": None,\n \"conference_url\": f\"{video.id}@conference.xmpp-server.com\",\n \"jid\": \"xmpp-server.com\",\n },\n },\n )", "def start_experiment():\r\n check_parameters()\r\n try:\r\n EXP.start()\r\n except InputError as inst:\r\n tkMessageBox.showinfo(inst.expr, inst.msg)", "def start(self):\n ...", "def started(self):", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def start(self) -> None:", "def start(self) -> None:", "def start (self):\n pass", "def start (self):\n pass", "def start(self):\r\n pass", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def launch(lti=lti):\n\n # example of getting lti data from the request\n # let's just store it in our session\n session['lis_person_name_full'] = request.form.get('lis_person_name_full')\n session['custom_canvas_user_id'] = request.form.get('custom_canvas_user_id')\n session['roles'] = request.form.get('roles')\n session['custom_canvas_course_id'] = request.form.get('custom_canvas_course_id')\n #take the user id and fetch all their recorded videos\n\n #videoList = range(1,10) #get video list\n #student = \"no\" # if student\n\n\n #assume we have three videos videoid63 videoid65 videoid64\n #videos = [\"videoid65\", \"videoid63\", \"videoid64\"]\n #if teacher quiz list\n\n\n\n # Write the lti params to the console\n app.logger.info(json.dumps(request.form, indent=2))\n\n if \"Learner\" in session['roles']:\n \t#launch student\n \treturn render_template('launchstudent.htm.j2', lis_person_name_full=session['lis_person_name_full'], student_id= session['custom_canvas_user_id'])\n \n if \"Instructor\" in session['roles']:\n \t#launch teacher\n \treturn render_template('launchteacher.htm.j2', lis_person_name_full=session['lis_person_name_full'], roles=session['roles'], course_id= 
session['custom_canvas_course_id'])" ]
[ "0.60261226", "0.6015295", "0.59643716", "0.59009105", "0.58533597", "0.58318335", "0.58318335", "0.58318335", "0.58318335", "0.58011377", "0.5790651", "0.57562137", "0.5751726", "0.56952894", "0.56641877", "0.56325126", "0.56325126", "0.56237036", "0.56237036", "0.5600287", "0.55835086", "0.5571586", "0.5571586", "0.5571586", "0.5571586", "0.5571586", "0.5571586", "0.5571586", "0.5571586", "0.553753" ]
0.69384843
0
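The metadata field above declares a single triplet objective over (query, document, negatives), and the two score fields make that objective easy to sanity-check: the positive document_score should stay above every negative_score. Below is a small helper sketch under that assumption (field names taken from the schema; the helper itself is not part of the dataset):

def triplet_margin(row):
    """Return the positive document's margin over the hardest mined negative."""
    positive = float(row["document_score"])
    hardest_negative = max(float(score) for score in row["negative_scores"])
    # A well-formed triplet row keeps the positive above all 30 negatives;
    # a document_rank of "0" means the positive was the top-ranked result.
    assert positive > hardest_negative
    return positive - hardest_negative

For the row above this gives 0.69384843 - 0.60261226 ≈ 0.091, a comfortable margin over the hardest negative.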
Anonymous users are not allowed to start a live.
def test_api_video_start_live_anonymous_user(self):
    video = factories.VideoFactory()

    response = self.client.post(f"/api/videos/{video.id}/start-live/")

    self.assertEqual(response.status_code, 401)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "Authentication credentials were not provided."}
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_anonymous():\n return False", "def test_api_video_initiate_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/initiate-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def is_anonymous(self):\n return False", "def is_anonymous(self):\r\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_api_video_stop_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/stop-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_livesession_read_anonymous(self):\n livesession = AnonymousLiveSessionFactory()\n response = self.client.get(self._get_url(livesession.video, livesession))\n self.assertEqual(response.status_code, 401)\n self.assertEqual(\n response.json(), {\"detail\": \"Authentication credentials were not provided.\"}\n )" ]
[ "0.6592946", "0.6494612", "0.6479494", "0.64649665", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63279337", "0.63114077", "0.6263845", "0.61632085", "0.6097136", "0.6067805", "0.6044313" ]
0.6924362
0
An instructor with read_only set to true should not be able to start a live.
def test_api_video_instructor_start_live_in_read_only(self):
    video = factories.VideoFactory()
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": False}

    response = self.client.post(
        f"/api/videos/{video.id}/start-live/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 403)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def isReadOnly(self) -> bool:\n ...", "def isReadOnly(self) -> bool:\n ...", "def test_api_video_instructor_patch_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n data = {\"upload_state\": \"ready\"}\n\n response = self.client.patch(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_initiate_upload_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-upload/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_update_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n data = {\"upload_state\": \"ready\"}\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 403)", "def IsReadOnly(self) -> bool:", "def test_no_tab_flag_unset(self, enable_proctored_exams, enable_timed_exams):\n self.setup_course(enable_proctored_exams, enable_timed_exams)\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_proctoring_tab_available(False)", "def is_instructor(self):\n # pylint: disable=no-member\n return self.xmodule_runtime.get_user_role() == 'instructor'", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n 
check_for_get_code(self, 404, url)", "def test_creator_group_not_enabled(self):\r\n self.assertTrue(has_access(self.user, CourseCreatorRole()))", "def raise_not_editable(self, viewer):\n if viewer.has_perm(\"bookwyrm.edit_instance_settings\"):\n return\n raise PermissionDenied()", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def not_test_without_user(self):\n # TODO", "def test_no_proctoring_tab_non_global_staff(self, enable_proctored_exams, enable_timed_exams):\n self.setup_course(enable_proctored_exams, enable_timed_exams)\n\n self.instructor.is_staff = False\n self.instructor.save()\n self._assert_proctoring_tab_available(False)", "def test_api_video_read_detail_as_instructor_in_read_only(self):\n video = factories.VideoFactory(upload_state=\"ready\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n # Get the video linked to the JWT token\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)", "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_read_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n role=INSTRUCTOR,\n organization=self.organization,\n )\n\n self.assert_user_cannot_read(organization_access.user, self.live)", "def is_instructor(self):\n return bool(LTI_ROLES[INSTRUCTOR] & self.roles)", "def is_explicitly_locked(self):\n return self.subsection_visibility == 'staff_only'", "def test_api_video_instructor_delete_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.delete(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def has_instructor(self, user, allow_superusers=True):\n return (user.is_superuser and allow_superusers) or len(self.instructors.filter(id=user.id)) > 0", "def read_only(self):\n return bool(self.__read_only)", "def setReadOnly(self, state: bool) -> None:\n ...", "def test_has_instructor_access_for_class(self):\r\n ret_val = has_instructor_access_for_class(self.instructor, 
self.course.id)\r\n self.assertEquals(ret_val, True)", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)" ]
[ "0.72525626", "0.6742537", "0.6489153", "0.6489153", "0.6405917", "0.6348487", "0.63340545", "0.62907255", "0.6168628", "0.6149776", "0.6135148", "0.6042868", "0.6026938", "0.5999581", "0.5999581", "0.5999581", "0.5999581", "0.599148", "0.5984983", "0.5965824", "0.5955471", "0.5947544", "0.5933007", "0.59259635", "0.59116244", "0.5881613", "0.5869765", "0.58681196", "0.58660877", "0.58649933" ]
0.7389538
0
A student should not be able to start a live.
def test_api_video_student_start_live(self):
    video = factories.VideoFactory()
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = ["student"]

    response = self.client.post(
        f"/api/videos/{video.id}/start-live/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )

    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "You do not have permission to perform this action."}
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def test_api_video_student_initiate_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def test_course_beta_period(self):\r\n self.assertFalse(self.course.has_started())\r\n\r\n # student user shouldn't see it\r\n self.assertFalse(has_access(self.normal_student, 'load', self.course))\r\n\r\n # now the student should see it\r\n self.assertTrue(has_access(self.beta_tester, 'load', self.course))", "def test_api_livesession_student_cant_read_attendances(self):\n video = VideoFactory(\n live_state=RUNNING,\n 
live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n # livesession with consumer_site\n live_session = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n is_registered=True,\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = LiveSessionLtiTokenFactory(live_session=live_session)\n\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_with_no_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\"\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def not_test_without_user(self):\n # TODO", "def test_no_proctoring_tab_non_global_staff(self, enable_proctored_exams, enable_timed_exams):\n self.setup_course(enable_proctored_exams, enable_timed_exams)\n\n self.instructor.is_staff = False\n self.instructor.save()\n self._assert_proctoring_tab_available(False)", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_seat_not_available(self):\n\n user1 = User.objects.create(username=\"user1\", password=\"\", email=\"[email protected]\")\n user2 = User.objects.create(username=\"user2\", password=\"\", email=\"[email protected]\")\n\n course = Course.objects.first()\n course.student.add(user1)\n course.student.add(user2)\n\n self.assertFalse(course.is_seat_available())", "def test_dark_launch_enrolled_student(self):\r\n\r\n # Make courses start in the future\r\n now = datetime.datetime.now(pytz.UTC)\r\n tomorrow = now + datetime.timedelta(days=1)\r\n self.course.start = tomorrow\r\n self.test_course.start = tomorrow\r\n self.course = self.update_course(self.course)\r\n self.test_course = self.update_course(self.test_course)\r\n\r\n self.assertFalse(self.course.has_started())\r\n self.assertFalse(self.test_course.has_started())\r\n\r\n # First, try with an enrolled student\r\n self.login(self.enrolled_user)\r\n\r\n # shouldn't be able to get to anything except the light pages\r\n self._check_non_staff_light(self.course)\r\n self._check_non_staff_dark(self.course)\r\n self._check_non_staff_light(self.test_course)\r\n self._check_non_staff_dark(self.test_course)", "def test_no_course_id(self):\n run_nbgrader([\"quickstart\"], retcode=1)", "def test_with_unknown_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\n \"http://purl.imsglobal.org/vocab/lis/v2/membership#Learner\",\n \"http://purl.imsglobal.org/vocab/lis/v2/uknownrole/unknown#Unknown\",\n ]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def 
test_non_live_course(self):\n future_course = self.create_future_course()\n self.create_user_for_course(future_course, CourseUserType.ENROLLED)\n\n url = course_home_url(future_course)\n response = self.client.get(url)\n start_date = strftime_localized(future_course.start, 'SHORT_DATE')\n expected_params = QueryDict(mutable=True)\n expected_params['notlive'] = start_date\n expected_url = '{url}?{params}'.format(\n url=reverse('dashboard'),\n params=expected_params.urlencode()\n )\n self.assertRedirects(response, expected_url)", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def check_vulnerability(self):\n\t\tpass", "def life_critical():\n return True", "def can_run(self):\n return True", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def can_run(self):\n\t\treturn self._start is None", "def test_bad_student(self):\r\n staff_page = self._goto_staff_page()\r\n staff_page.answer_problem()\r\n\r\n staff_debug_page = staff_page.open_staff_debug_info()\r\n staff_debug_page.delete_state('INVALIDUSER')\r\n msg = staff_debug_page.idash_msg[0]\r\n self.assertEqual(u'Failed to delete student state. '\r\n 'User does not exist.', msg)", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_negative_is_active_of_homework():\n assert not expired_hw.is_active()", "def can_play_stage(stamina, free_slots):\n if free_slots >= 5 and stamina > 0:\n return True\n return False", "def noyable(self):\n return False", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])" ]
[ "0.64666325", "0.6193658", "0.61624396", "0.6158128", "0.6136004", "0.6102498", "0.60907173", "0.6051432", "0.6016664", "0.5931232", "0.5912021", "0.5900177", "0.5882163", "0.5858718", "0.5842952", "0.58379006", "0.57815033", "0.5769379", "0.5767614", "0.57554597", "0.57400346", "0.5715933", "0.5713564", "0.5687593", "0.5658758", "0.5652075", "0.56490916", "0.56380236", "0.5630826", "0.5630531" ]
0.6614195
0
An instructor should not start a video when not in live mode.
def test_api_instructor_start_non_live_video(self):
    video = factories.VideoFactory(
        id="27a23f52-3379-46a2-94fa-697b59cfe3c7",
        upload_state=random.choice([s[0] for s in STATE_CHOICES]),
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}

    # start a live video,
    with mock.patch.object(api, "start_live_channel"):
        response = self.client.post(
            f"/api/videos/{video.id}/start-live/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_student_start_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n 
print(\"allow_video needs implementation\")", "def navigate_to_video_no_render(self):\r\n self._install_course_fixture()\r\n self._navigate_to_courseware_video_no_render()", "def _navigate_to_courseware_video_no_render(self):\r\n self._navigate_to_courseware_video()\r\n self.video.wait_for_video_class()", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def check(self):\n #\n # *****************\n # *****************\n # TODO: Check really if video is valid\n # *****************\n # *****************\n return True", "def continue_video(self):\n if self.current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif not self.current_paused:\n print(\"Cannot continue video: Video is not paused\")\n elif self.current_paused:\n print(\"Continuing video:\", self.current_video.title)", "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "def test_api_video_student_initiate_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def test_api_video_instructor_start_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n 
upload_state=PENDING,\n live_state=IDLE,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n jwt_token.payload[\"user\"] = {\"id\": \"56255f3807599c377bf0e5bf072359fd\"}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"), mock.patch.object(\n api, \"create_room\"\n ) as mock_create_room, mock.patch(\n \"marsha.core.serializers.xmpp_utils.generate_jwt\"\n ) as mock_jwt_encode:\n mock_jwt_encode.return_value = \"xmpp_jwt\"\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n mock_create_room.assert_called_once_with(video.id)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": \"pending\",\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": \"starting\",\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": {\n \"bosh_url\": \"https://xmpp-server.com/http-bind?token=xmpp_jwt\",\n \"websocket_url\": None,\n \"conference_url\": f\"{video.id}@conference.xmpp-server.com\",\n \"jid\": \"xmpp-server.com\",\n },\n },\n )", "def play_video(self):\n raise NotImplementedError(\n \"This method needs to be implemented by a derived class\"\n )", "def test_api_livesession_read_attendances_admin_video_unknown(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n video.delete()\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 404)", "def continue_video(self):\n if self._current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n return\n elif not self._paused:\n print(\"Cannot continue video: Video is not paused\")\n return\n print(f\"Continuing video: {self._current_video.title}\")\n self._paused = False", "def continue_video(self):\n\n if self.is_paused:\n print(f\"Continuing video: {self.playing_now}\")\n self.is_paused = False\n elif 
self.is_playing and self.is_paused is False:\n print(\"Cannot continue video: Video is not paused\")\n elif self.is_playing is False:\n print(\"Cannot continue video: No video is currently playing\")", "def test_api_video_start_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/start-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_api_video_instructor_stop_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=RUNNING,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": PENDING,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": STOPPING,\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": None,\n },\n )", "def enable_video(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_video\")" ]
[ "0.7412882", "0.7358776", "0.7240957", "0.7188807", "0.7049525", "0.68243253", "0.67984855", "0.67984855", "0.67984855", "0.67984855", "0.6778227", "0.67452276", "0.67433876", "0.66061795", "0.65485644", "0.6529168", "0.6495579", "0.6489755", "0.648626", "0.648626", "0.6448444", "0.64078975", "0.63686544", "0.63555014", "0.6340524", "0.6314354", "0.6311207", "0.6264887", "0.6263739", "0.62598014" ]
0.7704749
0
An instructor should not start a video when not in live mode.
def test_api_instructor_start_non_idle_live(self):
    video = factories.VideoFactory(
        id="27a23f52-3379-46a2-94fa-697b59cfe3c7",
        upload_state=PENDING,
        live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != "idle"]),
        live_type=RAW,
    )
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}

    # start a live video,
    with mock.patch.object(api, "start_live_channel"):
        response = self.client.post(
            f"/api/videos/{video.id}/start-live/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_student_start_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def 
navigate_to_video_no_render(self):\r\n self._install_course_fixture()\r\n self._navigate_to_courseware_video_no_render()", "def _navigate_to_courseware_video_no_render(self):\r\n self._navigate_to_courseware_video()\r\n self.video.wait_for_video_class()", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def check(self):\n #\n # *****************\n # *****************\n # TODO: Check really if video is valid\n # *****************\n # *****************\n return True", "def continue_video(self):\n if self.current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif not self.current_paused:\n print(\"Cannot continue video: Video is not paused\")\n elif self.current_paused:\n print(\"Continuing video:\", self.current_video.title)", "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "def test_api_video_student_initiate_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def test_api_video_instructor_start_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=IDLE,\n live_info={\n 
\"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n jwt_token.payload[\"user\"] = {\"id\": \"56255f3807599c377bf0e5bf072359fd\"}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"), mock.patch.object(\n api, \"create_room\"\n ) as mock_create_room, mock.patch(\n \"marsha.core.serializers.xmpp_utils.generate_jwt\"\n ) as mock_jwt_encode:\n mock_jwt_encode.return_value = \"xmpp_jwt\"\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n mock_create_room.assert_called_once_with(video.id)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": \"pending\",\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": \"starting\",\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": {\n \"bosh_url\": \"https://xmpp-server.com/http-bind?token=xmpp_jwt\",\n \"websocket_url\": None,\n \"conference_url\": f\"{video.id}@conference.xmpp-server.com\",\n \"jid\": \"xmpp-server.com\",\n },\n },\n )", "def play_video(self):\n raise NotImplementedError(\n \"This method needs to be implemented by a derived class\"\n )", "def test_api_livesession_read_attendances_admin_video_unknown(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n video.delete()\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 404)", "def continue_video(self):\n if self._current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n return\n elif not self._paused:\n print(\"Cannot continue video: Video is not paused\")\n return\n print(f\"Continuing video: {self._current_video.title}\")\n self._paused = False", "def continue_video(self):\n\n if self.is_paused:\n print(f\"Continuing video: {self.playing_now}\")\n self.is_paused = False\n elif self.is_playing and self.is_paused is False:\n print(\"Cannot 
continue video: Video is not paused\")\n elif self.is_playing is False:\n print(\"Cannot continue video: No video is currently playing\")", "def test_api_video_start_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/start-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_api_video_instructor_stop_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=RUNNING,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": PENDING,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": STOPPING,\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": None,\n },\n )", "def enable_video(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_video\")" ]
[ "0.7703713", "0.7358513", "0.7240945", "0.7188635", "0.7049232", "0.68231356", "0.67985535", "0.67985535", "0.67985535", "0.67985535", "0.67787534", "0.6745583", "0.67433345", "0.6606759", "0.65474766", "0.6529702", "0.649597", "0.6488909", "0.64857394", "0.64857394", "0.64489394", "0.6407827", "0.63680226", "0.6355477", "0.63407516", "0.6315033", "0.63114077", "0.6264346", "0.626384", "0.6258287" ]
0.74117947
1
Anonymous users are not allowed to stop a live.
def test_api_video_stop_live_anonymous_user(self): video = factories.VideoFactory() response = self.client.post(f"/api/videos/{video.id}/stop-live/") self.assertEqual(response.status_code, 401) content = json.loads(response.content) self.assertEqual( content, {"detail": "Authentication credentials were not provided."} )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disallow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = False\n update.message.reply_text(\"Temprarily allowed disabled!\")", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_stop_live_staff_or_user(self):\n for user in [factories.UserFactory(), factories.UserFactory(is_staff=True)]:\n self.client.login(username=user.username, password=\"test\")\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/stop-live/\")\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_stop_fails_on_no_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.user)\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/stop_test/1')\n self.assert403(response)", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def test_api_video_start_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/start-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def get_everyone_denied(self):", "def test_api_instructor_stop_non_live_video(self):\n video = 
factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def is_anonymous():\n return False", "def temporarily_allow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = True\n update.message.reply_text(\"Temprarily allowed!\")", "def is_anonymous(self):\n return False", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_unpause_server_fails_as_user(self):\n with self.assertRaises(Forbidden):\n self.servers_client.unpause_server(self.server.id)", "def is_anonymous(self):\r\n return False", "def test_post_forbid_anonymous(self):\n self.check_post_forbid_anonymous('FORBID_ANONYMOUS')", "def test_api_video_initiate_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/initiate-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False", "def is_anonymous(self):\n return False" ]
[ "0.65399635", "0.64476043", "0.6439552", "0.63720864", "0.6289802", "0.6223873", "0.62178606", "0.61278325", "0.607359", "0.60633457", "0.6044045", "0.597465", "0.59742105", "0.59253925", "0.59094024", "0.5898963", "0.5882345", "0.5861994", "0.58303726", "0.58122665", "0.57837534", "0.5739864", "0.5739864", "0.5739864", "0.5739864", "0.5739864", "0.5739864", "0.5739864", "0.5739864", "0.5739864" ]
0.7234297
0
An instructor with read_only set to true should not be able to stop a live.
def test_api_video_instructor_stop_live_in_read_only(self): video = factories.VideoFactory() jwt_token = AccessToken() jwt_token.payload["resource_id"] = str(video.id) jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])] jwt_token.payload["permissions"] = {"can_update": False} response = self.client.post( f"/api/videos/{video.id}/stop-live/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}", ) self.assertEqual(response.status_code, 403)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_patch_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n data = {\"upload_state\": \"ready\"}\n\n response = self.client.patch(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 403)", "def isReadOnly(self) -> bool:\n ...", "def isReadOnly(self) -> bool:\n ...", "def test_api_video_instructor_update_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n data = {\"upload_state\": \"ready\"}\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_video_instructor_delete_video_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.delete(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def IsReadOnly(self) -> bool:", "def test_no_tab_flag_unset(self, enable_proctored_exams, enable_timed_exams):\n self.setup_course(enable_proctored_exams, enable_timed_exams)\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_proctoring_tab_available(False)", "def test_api_video_instructor_initiate_upload_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-upload/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def 
test_api_video_read_detail_as_instructor_in_read_only(self):\n video = factories.VideoFactory(upload_state=\"ready\")\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n # Get the video linked to the JWT token\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def read_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only\")", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def is_read_only(self):\n return (self.get_name().startswith(\"b\")\n or self.get_name() == \"jump_cond\" # meta-instruction\n or self.get_name() == \"j\"\n or self.get_name() == \"ld\"\n or self.get_name() == \"lw\"\n or self.get_name() == \"lb\")", "def test_no_proctoring_tab_non_global_staff(self, enable_proctored_exams, enable_timed_exams):\n self.setup_course(enable_proctored_exams, enable_timed_exams)\n\n self.instructor.is_staff = False\n self.instructor.save()\n self._assert_proctoring_tab_available(False)", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def raise_not_editable(self, viewer):\n if viewer.has_perm(\"bookwyrm.edit_instance_settings\"):\n return\n raise PermissionDenied()", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def private(self) -> bool:\n return pulumi.get(self, \"private\")", "def not_test_without_user(self):\n # TODO", 
"def test_read_by_organization_instructor(self):\n organization_access = OrganizationAccessFactory(\n role=INSTRUCTOR,\n organization=self.organization,\n )\n\n self.assert_user_cannot_read(organization_access.user, self.live)", "def setReadOnly(self, state: bool) -> None:\n ...", "def read_only(self):\n return bool(self.__read_only)", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def is_explicitly_locked(self):\n return self.subsection_visibility == 'staff_only'", "def is_instructor(self):\n # pylint: disable=no-member\n return self.xmodule_runtime.get_user_role() == 'instructor'", "def is_instructor(self):\n return bool(LTI_ROLES[INSTRUCTOR] & self.roles)" ]
[ "0.7276562", "0.7149556", "0.6529597", "0.64743584", "0.64743584", "0.6461077", "0.63935566", "0.63030887", "0.6153621", "0.61088896", "0.5998145", "0.59547323", "0.59547323", "0.59547323", "0.59547323", "0.5901826", "0.5891755", "0.5890304", "0.58862036", "0.5877192", "0.58404106", "0.583824", "0.58353746", "0.58268535", "0.5812322", "0.5791287", "0.5783848", "0.5783536", "0.57739365", "0.57654756" ]
0.7423588
0
A student should not be able to stop a live.
def test_api_video_student_stop_live(self): video = factories.VideoFactory() jwt_token = AccessToken() jwt_token.payload["resource_id"] = str(video.id) jwt_token.payload["roles"] = ["student"] response = self.client.post( f"/api/videos/{video.id}/stop-live/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}", ) self.assertEqual(response.status_code, 403) content = json.loads(response.content) self.assertEqual( content, {"detail": "You do not have permission to perform this action."} )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def stopCond(self):\n\t\treturn False", "def stop_check(self):\n pass", "def test_stop_fails_on_no_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.user)\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/stop_test/1')\n self.assert403(response)", "def test_api_livesession_student_cant_read_attendances(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n # livesession with consumer_site\n live_session = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n is_registered=True,\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = LiveSessionLtiTokenFactory(live_session=live_session)\n\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def test_course_beta_period(self):\r\n self.assertFalse(self.course.has_started())\r\n\r\n # student user shouldn't see it\r\n self.assertFalse(has_access(self.normal_student, 'load', self.course))\r\n\r\n # now the student should see it\r\n self.assertTrue(has_access(self.beta_tester, 'load', self.course))", "def 
_stop(self):\n return True", "def test_api_video_student_start_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def have_i_lost(self):\n if self.life_points <= 0:\n self.running = False", "def test_not_unopposed(self):\n s1 = self.battle.create_skirmish(self.alice, 2) # Attack 2\n s1.react(self.bob, 1) # --Attack 1\n s1.resolve()\n self.assertFalse(s1.unopposed)", "def stop(self):\n print(\"Stopping accessory.\")", "def _stop(self):", "def stop(self, *args, **kwargs):\n return self(AbilityId.STOP, *args, **kwargs)", "def sub_stop_requested(self, value):\n if str(value).lower() == \"stop\":\n self.__gameCanceled = True\n self.__logging(\"Method \\\"sub_stopRequested()\\\" Stop is requested and successful set\")\n else:\n self.__logging(\"Method \\\"sub_stopRequested()\\\" Stop not requested\")", "def stop_run(arn=None):\n pass", "def violated(self) -> bool:\n ...", "def test_api_video_stop_live_staff_or_user(self):\n for user in [factories.UserFactory(), factories.UserFactory(is_staff=True)]:\n self.client.login(username=user.username, password=\"test\")\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/stop-live/\")\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def stop():", "def stop():", "def stop():", "def stop():", "def stop() -> None:", "def stop(self) -> None:", "def stop(self) -> None:", "def stop(self):\n self.alive = False", "def stop(self):\n self.alive = False", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)" ]
[ "0.665526", "0.66009367", "0.6460251", "0.63614786", "0.63535726", "0.6301615", "0.60067385", "0.6005226", "0.59533995", "0.59468013", "0.59030604", "0.58920294", "0.5869129", "0.5864219", "0.58633035", "0.58615416", "0.58441806", "0.5843927", "0.5821259", "0.58041114", "0.5799612", "0.5799612", "0.5799612", "0.5799612", "0.57854164", "0.5783875", "0.5783875", "0.5775846", "0.5775846", "0.5769186" ]
0.697601
0
An instructor should not stop a video when not in live mode.
def test_api_instructor_stop_non_live_video(self): video = factories.VideoFactory( id="27a23f52-3379-46a2-94fa-697b59cfe3c7", upload_state=random.choice([s[0] for s in STATE_CHOICES]), ) jwt_token = AccessToken() jwt_token.payload["resource_id"] = str(video.id) jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])] jwt_token.payload["permissions"] = {"can_update": True} # start a live video, with mock.patch.object(api, "stop_live_channel"): response = self.client.post( f"/api/videos/{video.id}/stop-live/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}", ) self.assertEqual(response.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def stop_video(self):\n stopped_video = self.video_id\n if stopped_video == None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(f\"Stopping video: {stopped_video}\")", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def stop_video(self):\n if self.is_playing:\n print(f\"Stopping video: {self.playing_now}\")\n self.is_playing = False\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def stop_video(self):\n global value\n if value>0:\n value=0\n print(f\"Stopping video: {name}\")\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def stop_video(self):\n\n if self.current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(\"Stopping video:\", self.current_video.title)\n self.current_video = None", "def stop_video(self):\n if self._current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n print(f\"Stopping video: {self._current_video.title}\")\n self._current_video = None\n self._paused = False", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def 
test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")", "def stop_video(self):\n\n # Enabling all the buttons, the speedCombo and the checkbox\n self.enable_btns()\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState or self.mediaPlayer.state() == QMediaPlayer.PausedState:\n self.mediaPlayer.stop()\n else:\n pass", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def test_api_video_instructor_stop_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=RUNNING,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": PENDING,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": STOPPING,\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": 
[\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": None,\n },\n )", "def stop_videos(self):\n if ((not self.playing) and (self.worker is None)) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.shutdown = True\n\n # Force the background worker to leave run()\n self.worker.force_stop()", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def continue_video(self):\n if self.current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif not self.current_paused:\n print(\"Cannot continue video: Video is not paused\")\n elif self.current_paused:\n print(\"Continuing video:\", self.current_video.title)", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def continue_video(self):\n\n if self.is_paused:\n print(f\"Continuing video: {self.playing_now}\")\n self.is_paused = False\n elif self.is_playing and self.is_paused is False:\n print(\"Cannot continue video: Video is not paused\")\n elif self.is_playing is False:\n print(\"Cannot continue video: No video is currently playing\")", "def _control_stop(self):\n self.player.stop()", "def test_api_video_stop_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/stop-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def stop_state(self):\n\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False", "def stop_current_episode(self):\n raise NotImplementedError", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")" ]
[ "0.7796268", "0.7427205", "0.72466075", "0.71685565", "0.7167635", "0.7152092", "0.70547813", "0.70006377", "0.6970477", "0.6923031", "0.69106203", "0.6893699", "0.6771923", "0.6740988", "0.66896695", "0.6677808", "0.65654933", "0.6487636", "0.6487632", "0.6487632", "0.6466423", "0.6459791", "0.64578325", "0.6434778", "0.6421908", "0.6400722", "0.6369808", "0.6369808", "0.6369808", "0.6369808" ]
0.7894922
0
An instructor should not stop a video when not in live state.
def test_api_instructor_stop_non_running_live(self): video = factories.VideoFactory( id="27a23f52-3379-46a2-94fa-697b59cfe3c7", upload_state=PENDING, live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != "running"]), live_type=RAW, ) jwt_token = AccessToken() jwt_token.payload["resource_id"] = str(video.id) jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])] jwt_token.payload["permissions"] = {"can_update": True} # start a live video, with mock.patch.object(api, "stop_live_channel"): response = self.client.post( f"/api/videos/{video.id}/stop-live/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}", ) self.assertEqual(response.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_instructor_stop_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def stop_video(self):\n stopped_video = self.video_id\n if stopped_video == None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(f\"Stopping video: {stopped_video}\")", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_video_student_stop_live(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [\"student\"]\n\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"You do not have permission to perform this action.\"}\n )", "def stop_video(self):\n if self.is_playing:\n print(f\"Stopping video: {self.playing_now}\")\n self.is_playing = False\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def stop_video(self):\n global 
value\n if value>0:\n value=0\n print(f\"Stopping video: {name}\")\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def stop_video(self):\n if self._current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n print(f\"Stopping video: {self._current_video.title}\")\n self._current_video = None\n self._paused = False", "def stop_video(self):\n\n if self.current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(\"Stopping video:\", self.current_video.title)\n self.current_video = None", "def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def test_api_video_instructor_stop_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n upload_state=PENDING,\n live_state=RUNNING,\n live_info={\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n },\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n\n self.assertEqual(\n content,\n {\n \"description\": video.description,\n \"id\": str(video.id),\n \"title\": video.title,\n \"active_stamp\": None,\n \"is_ready_to_show\": True,\n \"show_download\": True,\n \"upload_state\": PENDING,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"urls\": {\n \"manifests\": {\n \"hls\": \"https://channel_endpoint1/live.m3u8\",\n },\n \"mp4\": {},\n \"thumbnails\": {},\n },\n \"should_use_subtitle_as_transcript\": False,\n \"has_transcript\": False,\n \"playlist\": {\n \"id\": str(video.playlist.id),\n \"title\": \"foo bar\",\n \"lti_id\": \"course-v1:ufr+mathematics+00001\",\n },\n \"live_state\": STOPPING,\n \"live_info\": {\n \"medialive\": {\n \"input\": {\n \"endpoints\": [\n \"https://live_endpoint1\",\n \"https://live_endpoint2\",\n ],\n }\n },\n },\n \"live_type\": RAW,\n \"xmpp\": None,\n },\n )", "def test_api_video_instructor_start_live_in_read_only(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n 
jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def stop_video(self):\n\n # Enabling all the buttons, the speedCombo and the checkbox\n self.enable_btns()\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState or self.mediaPlayer.state() == QMediaPlayer.PausedState:\n self.mediaPlayer.stop()\n else:\n pass", "def stop_videos(self):\n if ((not self.playing) and (self.worker is None)) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.shutdown = True\n\n # Force the background worker to leave run()\n self.worker.force_stop()", "def stop_state(self):\n\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False", "def stop_current_episode(self):\n raise NotImplementedError", "def continue_video(self):\n\n if self.is_paused:\n print(f\"Continuing video: {self.playing_now}\")\n self.is_paused = False\n elif self.is_playing and self.is_paused is False:\n print(\"Cannot continue video: Video is not paused\")\n elif self.is_playing is False:\n print(\"Cannot continue video: No video is currently playing\")", "def continue_video(self):\n if self.current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif not self.current_paused:\n print(\"Cannot continue video: Video is not paused\")\n elif self.current_paused:\n print(\"Continuing video:\", self.current_video.title)", "def stop(self,event=None):\r\n # If no video data\r\n if self.isEmpty():\r\n return\r\n if self.hasAudio:\r\n mixer.music.stop()\r\n self.state = VideoPlayer.State.STOPPED\r\n self.progress = 0\r\n self.startTimestamp = time.time()", "def test_api_video_stop_live_anonymous_user(self):\n video = factories.VideoFactory()\n\n response = self.client.post(f\"/api/videos/{video.id}/stop-live/\")\n\n self.assertEqual(response.status_code, 401)\n content = json.loads(response.content)\n self.assertEqual(\n content, {\"detail\": \"Authentication credentials were not provided.\"}\n )", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def continue_video(self):\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n if self.pause:\n self.pause = False\n print(f\"Continuing video: {video_playing.title}\")\n elif not video_playing:\n print(f\"Cannot continue video: No video is currently playing\")\n else:\n print(\"Cannot continue video: Video is not paused\")\n # print(\"continue_video needs implementation\")", "def test_api_livesession_read_attendances_admin_video_unknown(self):\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\n \"started_at\": \"1533686400\",\n },\n live_type=JITSI,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n video.delete()\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 404)", "def test_api_video_instructor_initiate_live_in_read_only(self):\n video = factories.VideoFactory()\n 
jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": False}\n\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 403)", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def paused(self) -> bool:" ]
[ "0.7944129", "0.7291849", "0.72207254", "0.719873", "0.7040603", "0.7018957", "0.69474876", "0.6940812", "0.6808219", "0.67891145", "0.67476165", "0.6694016", "0.6679444", "0.66699386", "0.6599011", "0.6586042", "0.65245074", "0.64716214", "0.6442532", "0.6427986", "0.6426531", "0.6362865", "0.63485664", "0.6307103", "0.6307103", "0.62909114", "0.628408", "0.6281159", "0.62750876", "0.6266122" ]
0.7828574
1
Confirm update video live state.
def test_api_video_update_live_state(self): video = factories.VideoFactory( id="a1a21411-bf2f-4926-b97f-3c48a124d528", upload_state=PENDING, live_state=IDLE, live_info={}, live_type=RAW, ) data = { "logGroupName": "/aws/lambda/dev-test-marsha-medialive", "state": "running", } signature = generate_hash("shared secret", json.dumps(data).encode("utf-8")) now = datetime(2018, 8, 8, tzinfo=pytz.utc) with mock.patch.object(timezone, "now", return_value=now): response = self.client.patch( f"/api/videos/{video.id}/update-live-state/", data, content_type="application/json", HTTP_X_MARSHA_SIGNATURE=signature, ) video.refresh_from_db() self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content), {"success": True}) self.assertEqual(video.live_state, RUNNING) self.assertEqual( video.live_info, { "cloudwatch": {"logGroupName": "/aws/lambda/dev-test-marsha-medialive"}, "started_at": "1533686400", }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_update_live_state_idle(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=CREATING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"idle\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n },\n )", "def __check_video(self):\n stream_status = self.communications.get_video()\n if self.__video_status[\"last_video_streaming\"] != stream_status: # Set initial video status\n self.__video_status[\"last_video_streaming\"] = stream_status\n self.__live_video_stream(stream_status)", "def test_api_video_update_live_state_unknown_video(self):\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n \"/api/videos/9087c52d-cb87-4fd0-9b57-d9f28a0c69cb/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n self.assertEqual(response.status_code, 404)", "def test_api_video_update_live_state_invalid_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n invalid_state = random.choice(\n [s[0] for s in LIVE_CHOICES if s[0] not in [IDLE, RUNNING, STOPPED]]\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": invalid_state,\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n json.loads(response.content),\n {\"state\": [f'\"{invalid_state}\" is not a valid choice.']},\n )", "def activate_video(video: dict):\n\tif video.get('state')=='INACTIVE':\n\t\tvideo_id = video.get('id')\n\t\tjson = { 'state': 'ACTIVE' }\n\t\tprint(f'Activating video ID {video_id}: {get_cms().UpdateVideo(video_id=video_id, json_body=json).status_code}')", "def test_success(self):\n with build_video(self.user, shortlink='') as video:\n eq_(video.shortlink, '')\n video = self._send(video, 'asdf')\n eq_(video.shortlink, 'asdf')\n eq_(video.state, 'pending')", "def test_api_video_update_live_state_same_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPED,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), 
mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_not_called()\n create_mediapackage_harvest_job_mock.assert_not_called()\n\n self.assertEqual(response.status_code, 200)", "def test_api_video_update_detail_token_user_upload_state(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n response = self.client.get(\n f\"/api/videos/{video.id}/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n data = json.loads(response.content)\n self.assertEqual(data[\"upload_state\"], \"pending\")\n data[\"upload_state\"] = \"ready\"\n\n response = self.client.put(\n f\"/api/videos/{video.id}/\",\n data,\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 200)\n video.refresh_from_db()\n self.assertEqual(video.upload_state, \"ready\")", "def test_api_video_update_live_state_invalid_signature(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"invalid secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(video.live_state, IDLE)", "def update_video(conn: sqlite3.Connection, cols_vals: dict, verbose=False):\n video_id = cols_vals.pop('id')\n query_string = generate_unconditional_update_query(list(cols_vals.keys()))\n values = list(cols_vals.values())\n values.append(video_id)\n if execute_query(conn, query_string, tuple(values)):\n if verbose:\n logger.info(f'Updated video {video_id!r}')\n return True", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def test_api_video_update_live_state_stopped(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock, mock.patch.object(\n api, \"close_room\"\n ) as mock_close_room:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_called_once()\n 
create_mediapackage_harvest_job_mock.assert_called_once()\n mock_close_room.assert_called_once_with(video.id)\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, STOPPED)\n self.assertEqual(video.upload_state, HARVESTING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"stopped_at\": \"1533686400\",\n },\n )", "def pause_video(self):\n if self.is_paused() is not None:\n print(\"Video already paused: {}\".format(self.is_paused()._title))\n elif self.is_playing() is not None:\n print(\"Pausing video: {}\".format(self.is_playing()._title))\n self.is_playing()._status = 2\n else:\n print(\"Cannot pause video: No video is currently playing\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def continue_video(self):\n if self.is_playing() is not None:\n print(\"Cannot continue video: Video is not paused\")\n elif self.is_paused() is not None:\n print(\"Continuing video: {}\".format(self.is_paused()._title))\n self.is_paused()._status = 1\n else:\n print(\"Cannot continue video: No video is currently playing\")", "async def button(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n query = update.callback_query\n\n # CallbackQueries need to be answered, even if no notification to the user is needed\n # Some clients may have trouble otherwise. See https://core.telegram.org/bots/api#callbackquery\n await query.answer()\n\n await query.edit_message_text(\n text=f\"🎬 Selected video: {self.filename_from_path(query.data)}\"\n )\n try:\n with open(query.data, \"rb\") as video_stream:\n await context.bot.send_video(context._chat_id, video=video_stream)\n except FileNotFoundError:\n await query.edit_message_text(\n text=f\"🎬 Selected video {self.filename_from_path(query.data)} not found\"\n )", "def continue_video(self):\n\n if self.is_paused:\n print(f\"Continuing video: {self.playing_now}\")\n self.is_paused = False\n elif self.is_playing and self.is_paused is False:\n print(\"Cannot continue video: Video is not paused\")\n elif self.is_playing is False:\n print(\"Cannot continue video: No video is currently playing\")", "def liveview(self):\n if self.liveviewButton.isChecked():\n self.save = False\n self.channelsOpen()\n self.liveviewStart()\n\n else:\n self.liveviewStop()", "def on_live(self, action):\n log.info(\"Click live updates\")\n if action == False:\n self.ui.actionContinue_Live_Updates.setChecked(True)\n\n self.ui.actionPause_Live_Updates.setChecked(False)\n self.live_updates = True", "def continue_video(self):\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n if self.pause:\n self.pause = False\n print(f\"Continuing video: {video_playing.title}\")\n elif not video_playing:\n print(f\"Cannot continue video: No video is currently playing\")\n else:\n print(\"Cannot continue video: Video is not paused\")\n # print(\"continue_video needs implementation\")", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def on_pause(self, action):\n log.info(\"Pause live updates: %s\", action)\n if action == False:\n 
self.ui.actionPause_Live_Updates.setChecked(True)\n\n self.ui.actionContinue_Live_Updates.setChecked(False)\n self.live_updates = False", "def pause_video(self):\n\n if self.is_playing and self.is_paused is False:\n print(f\"Pausing video: {self.playing_now}\")\n self.is_paused = True\n elif self.is_paused:\n print(f\"Video already paused: {self.playing_now}\")\n elif self.is_playing is False:\n print(\"Cannot pause video: No video is currently playing\")", "def pause_video(self):\n global value\n if value==1:\n value=2\n print(f\"Pausing video: {name}\")\n elif value==0:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {name}\")", "def test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def continue_video(self):\n if self.current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif not self.current_paused:\n print(\"Cannot continue video: Video is not paused\")\n elif self.current_paused:\n print(\"Continuing video:\", self.current_video.title)", "def continue_video(self):\n if self._current_video is None:\n print(\"Cannot continue video: No video is currently playing\")\n return\n elif not self._paused:\n print(\"Cannot continue video: Video is not paused\")\n return\n print(f\"Continuing video: {self._current_video.title}\")\n self._paused = False", "def pause_video(self):\n if self._paused:\n print(f\"Video already paused: {self._current_video.title}\")\n return\n elif self._current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n return\n print(f\"Pausing video: {self._current_video.title}\")\n self._paused = True", "def liveview(self):\n if self.liveviewButton.isChecked():\n# self.save = False\n self.paramChangedInitialize()\n self.openShutter(\"red\")\n self.liveviewStart()\n\n else:\n self.liveviewStop()" ]
[ "0.669423", "0.66067946", "0.6488981", "0.63831115", "0.6315425", "0.6314889", "0.6285026", "0.6158163", "0.6078076", "0.59171903", "0.5905363", "0.5857383", "0.58428895", "0.5838473", "0.5838473", "0.5814329", "0.579707", "0.57459235", "0.5738861", "0.5720667", "0.5709396", "0.5705734", "0.56969553", "0.56745154", "0.5637312", "0.5628523", "0.56032616", "0.56026417", "0.55881274", "0.5571546" ]
0.66788983
1
Updating the state to stopped should set the video to DELETED if the manifest is missing.
def test_api_video_update_live_state_stopped_missing_manifest(self): video = factories.VideoFactory( id="a1a21411-bf2f-4926-b97f-3c48a124d528", upload_state=PENDING, live_state=STOPPING, live_info={"mediapackage": {"channel": {"id": "channel1"}}}, live_type=RAW, ) data = { "logGroupName": "/aws/lambda/dev-test-marsha-medialive", "state": "stopped", } signature = generate_hash("shared secret", json.dumps(data).encode("utf-8")) now = datetime(2018, 8, 8, tzinfo=pytz.utc) with mock.patch.object(timezone, "now", return_value=now), mock.patch( "marsha.core.api.delete_aws_element_stack" ) as delete_aws_element_stack_mock, mock.patch( "marsha.core.api.create_mediapackage_harvest_job" ) as create_mediapackage_harvest_job_mock, mock.patch( "marsha.core.api.delete_mediapackage_channel" ) as delete_mediapackage_channel_mock, mock.patch.object( api, "close_room" ) as mock_close_room: create_mediapackage_harvest_job_mock.side_effect = ManifestMissingException response = self.client.patch( f"/api/videos/{video.id}/update-live-state/", data, content_type="application/json", HTTP_X_MARSHA_SIGNATURE=signature, ) delete_aws_element_stack_mock.assert_called_once() create_mediapackage_harvest_job_mock.assert_called_once() delete_mediapackage_channel_mock.assert_called_once() mock_close_room.assert_called_once_with(video.id) video.refresh_from_db() self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content), {"success": True}) self.assertEqual(video.live_state, None) self.assertEqual(video.upload_state, DELETED) self.assertEqual(video.live_info, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_update_live_state_unknown_video(self):\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n \"/api/videos/9087c52d-cb87-4fd0-9b57-d9f28a0c69cb/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n self.assertEqual(response.status_code, 404)", "def allow_video(self, video_id):\n video = self._video_library.get_video(video_id)\n if not self._video_library.get_video(video_id):\n print(\"Cannot remove flag from video: Video does not exist\")\n return\n if not video.flag:\n print(\"Cannot remove flag from video: Video is not flagged\")\n return\n print(f\"Successfully removed flag from video: {video.title}\")\n video.set_flag(None)", "def test_api_video_update_live_state_stopped(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock, mock.patch.object(\n api, \"close_room\"\n ) as mock_close_room:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_called_once()\n create_mediapackage_harvest_job_mock.assert_called_once()\n mock_close_room.assert_called_once_with(video.id)\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, STOPPED)\n self.assertEqual(video.upload_state, HARVESTING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"stopped_at\": \"1533686400\",\n },\n )", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def allow_video(self, video_id):\n if self._video_library.get_video(video_id) is None:\n print(\"Cannot remove flag from video: Video does not exist\")\n elif not self._video_library.get_video(video_id).flagged:\n print(\"Cannot remove flag from video: Video is not flagged\")\n else:\n print(f\"Successfully removed flag from video: {self._video_library.get_video(video_id).title}\")\n self._video_library.get_video(video_id).flagged = False\n self._video_library.get_video(video_id).flag_reason = \"Not supplied\"", "def __check_video(self):\n stream_status = self.communications.get_video()\n if self.__video_status[\"last_video_streaming\"] != stream_status: # Set initial video status\n self.__video_status[\"last_video_streaming\"] = stream_status\n self.__live_video_stream(stream_status)", "def stop_video(self):\n stopped_video = self.video_id\n if stopped_video == None:\n print(\"Cannot stop video: No video is currently 
playing\")\n else:\n print(f\"Stopping video: {stopped_video}\")", "def stop_video(self):\n global value\n if value>0:\n value=0\n print(f\"Stopping video: {name}\")\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def stop_state(self):\n\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")", "def stop(self,event=None):\r\n # If no video data\r\n if self.isEmpty():\r\n return\r\n if self.hasAudio:\r\n mixer.music.stop()\r\n self.state = VideoPlayer.State.STOPPED\r\n self.progress = 0\r\n self.startTimestamp = time.time()", "def flag_video(self, video_id, flag_reason=\"Not supplied\"):\n video = self._video_library.get_video(video_id)\n if not video:\n print(\"Cannot flag video: Video does not exist\")\n return\n if video.flag is not None:\n print(\"Cannot flag video: Video is already flagged\")\n return\n video.set_flag(flag_reason)\n if self._current_video and self._current_video.video_id == video.video_id:\n self.stop_video()\n print(f\"Successfully flagged video: {video._title} (reason: {flag_reason})\")", "def test_api_video_update_live_state_same_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPED,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_not_called()\n create_mediapackage_harvest_job_mock.assert_not_called()\n\n self.assertEqual(response.status_code, 200)", "def reset_activated(self):\n logging.debug(\"Reset activated\")\n\n if self.playerType == VLC:\n\n self.pause_video()\n if self.playMode == FFMPEG:\n\n self.FFmpegGlobalFrame = 0 # position to init\n if self.simultaneousMedia:\n self.FFmpegGlobalFrame2 = 0 # position to init\n\n self.ffmpegTimerOut()\n\n else: # playmode VLC\n\n self.mediaplayer.set_time(0)\n\n # second video together\n if self.simultaneousMedia:\n self.mediaplayer2.set_time(0)\n\n self.timer_out()\n self.timer_spectro_out()", "def test_api_video_update_live_state_invalid_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n 
invalid_state = random.choice(\n [s[0] for s in LIVE_CHOICES if s[0] not in [IDLE, RUNNING, STOPPED]]\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": invalid_state,\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n json.loads(response.content),\n {\"state\": [f'\"{invalid_state}\" is not a valid choice.']},\n )", "def onStateChanged(self):\n state = self.mediaPlayer.state()\n if state == 0:\n self.onVideoStop()\n elif state == 1:\n self.onVideoStart()\n elif state == 2:\n self.onVideoPause()\n else:\n raise ValueError(\"Unknown state {}\".format(state))", "def stop_video(self):\n if self._current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n print(f\"Stopping video: {self._current_video.title}\")\n self._current_video = None\n self._paused = False", "def test_api_video_update_live_state_invalid_signature(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"invalid secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(video.live_state, IDLE)", "def on_worker_unpaused(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)\n self.unpausing = False", "def flag_video(self, video_id, flag_reason=\"\"):\n object=self._video_library.get_video(video_id)\n \n print(f\"{object}\")", "def stop_video(self):\n if self.is_playing:\n print(f\"Stopping video: {self.playing_now}\")\n self.is_playing = False\n else:\n print(\"Cannot stop video: No video is currently playing\")", "async def set_stopped(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Stopped if value else models.player.PlayState.Playing)", "def test_api_video_update_live_state_idle(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=CREATING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"idle\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n },\n )", "def test_api_video_update_live_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n 
live_state=IDLE,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now):\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, RUNNING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"started_at\": \"1533686400\",\n },\n )", "def stop_video(self):\n\n if self.current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(\"Stopping video:\", self.current_video.title)\n self.current_video = None", "def continue_video(self):\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n if self.pause:\n self.pause = False\n print(f\"Continuing video: {video_playing.title}\")\n elif not video_playing:\n print(f\"Cannot continue video: No video is currently playing\")\n else:\n print(\"Cannot continue video: Video is not paused\")\n # print(\"continue_video needs implementation\")", "def stop_videos(self):\n if ((not self.playing) and (self.worker is None)) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.shutdown = True\n\n # Force the background worker to leave run()\n self.worker.force_stop()" ]
[ "0.61517084", "0.6147886", "0.6037828", "0.6032262", "0.6032262", "0.5964834", "0.5944277", "0.5937539", "0.58444035", "0.58058584", "0.57293636", "0.5729041", "0.5718966", "0.5702582", "0.5674354", "0.566203", "0.5659448", "0.5617334", "0.55615175", "0.554622", "0.5542855", "0.55365", "0.5529589", "0.55154234", "0.55078197", "0.5474792", "0.546477", "0.5448725", "0.54390967", "0.543812" ]
0.6312514
0
Live state update with an invalid signature should fail.
def test_api_video_update_live_state_invalid_signature(self): video = factories.VideoFactory( id="a1a21411-bf2f-4926-b97f-3c48a124d528", upload_state=PENDING, live_state=IDLE, live_type=RAW, ) data = { "logGroupName": "/aws/lambda/dev-test-marsha-medialive", "state": "running", } signature = generate_hash("invalid secret", json.dumps(data).encode("utf-8")) response = self.client.patch( f"/api/videos/{video.id}/update-live-state/", data, content_type="application/json", HTTP_X_MARSHA_SIGNATURE=signature, ) video.refresh_from_db() self.assertEqual(response.status_code, 403) self.assertEqual(video.live_state, IDLE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_update_live_state_unknown_video(self):\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n \"/api/videos/9087c52d-cb87-4fd0-9b57-d9f28a0c69cb/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n self.assertEqual(response.status_code, 404)", "def test_api_video_update_live_state_invalid_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n invalid_state = random.choice(\n [s[0] for s in LIVE_CHOICES if s[0] not in [IDLE, RUNNING, STOPPED]]\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": invalid_state,\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n json.loads(response.content),\n {\"state\": [f'\"{invalid_state}\" is not a valid choice.']},\n )", "def test_block_bad_state(self):\n pass", "def test_update_node_state_smartfail(self):\n pass", "def test_update_study_state_missing(self):\n with self.assertRaises(ValueError):\n self.storage.update_study_state('missing',\n study_pb2.StudySpec.STATE_ENABLED)", "def test_wrong_signature(self):\r\n response = requests.post(self.launch_uri, data=self.payload)\r\n self.assertIn('Wrong LTI signature', response.content)", "def invalid(self, state):\n log(\"current state '{0}' cannot process event '{1}'\".format(state.name, self.name))", "def invalid(self):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def test_update_state(self):\n pass", "def test_update_state4(self):\n pass", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def test_update_state2(self):\n pass", "def test_api_video_update_live_state_same_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPED,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_not_called()\n create_mediapackage_harvest_job_mock.assert_not_called()\n\n self.assertEqual(response.status_code, 200)", "def assume_state(self, state):\n s = self.get_state()\n if s.id != state:\n raise GrblStateError(s)", "def dummy_update( self ):\r\n pass", "def test_webhook_bad_signature(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': 
{\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Invalid Travis CI webhook signature for status update %d.'\n % self.status_update.pk)", "def test_update_state3(self):\n pass", "def test_api_video_update_live_state_idle(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=CREATING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"idle\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n },\n )", "def test_update_state1(self):\n pass", "def test_api_video_update_live_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now):\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, RUNNING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"started_at\": \"1533686400\",\n },\n )", "def state_failsafe_validate(cfg, app, win, events):", "def test_block_bad_signature(self):\n pass", "def test_fail_signature_fragment_value_wrong(self):\n # Don't forget to adjust the change transaction, in order to ensure\n # the bundle has a zero balance.\n self.bundle[5].value = -1\n self.bundle[-1].value += 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 5 has invalid value (expected 0, actual -1).',\n ],\n )", "def check_state(self):\n pass", "def test_state_after_failure(self):\n pass", "def test_api_video_update_live_state_stopped_missing_manifest(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPING,\n live_info={\"mediapackage\": {\"channel\": {\"id\": \"channel1\"}}},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with 
mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock, mock.patch(\n \"marsha.core.api.delete_mediapackage_channel\"\n ) as delete_mediapackage_channel_mock, mock.patch.object(\n api, \"close_room\"\n ) as mock_close_room:\n create_mediapackage_harvest_job_mock.side_effect = ManifestMissingException\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_called_once()\n create_mediapackage_harvest_job_mock.assert_called_once()\n delete_mediapackage_channel_mock.assert_called_once()\n mock_close_room.assert_called_once_with(video.id)\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, None)\n self.assertEqual(video.upload_state, DELETED)\n self.assertEqual(video.live_info, None)", "def state(self, state: str) -> None:", "def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)", "def test_wrong_pwmerchactive_update_11(self, _authorization):\n user1.headers['x-token'] = user1.headers['x-token'] + '1'\n user1.pwmerchactive(method='update', params={'m_lid': user1.merchant1.lid, 'payway': 'visamc', 'is_active': True})\n assert user1.resp_pwmerchactive == {'code': -32034, 'data': {'field': 'token', 'reason': 'Expired or invalid',\n 'value': user1.headers['x-token']}, 'message': 'EStateUnauth'}" ]
[ "0.7028745", "0.70109797", "0.6085967", "0.6063225", "0.6047554", "0.5965111", "0.59549403", "0.59207034", "0.59040105", "0.58996487", "0.58742315", "0.5871743", "0.58040553", "0.57979226", "0.5776625", "0.5771004", "0.5761674", "0.5754233", "0.57024837", "0.5696791", "0.5632528", "0.5595588", "0.5565617", "0.548462", "0.5482038", "0.5470042", "0.546648", "0.54643524", "0.5450666", "0.5443921" ]
0.79467356
0
Live state update with an invalid state should fail.
def test_api_video_update_live_state_invalid_state(self): video = factories.VideoFactory( id="a1a21411-bf2f-4926-b97f-3c48a124d528", upload_state=PENDING, live_state=IDLE, live_type=RAW, ) invalid_state = random.choice( [s[0] for s in LIVE_CHOICES if s[0] not in [IDLE, RUNNING, STOPPED]] ) data = { "logGroupName": "/aws/lambda/dev-test-marsha-medialive", "state": invalid_state, } signature = generate_hash("shared secret", json.dumps(data).encode("utf-8")) response = self.client.patch( f"/api/videos/{video.id}/update-live-state/", data, content_type="application/json", HTTP_X_MARSHA_SIGNATURE=signature, ) video.refresh_from_db() self.assertEqual(response.status_code, 400) self.assertEqual(video.live_state, IDLE) self.assertEqual( json.loads(response.content), {"state": [f'"{invalid_state}" is not a valid choice.']}, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invalid(self, state):\n log(\"current state '{0}' cannot process event '{1}'\".format(state.name, self.name))", "def test_update_node_state_smartfail(self):\n pass", "def test_state_after_failure(self):\n pass", "def state_failsafe_validate(cfg, app, win, events):", "def test_block_bad_state(self):\n pass", "def test_api_video_update_live_state_invalid_signature(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"invalid secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(video.live_state, IDLE)", "def test_api_video_update_live_state_unknown_video(self):\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n \"/api/videos/9087c52d-cb87-4fd0-9b57-d9f28a0c69cb/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n self.assertEqual(response.status_code, 404)", "def state_failsafe_do(cfg, app, win, events):", "def invalid(self):\n pass", "def test_update_study_state_missing(self):\n with self.assertRaises(ValueError):\n self.storage.update_study_state('missing',\n study_pb2.StudySpec.STATE_ENABLED)", "def test_update_state(self):\n pass", "def on_fail(self):\n self.state = FAILURE\n if self.graph_uuid:\n graph_dict = self.app.backend_adapter.graph_get(self.graph_uuid)\n graph = Graph.from_dict(graph_dict)\n current = graph.vertex_by_uuid(self.uuid)\n current.state = self.state\n current.error = self.error\n current.exc = self.exc\n self.update_graph_states(graph=graph, vertex=current, graph_state=self.state, error=self.error)\n self._reject()", "def state_not_changed(self, curstate, event, *args, **kwargs):", "def state_not_changed(self, curstate, event, *args, **kwargs):", "def test_validate_self_invalid_transition_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q5'] = self.dtm1.transitions['q0']\n self.dtm1.validate_self()", "def test_validate_self_invalid_transition_result_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q0']['y'] = ('q5', 'y', 'R')\n self.dtm1.validate_self()", "def assume_state(self, state):\n s = self.get_state()\n if s.id != state:\n raise GrblStateError(s)", "def test_update_state2(self):\n pass", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def is_invalid(self):\n self._is_valid = False", "def test_update_state1(self):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "async def 
test_api_state_change_with_bad_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.post(\n \"/api/states/test_entity.that_does_not_exist\", json={}\n )\n\n assert resp.status == HTTPStatus.BAD_REQUEST", "def test_update_state3(self):\n pass", "def check_state(self):\n pass", "def mark_failure(self):\n LOGGER.debug('Marking current_state as: %s', self.States.FAILED)\n self.current_state = self.States.FAILED", "def test_ensure_state_untouched_if_not_necessary(self, setState):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('QE')\n setState.assert_not_called()", "def test_update_state4(self):\n pass", "def changeState(curr, new):\n trans = Transitions()\n if new not in trans.states():\n raise TaskStateException(\"New '%s' status is not valid\" %new)\n if curr not in trans:\n raise TaskStateException(\"Current '%s' status is not valid\" %curr)\n if new not in trans[curr]:\n raise TaskStateException(\"Transition from '%s' to '%s' is forbidden.\" %(curr, new))\n ## transition is valid\n return new" ]
[ "0.6867225", "0.6746515", "0.670642", "0.6673287", "0.66131973", "0.65411866", "0.6478215", "0.6466603", "0.64601433", "0.64423186", "0.64142185", "0.6396009", "0.6346287", "0.6346287", "0.63296247", "0.63008726", "0.6247246", "0.62421614", "0.62149596", "0.6208199", "0.6187225", "0.61514515", "0.6148859", "0.61394465", "0.61194676", "0.60997415", "0.60807407", "0.60724664", "0.6071325", "0.60497874" ]
0.75381476
0
Live state update with an unknown video should fail.
def test_api_video_update_live_state_unknown_video(self): data = { "logGroupName": "/aws/lambda/dev-test-marsha-medialive", "state": "running", } signature = generate_hash("shared secret", json.dumps(data).encode("utf-8")) response = self.client.patch( "/api/videos/9087c52d-cb87-4fd0-9b57-d9f28a0c69cb/update-live-state/", data, content_type="application/json", HTTP_X_MARSHA_SIGNATURE=signature, ) self.assertEqual(response.status_code, 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_video_update_live_state_invalid_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n invalid_state = random.choice(\n [s[0] for s in LIVE_CHOICES if s[0] not in [IDLE, RUNNING, STOPPED]]\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": invalid_state,\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n json.loads(response.content),\n {\"state\": [f'\"{invalid_state}\" is not a valid choice.']},\n )", "def test_api_video_update_live_state_idle(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=CREATING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"idle\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, IDLE)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n },\n )", "def test_api_video_update_live_state_invalid_signature(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"invalid secret\", json.dumps(data).encode(\"utf-8\"))\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(video.live_state, IDLE)", "def test_api_video_update_live_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=IDLE,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"running\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now):\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, RUNNING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"started_at\": \"1533686400\",\n },\n )", "def 
test_api_video_update_live_state_same_state(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPED,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_not_called()\n create_mediapackage_harvest_job_mock.assert_not_called()\n\n self.assertEqual(response.status_code, 200)", "def __check_video(self):\n stream_status = self.communications.get_video()\n if self.__video_status[\"last_video_streaming\"] != stream_status: # Set initial video status\n self.__video_status[\"last_video_streaming\"] = stream_status\n self.__live_video_stream(stream_status)", "def test_api_video_update_live_state_stopped(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPING,\n live_info={},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock, mock.patch.object(\n api, \"close_room\"\n ) as mock_close_room:\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_called_once()\n create_mediapackage_harvest_job_mock.assert_called_once()\n mock_close_room.assert_called_once_with(video.id)\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, STOPPED)\n self.assertEqual(video.upload_state, HARVESTING)\n self.assertEqual(\n video.live_info,\n {\n \"cloudwatch\": {\"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\"},\n \"stopped_at\": \"1533686400\",\n },\n )", "def test_api_instructor_start_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def 
test_api_instructor_stop_non_live_video(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=random.choice([s[0] for s in STATE_CHOICES]),\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_instructor_stop_non_running_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != \"running\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"stop_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/stop-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def flag_video(self, video_id, flag_reason=\"\"):\n print(\"flag_video needs implementation\")", "def test_api_video_update_live_state_stopped_missing_manifest(self):\n video = factories.VideoFactory(\n id=\"a1a21411-bf2f-4926-b97f-3c48a124d528\",\n upload_state=PENDING,\n live_state=STOPPING,\n live_info={\"mediapackage\": {\"channel\": {\"id\": \"channel1\"}}},\n live_type=RAW,\n )\n data = {\n \"logGroupName\": \"/aws/lambda/dev-test-marsha-medialive\",\n \"state\": \"stopped\",\n }\n signature = generate_hash(\"shared secret\", json.dumps(data).encode(\"utf-8\"))\n\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch(\n \"marsha.core.api.delete_aws_element_stack\"\n ) as delete_aws_element_stack_mock, mock.patch(\n \"marsha.core.api.create_mediapackage_harvest_job\"\n ) as create_mediapackage_harvest_job_mock, mock.patch(\n \"marsha.core.api.delete_mediapackage_channel\"\n ) as delete_mediapackage_channel_mock, mock.patch.object(\n api, \"close_room\"\n ) as mock_close_room:\n create_mediapackage_harvest_job_mock.side_effect = ManifestMissingException\n response = self.client.patch(\n f\"/api/videos/{video.id}/update-live-state/\",\n data,\n content_type=\"application/json\",\n HTTP_X_MARSHA_SIGNATURE=signature,\n )\n delete_aws_element_stack_mock.assert_called_once()\n create_mediapackage_harvest_job_mock.assert_called_once()\n delete_mediapackage_channel_mock.assert_called_once()\n mock_close_room.assert_called_once_with(video.id)\n\n video.refresh_from_db()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content), {\"success\": True})\n self.assertEqual(video.live_state, None)\n self.assertEqual(video.upload_state, DELETED)\n self.assertEqual(video.live_info, None)", "def test_api_instructor_start_non_idle_live(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n upload_state=PENDING,\n live_state=random.choice([s[0] for s in LIVE_CHOICES if s[0] != 
\"idle\"]),\n live_type=RAW,\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # start a live video,\n with mock.patch.object(api, \"start_live_channel\"):\n response = self.client.post(\n f\"/api/videos/{video.id}/start-live/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)", "def test_api_livesession_read_detail_unknown_video(self):\n starting_at = timezone.now() + timedelta(days=5)\n video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)\n livesession = AnonymousLiveSessionFactory(video=video)\n # token with no user information\n jwt_token = PlaylistAccessTokenFactory()\n response = self.client.get(\n self._get_url(livesession.video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assert_response_resource_not_accessible(response)", "def test_error(self):\n with build_video(self.user) as video:\n video = self._send(video, None)\n eq_(video.state, 'error')", "def media_status_changed(self, state):\n if state == self.MediaStatus.LoadedMedia.value:\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)\n self.timer.start(self.timer_interval)\n elif state == self.MediaStatus.EndOfMedia.value:\n if self.state_time_remain > self.timer_interval / 1000:\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def onStateChanged(self):\n state = self.mediaPlayer.state()\n if state == 0:\n self.onVideoStop()\n elif state == 1:\n self.onVideoStart()\n elif state == 2:\n self.onVideoPause()\n else:\n raise ValueError(\"Unknown state {}\".format(state))", "def activate_video(video: dict):\n\tif video.get('state')=='INACTIVE':\n\t\tvideo_id = video.get('id')\n\t\tjson = { 'state': 'ACTIVE' }\n\t\tprint(f'Activating video ID {video_id}: {get_cms().UpdateVideo(video_id=video_id, json_body=json).status_code}')", "def continue_video(self):\n global value\n if value==2:\n global name\n value=1\n print(f\"Continuing video: {name}\")\n elif value==1:\n\n\n print(f\"Cannot continue video: Video is not paused\")", "def videostart_failed(self):\n # type: () -> bool\n return self._videostart_failed", "def video_no_found(error):\n return {'message': 'video does not exist'}, 404", "def play_video(self, video_id):\n videos = self._video_library.get_all_videos()\n video_id_list = []\n for vid in videos:\n video_id_list.append(vid.video_id)\n if vid.video_id == video_id:\n if vid.video_id not in self.flagged_videos.keys():\n if self.is_playing is False:\n print(f\"Playing video: {vid.title}\")\n self.playing_now = vid.title\n self.is_playing = True\n self.is_paused = False\n elif self.is_playing is True:\n print(f\"Stopping video: {self.playing_now}\")\n print(f\"Playing video: {vid.title}\")\n self.playing_now = vid.title\n self.is_paused = False\n if video_id not in video_id_list:\n print(\"Cannot play video: Video does not exist\")\n if video_id in self.flagged_videos.keys():\n print(f\"Cannot play video: Video is currently flagged (reason: {self.flagged_videos[video_id]})\")", "def check(self):\n #\n # *****************\n # *****************\n # TODO: Check really if video is valid\n # *****************\n # *****************\n return True", "def allow_video(self, video_id):\n 
print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def test_api_video_instructor_initiate_live_invalid_type(self):\n video = factories.VideoFactory(\n id=\"27a23f52-3379-46a2-94fa-697b59cfe3c7\",\n playlist__title=\"foo bar\",\n playlist__lti_id=\"course-v1:ufr+mathematics+00001\",\n )\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n jwt_token.payload[\"permissions\"] = {\"can_update\": True}\n\n # initiate a live video,\n # It should generate a key file with the Unix timestamp of the present time\n now = datetime(2018, 8, 8, tzinfo=pytz.utc)\n live_info = {\n \"medialive\": {\n \"input\": {\n \"id\": \"medialive_input_1\",\n \"endpoints\": [\"https://live_endpoint1\", \"https://live_endpoint2\"],\n },\n \"channel\": {\"id\": \"medialive_channel_1\"},\n },\n \"mediapackage\": {\n \"id\": \"mediapackage_channel_1\",\n \"endpoints\": {\n \"hls\": {\n \"id\": \"endpoint1\",\n \"url\": \"https://channel_endpoint1/live.m3u8\",\n },\n },\n },\n }\n with mock.patch.object(timezone, \"now\", return_value=now), mock.patch.object(\n api, \"create_live_stream\", return_value=live_info\n ):\n response = self.client.post(\n f\"/api/videos/{video.id}/initiate-live/\",\n {\"type\": \"invalid\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 400)\n content = json.loads(response.content)\n\n self.assertEqual(content, {\"type\": ['\"invalid\" is not a valid choice.']})" ]
[ "0.7784363", "0.72019297", "0.7105363", "0.7011971", "0.68896437", "0.65897465", "0.6572008", "0.643296", "0.6379981", "0.6292681", "0.6288871", "0.6288871", "0.627914", "0.6160698", "0.61350566", "0.61238045", "0.59614325", "0.58788806", "0.58284414", "0.5731805", "0.5718009", "0.57121396", "0.570344", "0.56815505", "0.56602806", "0.5624756", "0.5624756", "0.5624756", "0.5624756", "0.5616062" ]
0.80395156
0
Computes the min edit distance from source to target
def min_edit_distance(source, target):
    source_words = source.split()
    target_words = target.split()
    m = len(source_words)
    n = len(target_words)
    distance = [[0 for i in range(n+1)] for j in range(m+1)]
    for i in range(1, m+1):
        distance[i][0] = distance[i-1][0] + 1
    for j in range(1, n+1):
        distance[0][j] = distance[0][j-1] + 1
    for i in range(1, m+1):
        for j in range(1, n+1):
            distance[i][j] = min(distance[i-1][j] + 1,
                                 distance[i][j-1] + 1,
                                 distance[i-1][j-1] + (0 if source_words[i-1] == target_words[j-1] else 1))
    return distance[m][n]
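A brief worked example of the function above (the sentences are illustrative assumptions, not values from this record); the distance is counted over whitespace-separated words:

min_edit_distance("the cat sat", "the cat sat")       # 0 -- identical word sequences
min_edit_distance("the cat sat", "the dog sat")       # 1 -- one word substituted
min_edit_distance("the cat sat", "the cat sat down")  # 1 -- one word inserted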
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_edit_distance_pro(\n source: str,\n target: str,\n del_cost=1,\n ins_cost=1,\n sub_cost=2,\n):\n # Use three tables for up, left and northwest or,\n # Uses one tables and represents up,\n # left and northwest as 1, 3, 5 respectivly?\n # 1: up, 3: left, 5: northwest, 4: up + left\n # 6: up + northwest, 8: left + northwest,\n # 9: up + left + northwest\n source_len = len(source)\n target_len = len(target)\n\n # Init matrix\n matrix = [[0 for _ in range(target_len + 1)]\n for _ in range(target_len + 1)]\n backtrace_table = deepcopy(matrix)\n backtrace_table[0][0] = -1\n for idx in range(1, source_len + 1):\n matrix[idx][0] = idx\n backtrace_table[idx][0] = 1\n for idx in range(1, target_len + 1):\n matrix[0][idx] = idx\n backtrace_table[0][idx] = 3\n\n traces_map = [1, 3, 5]\n min_cost = 0\n for i in range(1, source_len + 1):\n for j in range(1, target_len + 1):\n up = matrix[i - 1][j] + del_cost # pylint: disable=invalid-name\n left = matrix[i][j - 1] + ins_cost\n northwest = matrix[i - 1][j - 1] \\\n + (sub_cost if source[i - 1] != target[j - 1] else 0)\n traces = [up, left, northwest]\n min_cost = min(traces)\n backtrace_table[i][j] = sum(traces_map[idx] for idx in range(3) \\\n if traces[idx] == min_cost)\n matrix[i][j] = min_cost\n\n alignment = trace_back(backtrace_table, source, target)\n print('\\n'.join(' '.join(triple[idx] for triple in alignment)\n for idx in range(3)))\n\n return min_cost, alignment", "def min_edit_distance(source, target, del_cost = 1, ins_cost = 1, sub_cost = 2):\r\n n = len(source)\r\n m = len(target)\r\n D = np.zeros((n+1, m+1))\r\n for i in range(1, n+1):\r\n D[i, 0] = D[i-1, 0] + del_cost\r\n for j in range(1, m+1):\r\n D[0, j] = D[0, j-1] + ins_cost\r\n \r\n for i in range(1, n+1):\r\n for j in range(1, m+1):\r\n deletion = D[i-1, j] + del_cost\r\n insertion = D[i, j-1] + ins_cost\r\n substitution = D[i-1, j-1] + calculate_sub_cost(source[i-1], target[j-1], sub_cost)\r\n D[i, j] = min(deletion, insertion, substitution)\r\n backtrace = []\r\n if substitution == D[i,j]:\r\n backtrace.append('diagonal')\r\n if insertion == D[i,j]:\r\n backtrace.append('up')\r\n if deletion == D[i,j]:\r\n backtrace.append('left')\r\n return D[n, m]", "def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)", "def distance(self, source, target):\r\n raise NotImplementedError('Distance calculation not implemented yet')", "def edit_distance(self, other):\r\n union = len(self) + len(other)\r\n return 1.0 - 2.0*(self.intersection(other)/union)", "def edit_distance(self, other):\n union = len(self) + len(other)\n return 1.0 - 2.0*(self.intersection(other)/union)", "def leveinshtein_distance(source,target):\r\n\t#Step 1\r\n\ts_len=len(source)\r\n\tt_len=len(target)\r\n\tcost=0\r\n\tif(s_len==0):\r\n\t\treturn t_len\r\n\tif(t_len==0):\r\n\t\treturn s_len\r\n\tprint(\"Dimensions:\\n\\tN:%d\\n\\tM:%d\"%(s_len,t_len))\r\n\t#Step 2\r\n\tmatrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]\r\n\t#Initialize first row 0..s_len\r\n\tfor idx in range(0,s_len+1):\r\n\t\tmatrix[idx][0]=idx\r\n\t#Initialize the first column 0..t_len\r\n\tfor idx in range(0, t_len+1):\r\n\t\tmatrix[0][idx]=idx\r\n\tprint(\"===Original===\")\r\n\tprint_matrix(matrix,source,target)\r\n\t#Step 3\r\n\tfor i in range(1,s_len+1):\r\n\t\tch=source[i-1]\r\n\t\t#print(ch)\r\n\t\t#Step 4\r\n\t\tfor j in range(1,t_len+1):\r\n\t\t\t#print(\">%s\"%target[j-1])\r\n\t\t\t#Step 5\r\n\t\t\tif 
ch==target[j-1]:\r\n\t\t\t\tcost=0\r\n\t\t\telse:\r\n\t\t\t\tcost=1\r\n\t\t\t#Step 6\r\n\t\t\t\r\n\t\t\t#print(\"(i,j)=>(%d,%d)\"%(i,j))\r\n\t\t\t#print(matrix[i][j])\r\n\t\t\tmatrix[i][j]=minimum(\r\n\t\t\t\tmatrix[i-1][j]+1,\r\n\t\t\t\tmatrix[i][j-1]+1,\r\n\t\t\t\tmatrix[i-1][j-1]+cost\r\n\t\t\t)\r\n\tprint(\"===Final Matrix===\")\r\n\tprint_matrix(matrix,source,target)\r\n\treturn matrix[s_len-1][t_len-1]", "def minimum_distance(self, state, *args, **kwargs):\n raise NotImplementedError", "def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s", "def create_cost_soft_min_distance_valid(self, c, s, v):\n c_shape = c.get_shape().as_list(); \n s_shape = s.get_shape().as_list();\n \n #expand matrices\n cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]); \n mm = tf.reduce_max(v); #hack for batch size = 1\n ss = tf.slice(s, [0,0,0], [-1,mm,-1]);\n ss = tf.reshape(ss, [s_shape[0], s_shape[1], s_shape[2], 1]);\n ss = tf.transpose(ss, perm = [0,3,2,1]);\n cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);\n ss = tf.tile(ss, [1, c_shape[0], 1, 1]);\n \n #pairwise distances\n dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));\n dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here \n \n #softmin\n distmin = tf.reduce_sum(tf.mul(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,\"float32\"), dist2)), dist2),reduction_indices = 1);\n return tf.reduce_mean(distmin);", "def distance( self, source, target ):\n return nx.shortest_path_length(self._G, source, target)", "def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))", "def create_cost_soft_min_distance(self, c, s):\n c_shape = c.get_shape().as_list(); \n s_shape = s.get_shape().as_list();\n \n #expand matrices\n cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]); \n ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1]);\n ss = tf.transpose(ss, perm = [0,3,2,1]);\n cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);\n ss = tf.tile(ss, [1, c_shape[0], 1, 1]);\n \n #pairwise distances\n dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));\n dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here \n \n #softmin\n distmin = tf.reduce_sum(tf.mul(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,\"float32\"), dist2)), dist2),reduction_indices = 1);\n return tf.reduce_mean(distmin);", "def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])", "def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = 
mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance", "def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]", "def distance(currX, currY, targetX, targetY):\n return abs(currX - targetX) + abs(currY - targetY)", "def min_dst(tet1, tet2, allow_zero=True):\n dists = ssd.cdist(tet1, tet2)\n if not allow_zero:\n dists[dists == 0] = np.inf\n return dists.min(axis=1)\n\n #dists = np.empty(tet1.shape[0])\n #for i, t1 in enumerate(tet1):\n # min_dist = np.sum((tet2 - t1) ** 2, axis=1)\n # if not allow_zero:\n # dists[i] = np.min(min_dist[min_dist != 0])\n # else:\n # dists[i] = np.min(min_dist)\n #return np.sqrt(dists)", "def calculateDistance(self, source, destination):\n dx = source.getX() - destination.getX();\n dy = source.getY() - destination.getY();\n return int(math.ceil(math.sqrt(dx * dx + dy * dy)))", "def minimalDistance(a1, a2, b1, b2):\n adir = a2 - a1\n bdir = b2 - b1\n amid = a1 + 0.5 * adir\n s = b1 - amid\n A = np.dot(bdir, bdir)\n B_2 = np.dot(bdir, s)\n lambda_beta = - B_2 / A\n bOpt = lambda_beta * bdir + b1\n s = a1 - bOpt\n A = np.dot(adir, adir)\n B_2 = np.dot(adir, s)\n lambda_alpha = - B_2 / A\n aOpt = lambda_alpha * adir + a1\n Delta = bOpt - aOpt\n return np.sqrt(np.dot(Delta, Delta))", "def edit_distance(x, y):\n\n global recursion_depth\n global num_function_calls\n recursion_depth += 1\n num_function_calls += 1\n indent = \" \" * recursion_depth\n print(\"%sBEGIN edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n n = len(x)\n m = len(y)\n if n == 0:\n ed = m\n elif m == 0:\n ed = n\n else:\n ed1 = edit_distance(x, y[0:m-1]) + 1\n ed2 = edit_distance(x[0:n-1], y) + 1\n ed3 = edit_distance(x[0:n-1], y[0:m-1]) + (1 if x[-1] != y[-1] else 0)\n ed = min(ed1, ed2, ed3)\n print(\"%sEND edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n recursion_depth -= 1\n return ed", "def extended_min_edit_distance(x: str, y: str) -> tuple:\n m = _get_edit_distance_matrix(x, y)\n\n o = _get_coordinates_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n\n coordinates = (i - 1, j - 1)\n\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n _min = -1\n if m[i][j - 1] + 1 < m[i - 1][j] + 1:\n _min = m[i][j - 1] + 1\n coordinates = (i, j - 1)\n else:\n _min = m[i - 1][j] + 1\n coordinates = (i - 1, j)\n\n if m[i - 1][j - 1] + 1 < _min:\n _min = m[i - 1][j - 1] + 1\n coordinates = (i - 1, j - 1)\n\n m[i][j] = _min\n o[i][j] = coordinates\n\n return m[len(x)][len(y)], o", "def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < 
minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def find_minimum_path_cost(cls, start_point: Coordination, current_point: Coordination) -> int:\n return abs(start_point.x - current_point.x) + abs(start_point.y - current_point.y)", "def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object:\n m = _get_edit_distance_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n # How do we obtain the m[i][j] value?\n # We need to look at three positions while iterating:\n # 1. m[i - 1][j -1]\n # 2. m[i][j - 1]\n # 3. m[i - 1][j]\n\n # x[i - 1] and y[j - 1] are the characters.\n\n # Note: i and j start from 1.\n\n # If the characters are equal, we don't need to perform any of the\n # operations: insertion, deletion or substitution, and the minimum\n # edit distance to convert x[i - 1] to y[j - 1] is the same as the\n # one to convert x[i] to s[j], because, as stated above, x[i - 1]\n # and y[j - 1] are equal, so we don't have to perform any other\n # operation.\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1]\n [j] + 1, m[i][j - 1] + 1)\n\n return m[len(x)][len(y)] if not return_matrix else m", "def test_distance_source(self):\n s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n s2 = Source([[20, 20], [20, 30]], values=[1.0, 2.0])\n assert(s1.distance(s2) == sqrt(200))", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]", "def calc_source_blend_distance(source,blend,RC,log):\n\n\n log.info('\\n')\n\n source.calc_distance(log)\n\n try:\n log.info('Inferred source distance: '+str(source.D)+' +/- '+str(source.sig_D)+' pc')\n\n log.info('Inferred source distance if its a small red giant: '+\\\n str(source.D_small_giant)+' +/- '+str(source.sig_D_small_giant)+' pc')\n\n log.info('Inferred source distance if its a large red giant: '+\\\n str(source.D_large_giant)+' +/- '+str(source.sig_D_large_giant)+' pc')\n\n except AttributeError:\n pass\n\n blend.calc_distance(log)\n\n try:\n log.info('Inferred blend distance: '+str(blend.D)+' +/- '+str(blend.sig_D)+' pc')\n\n except AttributeError:\n pass\n\n (Rstar, sig_Rstar) = stellar_radius_relations.scale_source_distance(source.ang_radius, source.sig_ang_radius, RC.D*1000.0 ,log)\n\n source.radius = Rstar\n source.sig_radius = sig_Rstar\n\n return source, blend", "def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance" ]
[ "0.7693602", "0.75446385", "0.72816", "0.6812453", "0.67283833", "0.671338", "0.6683665", "0.6606871", "0.6497808", "0.6490708", "0.6487655", "0.6399363", "0.6332539", "0.6325396", "0.6324086", "0.62027526", "0.6196767", "0.6151955", "0.61486185", "0.61167824", "0.6116629", "0.6087147", "0.6073233", "0.606817", "0.60672206", "0.60616386", "0.6025575", "0.5995501", "0.5973242", "0.59714925" ]
0.764925
1
Computes the word error rate between output and ideal. Word error rate = minimum edit distance / number of words in the ideal sentence.
def word_error_rate(output, ideal):
    return min_edit_distance(output, ideal) / len(ideal.split())
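A short usage sketch of the function above; it assumes min_edit_distance from the previous record is in scope, and the example sentences are made up:

output = "the cat sat"
ideal = "the cat sat down"
word_error_rate(output, ideal)  # 1 word-level edit / 4 words in ideal = 0.25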
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_error_rate(hypotheses, references):\n\n scores = 0\n words = 0\n len_diff = len(references) - len(hypotheses)\n if len_diff > 0:\n raise ValueError(\"Uneqal number of hypthoses and references: \"\n \"{0} and {1}\".format(len(hypotheses), len(references)))\n elif len_diff < 0:\n hypotheses = hypotheses[:len_diff]\n\n for h, r in zip(hypotheses, references):\n h_list = h.split()\n r_list = r.split()\n words += len(r_list)\n scores += __levenshtein(h_list, r_list)\n if words != 0:\n wer = 1.0*scores/words\n else:\n wer = float('inf')\n return wer, scores, words", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def computeErrorRate(test_set, words_likely_tags):\n # initiate vars\n known_words = {} # those two dictionaries are optional, just for debuging\n unknown_words = {} # those two dictionaries are optional, just for debuging\n correct_predictions = 0\n total_predictions = 0\n correct_unknown_predictions = 0\n total_unknown_predictions = 0\n\n for i in range(len(test_set)): # iterate sentences\n test_sent = test_set[i]\n for j in range(len(test_sent)): # iterate words in sent\n w = test_sent[j][WORD]\n t = test_sent[j][TAG]\n\n # known words\n if w in words_likely_tags:\n if w in known_words:\n 
known_words[w][COUNTER_SHOWS] += 1\n if t == words_likely_tags[w]: # same tag\n known_words[w][COUNTER_EQUAL] += 1\n correct_predictions += 1\n else:\n if t == words_likely_tags[w]: # same tag\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_predictions += 1\n else:\n known_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_predictions += 1\n # unknown words\n else: # w not in words_likely_tags, treat w as unknown_word\n if w in unknown_words:\n unknown_words[w][COUNTER_SHOWS] += 1\n if t == UNKNOWN_TAG:\n # same tag as our model predicts for unknown words\n unknown_words[w][COUNTER_EQUAL] += 1\n correct_unknown_predictions += 1\n else:\n if t == UNKNOWN_TAG: # same tag\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 1}\n correct_unknown_predictions += 1\n else:\n unknown_words[w] = {COUNTER_SHOWS: 1, COUNTER_EQUAL: 0}\n\n total_unknown_predictions += 1\n\n # print('correct_predictions......... = ', correct_predictions)\n # print('total_predictions........... = ', total_predictions)\n # print('correct_unknown_predictions. = ', correct_unknown_predictions)\n # print('total_unknown_predictions... = ', total_unknown_predictions)\n err_rate_known = 1 - correct_predictions/total_predictions\n err_rate_unknown = 1 - correct_unknown_predictions/total_unknown_predictions\n # total_err = err_rate_known + err_rate_unknown\n tot_pred = total_predictions + total_unknown_predictions\n corr_pred = correct_predictions + correct_unknown_predictions\n total_err = 1 - corr_pred/tot_pred\n\n return err_rate_known, err_rate_unknown, total_err", "def word_err(word: str, estimate: str):\n return 0 if word == estimate else 1", "def wordSimilarityRatio(sent_1,sent_2):", "def _compute_sentence_statistics(pred_words: List[str], target_words: List[List[str]]) ->Tuple[Tensor, Tensor]:\n tgt_lengths = tensor(0.0)\n best_num_edits = tensor(2e+16)\n for tgt_words in target_words:\n num_edits = _translation_edit_rate(tgt_words, pred_words)\n tgt_lengths += len(tgt_words)\n if num_edits < best_num_edits:\n best_num_edits = num_edits\n avg_tgt_len = tgt_lengths / len(target_words)\n return best_num_edits, avg_tgt_len", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def ari(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n if num_words <= 0:\n return 0\n\n letter_count = sum([len(token) for token in doc if not token.is_punct])\n letter_to_words = letter_count / num_words\n words_to_sents = num_words / num_sentences\n return 4.71 * letter_to_words + 0.5 * words_to_sents - 21.43", "def score(self, sentence):\n\n\n # TODO your code here\n score = 0.0 \n prevWord = \"\"\n prevPrevWord = \"\"\n newSentence = []\n for word in sentence:\n newSentence += word.split()\n for currentWord in sentence:\n currentWord = currentWord.strip(STRIP_CHARS)\n currentWord = currentWord.lower()\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n trigramCount = self.trigramCounts[trigram]\n if trigramCount > 0:\n score += 
math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])\n # Subtraction by 1 removes the add one count from the laplace\n # smoothing\n score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))\n elif self.bigramCounts[(prevWord, currentWord)] > 0:\n score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)\n score -= math.log(self.totalBigramCounts)\n else:\n count = self.unigramCounts[currentWord]\n score += math.log(count * UNI_BACKOFF_COEFFICIENT)\n score -= math.log(self.total)\n else:\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n return -score", "def corrected_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / math.sqrt(2 * n_words)", "def smog(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n if num_sentences < 30 or num_words == 0:\n return 0\n num_poly = _get_num_syllables(doc, min_syllables=3)\n return 1.0430 * sqrt(num_poly * 30 / num_sentences) + 3.1291", "def bad_start_rate(labelled,str):\n#\tlabelled = RawClaim.objects.exclude(correcttrim=\"\")\n\tfiltered = set([l for l in labelled if fixstring(l.sentence).startswith(str)])\n\twrong = set([l for l in filtered if l.correcttrim!=\"X\"])\n\tright = filtered - wrong\n\treturn (float(len(right))/len(filtered),wrong,right)", "def total_estimated_words(self):\n return len(self.sentence) / 5", "def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score", "def error_ratio(original, corrected):\n\n original = TextBlob(original)\n corrected = TextBlob(corrected)\n error_ratio = sum(not word in corrected.tokenize() for word in original.tokenize()) / len(original)\n error_ratio_dict = {'error_ratio': error_ratio}\n return error_ratio_dict", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n 
weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return 
s\n return final_ratio", "def error(self, documents):\n ###TODO\n sum_1 = 0.0\n for c_id,clust in self.fin_clust.items():\n n = self.sqnorm(self.means[c_id]) \n sum_1 = sum_1 + sum([self.distance(self.docs[dc],self.means[c_id],n) for dc in clust]) \n return round(sum_1,2)", "def coleman_liau(self, doc):\n num_words = _get_num_words(doc)\n if num_words <= 0:\n return 0\n\n num_sentences = _get_num_sentences(doc)\n letter_count = sum(\n [len(token) for token in doc if not token.is_punct and not token.is_digit]\n )\n if letter_count <= 0:\n return 0\n letters_to_words = letter_count / num_words * 100\n sent_to_words = num_sentences / num_words * 100\n return 0.0588 * letters_to_words - 0.296 * sent_to_words - 15.8", "def compute_readability(text):\n total_words = 0\n total_sentences = 0\n total_syllables = 0\n score = 0\n\n words = text.split()\n total_words = len(text.split()) \n total_sentences = count_sentences(text)\n total_syllables = count_syllables(words)\n \n score = 206.835 - 1.015 * ( total_words / total_sentences) - 84.6 * (total_syllables / total_words)\n if score > 90.00:\n answer = 'Texto de nível do 5º ano do Ensino Fundamental, facilmente compreendido por um aluno de 11 anos.'\n elif score <= 90.00 and score > 80.00:\n answer = 'Texto de nível do 6º ano do Ensino Fundamental, inglês coloquial para consumidores.'\n elif score <= 80.00 and score > 70.00:\n answer = 'Texto de nível do 7º ano do Ensino Fundamental, razoavelmente fácil de ler.'\n elif score <= 70.00 and score > 60.00:\n answer = 'Texto de nível do 9º ano do Ensino Fundamental, Inglês simples compreendido por adolescentes de 13 - 15 anos.'\n elif score <= 60.00 and score > 50.00:\n answer = 'Texto de 1º a 3º ano do Ensino Médio, razoavelmente difícil de ler.'\n elif score <= 50.00 and score > 30.00:\n answer = 'Texto de nível Universitário, difícil de ler.'\n else:\n answer = 'Texto de nível de Graduação, muito difícil de ler e mais bem-compreendido por universitários graduados.'\n \n print('Pontuação Total:', score, answer)", "def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score", "def compute_ari(text: str):\n characters = len(text.replace(\" \", \"\").replace(\".\", \"\").replace(\",\", \"\").replace(\";\", \"\"))\n words = text.count(\" \") + 1\n sentences = text.count(\".\")\n\n score = 4.71 * (characters / words) + .5 * (words / sentences) - 21.43\n\n return score", "def score(self, sentence):\n score = 0.0\n prev_word = None\n for token in sentence:\n two_words_count = self.bigram_count[prev_word][token]\n prev_word_count = self.unigram_count[prev_word]\n if (two_words_count > 0):\n score += math.log(two_words_count)\n score -= math.log(prev_word_count)\n else:\n score += math.log(self.backoff_multiplier)\n score += math.log(self.unigram_count[token] + 1.0)\n score -= math.log(self.num_words + self.vocabulary_size)\n prev_word = token\n return score", "def calculatePenalty(words, M):\n tot_len = 0\n for word in words:\n tot_len += len(word) + 1\n tot_len -= 1\n if tot_len > M:\n return None\n return (M - tot_len) ** 2", "def fk_ease(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n words_per_sent = 
num_words / num_sentences\n syllables_per_word = num_syllables / num_words\n return 206.835 - (1.015 * words_per_sent) - (84.6 * syllables_per_word)", "def average_word_length(self):\n len_words_only = [len(s) if s.isalpha() else 0 for s in self.text]\n if (len_words_only == 0):\n print('Input file contains no words.')\n return 0, 0, 0\n else:\n return sum(len_words_only) / len(len_words_only), median(len_words_only), mode(len_words_only)", "def _word_accuracy(label_file, pred_file,subword_option=None):\n\n with open(label_file, \"r\", encoding='utf-8') as label_fh:\n with open(pred_file, \"r\", encoding='utf-8') as pred_fh:\n total_acc, total_count = 0., 0.\n for sentence in label_fh:\n sentence = \" \".join(_clean(sentence, subword_option))\n labels = sentence.strip().split(\" \")\n preds = \" \".join(_clean(pred_fh.readline(), subword_option))\n preds = preds.strip().split(\" \")\n match = 0.0\n for pos in range(min(len(labels), len(preds))):\n label = labels[pos]\n pred = preds[pos]\n if label == pred:\n match += 1\n total_acc += 100 * match / max(len(labels), len(preds))\n total_count += 1\n return total_acc / total_count", "def _WordScore(index, normalized_command_word,\n canonical_command_word, canonical_command_length):\n score = 0\n\n # The match can go either way.\n if normalized_command_word in canonical_command_word:\n shorter_word = normalized_command_word\n longer_word = canonical_command_word\n elif canonical_command_word in normalized_command_word:\n shorter_word = canonical_command_word\n longer_word = normalized_command_word\n else:\n return score\n\n # Inner match must be a word boundary.\n hit = longer_word.find(shorter_word)\n if hit > 0 and longer_word[hit-1] != '-':\n return score\n\n # Partial hit.\n score += 10\n\n # Prefer a match in less words.\n if canonical_command_length == 1:\n score += 30\n elif canonical_command_length == 2:\n score += 20\n elif canonical_command_length == 3:\n score += 10\n\n # Prefer a match in order.\n if index == 0:\n score += 25\n elif index == 1:\n score += 15\n else:\n score += 5\n\n # Prefer matching more chars and beginning of word.\n # This also handles minor suffix diffs, like singular vs. 
plural.\n extra = len(longer_word) - len(shorter_word)\n if extra <= 2:\n extra = 3 - extra\n if longer_word.startswith(shorter_word):\n extra *= 2\n score += extra\n\n # Prefer matching on surface words.\n if index == 0 and canonical_command_length > 1:\n score += 30\n # Also prefer matching on group words.\n elif index > 0 and canonical_command_length > index + 1:\n score += 15\n\n return score", "def word_entropy(self, doc, lemmatized=False):\n # filter out words\n words = [token for token in doc if not token.is_punct and \"'\" not in token.text and not token.is_space]\n # create bag of words\n if lemmatized:\n list_words = [w.lemma_ for w in words]\n else:\n list_words = [w.text for w in words]\n num_words = len(list_words)\n word_freq = Counter(list_words)\n return -sum(\n [\n (word_freq[word] / num_words) * log2(word_freq[word] / num_words)\n for word in word_freq\n ]\n )", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def score(self, sentence):\n # TODO your code here\n\n # initialize count with trained data\n unigram_count = self.count.copy()\n N = self.total\n\n # make a new key for UNK, add-one later\n for token in sentence:\n if token not in unigram_count:\n unigram_count[token] = 0\n\n # calcutate lopP(<s>) + logP(w1) + logP(w2) + ...\n score = 0.0 # P(<s>) = 1\n V = len(unigram_count) # the number of vocab including UNK\n for word in sentence:\n prob = float((unigram_count[word] + 1) / (N + V)) # c(w) + 1 / N + V\n score += math.log(prob)\n\n return score" ]
[ "0.70479745", "0.6861282", "0.6712708", "0.6672853", "0.6627787", "0.65970993", "0.6548495", "0.6483904", "0.6389047", "0.6382813", "0.6372524", "0.6365916", "0.63445747", "0.6335466", "0.63145435", "0.62640274", "0.62636125", "0.62576604", "0.6257261", "0.621895", "0.6209494", "0.6196456", "0.61874026", "0.6186042", "0.6179749", "0.6169891", "0.614836", "0.61419994", "0.6133244", "0.61261535" ]
0.8901293
0
Collects the output shape(s) of a list of Keras tensors. Arguments
def _collect_input_shape(input_tensors):
    input_tensors = to_list(input_tensors)
    shapes = []
    for x in input_tensors:
        try:
            shapes.append(K.int_shape(x))
        except TypeError:
            shapes.append(None)
    return unpack_singleton(shapes)
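A minimal usage sketch, assuming a Keras 2.x environment in which to_list, unpack_singleton and the backend alias K are importable (as in keras.utils.generic_utils and keras.backend); the placeholder shapes are arbitrary:

from keras import backend as K
from keras.utils.generic_utils import to_list, unpack_singleton

x = K.placeholder(shape=(None, 32))
y = K.placeholder(shape=(None, 10, 3))
_collect_input_shape(x)       # (None, 32)
_collect_input_shape([x, y])  # [(None, 32), (None, 10, 3)]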
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape,\n tensor.Tensor) or input_shape.op.type != \"Const\":\n fully_known = False\n break\n sizes.append(input_shape)\n\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def compute_output_shape(self, input_shape):\n return [\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\n ]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def get_output_shape(self):\n return self.out.shape.as_list()", "def get_output_shape(self):\n return []", "def compute_output_shape(self, input_shape):\n return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]", "def get_output_shapes(model_data):\n model = schema_fb.Model.GetRootAsModel(model_data, 0)\n\n output_shapes = []\n for subgraph_idx in range(model.SubgraphsLength()):\n subgraph = model.Subgraphs(subgraph_idx)\n for output_idx in range(subgraph.OutputsLength()):\n output_tensor_idx = subgraph.Outputs(output_idx)\n output_tensor = subgraph.Tensors(output_tensor_idx)\n output_shapes.append(output_tensor.ShapeAsNumpy().tolist())\n\n return output_shapes", "def calculate_flatten_output_shapes(operator):\n check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)\n check_input_and_output_types(operator, good_input_types=[FloatTensorType])\n\n input = operator.inputs[0]\n output = operator.outputs[0]\n\n if len(input.type.shape) not in [2, 4]:\n raise RuntimeError(\"Input must be 2-D or 4-D float tensor\")\n\n input_shape = input.type.shape\n output_shape = [input_shape[0], 1]\n\n # Calculate the multiplication of C, H, and W.\n for i in input_shape[1:]:\n if i != \"None\":\n output_shape[1] *= i\n else:\n # If any of C, H, W-dimensions is unknown, the flatten C-dimension is unknown\n output_shape[1] = \"None\"\n break\n\n output.type.shape = output_shape", "def tensorize_outputs(self, outputs):\n ndim = outputs.ndim\n # shape = outputs.shape\n if ndim == 4:\n return outputs\n elif ndim == 2:\n outputs_reshaped = outputs.ravel('F').reshape((self._learning_batch_size,\n self._output_feature_maps,\n self._output_size[0],\n self._output_size[1]))\n return outputs_reshaped\n else:\n raise Exception('Wrong inputs dimension, inputs should be a 4D tensor with '\n 'shape : (batch_size, outputs_channel, img_h, img_w), or a matrix of'\n 'flattened inputs')", "def shape_for_keras(data):\n 
raise NotImplementedError", "def getOutShapes(self):\n\t\treturn self.output_shape", "def get_output_shape(self) -> List[int]:\n if -1 not in self.output_shape:\n return self.output_shape\n\n total_input_dims = np.prod(self.input_shape)\n\n dim = 1\n for i in self.output_shape:\n if i != -1:\n dim *= i\n missing_dim = int(total_input_dims / dim)\n\n output_shape = self.output_shape\n for ix, dim in enumerate(output_shape):\n if dim == -1:\n output_shape[ix] = missing_dim\n\n return output_shape", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def get_shape(input_tensor):\n return input_tensor.get_shape().as_list()", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]", "def output_dims(self) -> Optional[Tuple[int]]:\n return None", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def input_shape(self):\n return [None, 32, 32, 1]", "def calculate_size(\n inputs: DETECTED_INPUT_OUTPUT_TYPES, batch_dim: Optional[int]\n ) -> List[int]:\n\n def nested_list_size(inputs: Sequence[Any]) -> List[int]:\n \"\"\" Flattens nested list size. \"\"\"\n if hasattr(inputs, \"tensors\"):\n return nested_list_size(inputs.tensors) # type: ignore\n if isinstance(inputs[0], dict):\n return nested_list_size(list(inputs[0].items()))\n if hasattr(inputs[0], \"size\") and callable(inputs[0].size):\n return list(inputs[0].size())\n if isinstance(inputs, (list, tuple)):\n return nested_list_size(inputs[0])\n return []\n\n size = []\n # pack_padded_seq and pad_packed_seq store feature into data attribute\n if isinstance(inputs, (list, tuple)) and inputs and hasattr(inputs[0], \"data\"):\n size = list(inputs[0].data.size())\n if batch_dim is not None:\n size = size[:batch_dim] + [-1] + size[batch_dim + 1 :]\n\n elif isinstance(inputs, dict):\n # TODO avoid overwriting the previous size every time?\n for _, output in inputs.items():\n size = list(output.size())\n if batch_dim is not None:\n size = [size[:batch_dim] + [-1] + size[batch_dim + 1 :]]\n\n elif isinstance(inputs, torch.Tensor):\n size = list(inputs.size())\n if batch_dim is not None:\n size[batch_dim] = -1\n\n elif isinstance(inputs, (list, tuple)):\n size = nested_list_size(inputs)\n\n else:\n raise TypeError(\n \"Model contains a layer with an unsupported \"\n f\"input or output type: {inputs}\"\n )\n\n return size", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def get_output_shape(self):\n # TODO: return shape without construction of graph\n return self.get_output(comp_next_seq_pos=False).get_shape().as_list()", "def get_sample_shape(inputs):\n return tuple(inputs.size())[1:]", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def output_dims(self, qargs=None):\n if qargs is None:\n return self._output_dims\n return tuple(self._output_dims[i] for i in qargs)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n 
else:\n return input_shape[1:]", "def get_input_tensor_shape(args_list):\n tensor_list = []\n for arg in args_list:\n if isinstance(arg, Tensor):\n tmp_shape = arg.shape\n tmp_type = arg.dtype\n tensor_list.append(PythonTensor(np.ones(tmp_shape), dtype=tmp_type))\n else:\n tensor_list.append(arg)\n\n return tuple(tensor_list)" ]
[ "0.735498", "0.70147294", "0.70147294", "0.70147294", "0.70022446", "0.69870424", "0.69419247", "0.6935082", "0.6802348", "0.67808133", "0.6715975", "0.6681929", "0.66333735", "0.65937126", "0.65777856", "0.656046", "0.654464", "0.65077263", "0.65072185", "0.6503165", "0.6503165", "0.6503165", "0.65003", "0.6482588", "0.6482588", "0.6467166", "0.64444625", "0.6443989", "0.6400245", "0.6386374" ]
0.7279559
1
Try to import all C extensions.
def test_c_extensions_import():
    import storm_analysis.dbscan.dbscan_c
    import storm_analysis.fista.fista_fft_c
    import storm_analysis.frc.frc_c
    import storm_analysis.L1H.homotopy_imagea_c
    import storm_analysis.rolling_ball_bgr.rolling_ball_lib_c
    import storm_analysis.sa_library.cs_decon_utilities_c
    import storm_analysis.sa_library.dao_fit_c
    import storm_analysis.sa_library.grid_c
    import storm_analysis.sa_library.ia_utilities_c
    import storm_analysis.sa_library.matched_filter_c
    import storm_analysis.sa_utilities.fitz_c
    import storm_analysis.simulator.pf_math_c
    import storm_analysis.simulator.draw_gaussians_c
    import storm_analysis.spliner.cubic_spline_c
    import storm_analysis.spliner.cubic_fit_c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detectExtensions(builder):\n print (\"Checking if C extensions can be compiled, don't be alarmed if \"\n \"a few compile errors are printed.\")\n\n if not builder._compile_helper(\"#define X 1\\n\"):\n print \"Compiler not found, skipping C extensions.\"\n return []\n\n # Extension modules to build.\n exts = [\n Extension(\"twisted.spread.cBanana\",\n [\"twisted/spread/cBanana.c\"],\n define_macros=builder.define_macros),\n ]\n\n # urllib.unquote accelerator\n exts.append( Extension(\"twisted.protocols._c_urlarg\",\n [\"twisted/protocols/_c_urlarg.c\"],\n define_macros=builder.define_macros) )\n\n if sys.platform == 'darwin':\n exts.append(\n Extension(\"twisted.internet.cfsupport\",\n [\"twisted/internet/cfsupport/cfsupport.c\"],\n extra_compile_args=['-w'],\n extra_link_args=['-framework','CoreFoundation',\n '-framework','CoreServices',\n '-framework','Carbon'],\n define_macros=builder.define_macros))\n\n if sys.platform == 'win32':\n exts.append( Extension(\"twisted.internet.iocpreactor._iocp\",\n [\"twisted/internet/iocpreactor/_iocp.c\"],\n libraries=[\"ws2_32\", \"mswsock\"],\n define_macros=builder.define_macros))\n\n return exts", "def load_extensions(self):\n extension_module_name = f\"{utils.get_project_name()}.cogs\"\n for extension in CONF.LOADED_EXTENSIONS:\n try:\n self.load_extension(extension_module_name + \".\" + extension)\n LOG.debug(f\"The extension '{extension.split('.')[0]}' has been successfully loaded\")\n except Exception as e:\n message = f\"Failed to load extension '{extension.split('.')[0]}'\"\n LOG.exception(log.get_log_exception_message(message, e))", "def init_all():\n global _already_initialized\n if _already_initialized:\n return\n\n # Must put this here to avoid extensions re-triggering initialization\n _already_initialized = True\n\n def load_ep(entry_point):\n \"\"\"Loads a given entry point. Warns and logs on failure.\n \"\"\"\n logger.debug('Loading extension: %s', entry_point)\n try:\n func = entry_point.load()\n func()\n except Exception as e:\n msg = (f\"Numba extension module '{entry_point.module}' \"\n f\"failed to load due to '{type(e).__name__}({str(e)})'.\")\n warnings.warn(msg, stacklevel=3)\n logger.debug('Extension loading failed for: %s', entry_point)\n\n eps = importlib_metadata.entry_points()\n # Split, Python 3.10+ and importlib_metadata 3.6+ have the \"selectable\"\n # interface, versions prior to that do not. 
See \"compatibility note\" in:\n # https://docs.python.org/3.10/library/importlib.metadata.html#entry-points\n if hasattr(eps, 'select'):\n for entry_point in eps.select(group=\"numba_extensions\", name=\"init\"):\n load_ep(entry_point)\n else:\n for entry_point in eps.get(\"numba_extensions\", ()):\n if entry_point.name == \"init\":\n load_ep(entry_point)", "def _load_extensions(path):\n extension_dir = os.environ.get(path, path)\n print(f\"looking for extensions in {extension_dir}\")\n if not os.path.isdir(extension_dir):\n print(f\"No such {extension_dir}\")\n return\n\n import sys \n import importlib\n\n sys.path.append(path)\n imports = [ filename \n for filename in os.listdir(path)\n if not filename.startswith('__') \n and not filename.startswith('.') \n ]\n for filename in imports:\n module_name, _ = os.path.splitext(filename)\n module = importlib.import_module(module_name)\n for attribute_name in dir(module):\n if attribute_name.startswith('__'):\n continue\n globals()[attribute_name] = getattr(module, attribute_name)", "def extensions():\n\n pass", "def import_c_extension(mod_globals):\n c_module = None\n module_name = mod_globals['__name__']\n assert module_name.startswith('BTrees.')\n module_name = module_name.split('.')[1]\n if _should_attempt_c_optimizations():\n c_module = _c_optimizations_available(module_name)\n\n if c_module:\n new_values = dict(c_module.__dict__)\n new_values.pop(\"__name__\", None)\n new_values.pop('__file__', None)\n new_values.pop('__doc__', None)\n mod_globals.update(new_values)\n else:\n # No C extension, make the Py versions available without that\n # extension. The list comprehension both filters and prevents\n # concurrent modification errors.\n for py in [k for k in mod_globals if k.endswith('Py')]:\n mod_globals[py[:-2]] = mod_globals[py]\n\n # Assign the global aliases\n prefix = module_name[:2]\n for name in ('Bucket', 'Set', 'BTree', 'TreeSet'):\n mod_globals[name] = mod_globals[prefix + name]\n\n # Cleanup\n mod_globals.pop('import_c_extension', None)", "def _get_extension_imports() -> str:\n scss_imports = \"\"\n\n for ext in (simple_bulma_path / \"extensions\").iterdir():\n\n if is_enabled(ext):\n for src in get_sass_files(ext):\n scss_imports += f\"@import '{src.as_posix()}';\\n\"\n\n return scss_imports", "def load_extensions(self, *exts):\n for ext in exts:\n try:\n self.load_extension(ext)\n logger.info(f\"Successfully loaded cog {ext}.\")\n except Exception:\n logger.error(f\"Failed to load cog: {ext}: {format_exc()}\")\n\n logger.info(\"Cog loading complete.\")", "def build_extensions(self):\n numpy_incl = resource_filename('numpy', 'core/include')\n for ext in self.extensions:\n ext.include_dirs.append(numpy_incl)\n\n # This explicitly calls the superclass method rather than the\n # usual super() invocation because distutils' build_class, of\n # which Cython's build_ext is a subclass, is an old-style class\n # in Python 2, which doesn't support `super`.\n cython_build_ext.build_extensions(self)", "def cythonize_extensions(extensions):\n from Cython.Build import cythonize\n with cd(config.script_dir/'src'):\n cythonized = cythonize(\n extensions,\n language_level=3,\n nthreads=4,\n annotate=config.debug,\n # https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives # noqa: E501\n compiler_directives={\n 'binding': True,\n 'boundscheck': False,\n 'wraparound': False,\n 'profile': config.debug and not config.pypy,\n 'linetrace': config.debug and not config.pypy,\n 
'always_allow_keywords': True,\n 'embedsignature': True,\n 'emit_code_comments': True,\n 'initializedcheck': False,\n 'nonecheck': False,\n 'optimize.use_switch': True,\n # Warns about any variables that are implicitly declared\n # without a cdef declaration\n 'warn.undeclared': False,\n 'warn.unreachable': True,\n 'warn.maybe_uninitialized': False,\n 'warn.unused': True,\n 'warn.unused_arg': False,\n 'warn.unused_result': False,\n 'warn.multiple_declarators': True,\n },\n )\n for cy in cythonized:\n cy.sources[0] = 'src/' + cy.sources[0]\n return cythonized", "def load_cogs(self):\n\n path = \"cogs/\" # Should always have a trailing slash\n import_path = path.replace(\"/\", \".\")\n extensions: list[str] = [\n import_path + file.replace(\".py\", \"\")\n for file in os.listdir(path)\n if os.path.isfile(f\"{path}{file}\")\n ]\n\n for extension in extensions:\n try:\n self.load_extension(extension)\n except errors.ExtensionAlreadyLoaded:\n pass\n\n log.info(f\"Loaded {len(self.commands)} commands from {len(self.cogs)} cogs\")", "def define_extensions(use_cython, use_openmp):\n if sys.platform.startswith('win'):\n # compile args from\n # https://msdn.microsoft.com/en-us/library/fwkeyyhe.aspx\n link_args = []\n compile_args = ['/O2', '/openmp']\n else:\n link_args = []\n compile_args = ['-Wno-unused-function', '-Wno-maybe-uninitialized', '-O3', '-ffast-math']\n if use_openmp:\n compile_args.append('-fopenmp')\n link_args.append('-fopenmp')\n\n if 'anaconda' not in sys.version.lower():\n compile_args.append('-march=native')\n\n # recommended approach is that the user can choose not to\n # compile the code using cython, they can instead just use\n # the .c file that's also distributed\n # http://cython.readthedocs.io/en/latest/src/reference/compilation.html#distributing-cython-modules\n src_ext = '.pyx' if use_cython else '.c'\n names = ['pairwise3']\n modules = [Extension(name,\n [os.path.join(name + src_ext)],\n extra_compile_args = compile_args,\n extra_link_args = link_args) for name in names]\n\n if use_cython:\n return cythonize(modules)\n else:\n return modules", "def import_all():\n import theory", "def init_extensions(self, package, module):\n\n pass", "def extensions():\n exts = []\n exts.append(\n Extension(\n 'pytng.pytng',\n sources=glob('pytng/src/compression/*.c') + glob(\n 'pytng/src/lib/*.c') + ['pytng/pytng.pyx'],\n include_dirs=[\n \"pytng/include/\", \"{}/include\".format(sys.prefix),\n np.get_include()\n ],\n library_dirs=[\"{}/lib\".format(sys.prefix)],\n libraries=['z'], ))\n\n return cythonize(exts, gdb_debug=False)", "def CompExtension_init():\n return _libsbml.CompExtension_init()", "def get_loaded_extensions():\n raise NotImplementedError()", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = 
tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def install_all(self):\n self.install_native_host()\n self.install_extension()", "def python_like_exts():\r\n exts = []\r\n for lang in sourcecode.PYTHON_LIKE_LANGUAGES:\r\n exts.extend(list(sourcecode.ALL_LANGUAGES[lang]))\r\n return ['.' + ext for ext in exts]", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "def get_extension_funcs():\n raise NotImplementedError()", "def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)", "def build_from_c_and_cpp_files(extensions):\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx', '.py'):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources = sources", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)", "def import_all():\n import sys\n\n # obviously this is a hack for now... What's the right way to learn\n # the directory that holds the plugins directory? I don't want the\n # directory itself, because I *think* we might get name conflicts if we\n # import them directly. (I'm fuzzy about how that works. Can you\n # import \"x\" from one path and \"x\" from another path, and have them both\n # around with the same name? 
sys.modules suggests no.\n pdir = \"/home/sandro/riftr\"\n sys.path.append(pdir)\n \n dir = \"plugins\"\n ids = {}\n for filename in os.listdir(pdir + \"/\" + dir):\n if filename.endswith(\".py\") and not filename[0] == \"_\":\n local = filename[0:-3]\n module_name = dir + \".\" + local\n #print \n #print module_name\n m = __import__(module_name)\n mm = getattr(m, local)\n #print \"=> \", mm\n for (name, entry) in mm.__dict__.items():\n if getattr(entry, \"__doc__\", False) and getattr(entry, \"id\", False):\n if entry.id.startswith(dir+\".\"):\n # because they used \"__name__\"\n entry.id = entry.id[len(dir+\".\"):]\n if entry.id in ids:\n raise RuntimeError, (\"Duplicate id: %s used in %s and %s\" %\n entry.id, ids[entry.id], filename)\n ids[entry.id] = filename\n #print \"registering\", name, entry\n register(entry)\n \n # I wonder why issubclass doesn't work for me like this.\n #if type(entry).__name__ in [ \"classobj\", \"type\" ]:\n # print \"is type/class\", name, entry\n # print issubclass(entry, object)\n # print issubclass(entry, Plugin)\n # print issubclass(entry, InputPlugin)\n\n\n sys.path.pop(-1)", "async def load_all_extensions(self, reload=False):\n succeeded = {}\n for extension in get_extensions():\n try:\n if reload or extension not in self.cogs_loaded:\n self.load_extension(f'cogs.{extension}')\n l.info(f\"Loaded extension '{extension}'\")\n self.cogs_loaded.add(extension)\n succeeded[extension] = True\n except Exception as e:\n error = f\"{extension}\\n {type(e).__name__} : {e}\"\n l.error(f\"Failed to load extension '{error}'\")\n succeeded[extension] = False\n if succeeded:\n l.info(LOG_SEP)\n return succeeded", "def test_override_builtin_extension_without_explicit_flag(self):\n with self.assertRaises(ValueError):\n PyLoader.register()", "def consider_env(self): \n for spec in self._envlist(\"PYLIB\"):\n self.import_module(spec)", "def extend_import_paths(paths):\n for path in paths:\n dir = os.path.abspath(path if os.path.isdir(path) else os.path.dirname(path))\n while(os.path.exists(os.path.join(dir, '__init__.py'))):\n dir = os.path.dirname(dir)\n sys.path.append(dir)" ]
[ "0.6752059", "0.6606153", "0.6383072", "0.63468283", "0.6211412", "0.6153936", "0.6096747", "0.60393804", "0.597067", "0.5909593", "0.5909499", "0.59058255", "0.58827597", "0.58437383", "0.5838516", "0.58294934", "0.581349", "0.57937706", "0.5778554", "0.5777526", "0.5745322", "0.57345146", "0.57251656", "0.56982756", "0.5682937", "0.5663024", "0.5649856", "0.56136084", "0.55984026", "0.55899185" ]
0.6875448
0
Finds all allowed values in the set of possible arrays. This is done with binary OR. Values that are always 1 or 2 will never change. If either, they will become a 3. If they remain 0, there is no solution
def find_allowed(possible_arrays): allowed = [0] * len(possible_arrays[0]) for array in possible_arrays: allowed = [x | y for x, y in zip(allowed, array)] return allowed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self, i, j):\n possible=np.ones((10), np.int)\n for k in range(0, 9):\n if k==j: continue\n possible[self.a[i][k]]=0\n\n for k in range(0, 9):\n if k==i: continue\n possible[self.a[k][j]]=0\n for a1 in range(0, 3):\n for b1 in range(0, 3):\n if (i//3)*3+a1==i and (j//3)*3+b1==j:\n continue\n possible[self.a[(i//3)*3+a1][(j//3)*3+b1]]=0\n return possible", "def any(self):\n valid = False\n solution = []\n while not valid:\n soln = []\n for dec in self.decisions:\n soln.append(random.randint(dec.low, dec.high))\n valid = self.ok(soln)\n if valid:\n solution = soln\n return solution", "def challenge1(data, num_values=2, target_value=2020):\n data_array = [data for _ in range(num_values)]\n for values in product(*data_array):\n if np.sum(values) == target_value:\n print(\"Solution found.\")\n return np.product(values)", "def possibilities(board):\n return board[np.where(board == 0)]", "def solveit(static_arr):\n arr = np.asarray(static_arr)\n if arr.shape != (9, 9):\n raise ValueError('Sudoku array must be 9x9')\n\n if np.all(arr) or is_solved(arr):\n return arr\n\n possnum = [[], [], []]\n\n for i in iter1d:\n possnum[0].append(wantset - set(arr[:, i])) # Each row\n possnum[1].append(wantset - set(arr[i, :])) # Each col\n\n # Each 3x3 block\n for j in iterblock:\n for i in iterblock:\n possnum[2].append(wantset - set(arr[j:j+3, i:i+3].ravel()))\n\n yy, xx = np.where(arr == 0)\n possd = np.empty((9, 9), dtype=object)\n possd.fill(None)\n minnum = 99\n minpos = None\n\n for i, idx in enumerate(zip(yy, xx)):\n iy, ix = idx\n iq = (ix // 3) + ((iy // 3) * 3)\n num_for_cell = possnum[0][ix] & possnum[1][iy] & possnum[2][iq]\n n_match = len(num_for_cell)\n if n_match > 0:\n possd[idx] = num_for_cell\n if n_match < minnum:\n minnum = n_match\n minpos = idx\n\n # print('Starting at {} with {} possibilities'.format(minpos, minnum))\n n = list(possd[minpos])[0]\n arr[minpos] = n\n possd[minpos] = None\n yy, xx = possd.nonzero()\n for idx in zip(yy, xx):\n if n in possd[idx]:\n possd[idx].remove(n)\n\n return solveit(arr)", "def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = any(answer)\n return answer", "def allowed_mods():\n mods = [Mod.EZ, Mod.HD, Mod.HR, Mod.DT, Mod.HT, Mod.FL]\n mod_powerset = chain.from_iterable(combinations(mods, r) for r in range(len(mods) + 1))\n combos = []\n for p in mod_powerset:\n combined_mod = Mod(0)\n for m in p:\n combined_mod |= m\n combos.append(combined_mod)\n allowed = tuple(c for c in combos if valid_mod(c))\n return allowed", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"k\"]):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['k']} (exclusive).\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n X_union = (X == 0) | (X == 1)\n for k in range(2, kwargs[\"n\"] + 1):\n X_union = X_union | (X == k)\n\n assert (\n X_union.all()\n ), f\"x should be equal to integer from 0 to {kwargs['n']} (inclusive).\"", "def only_choice(values):\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n for digit in '123456789':\n for unit in unitlist:\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n assign_value(values, dplaces[0], 
digit)\n return values", "def get_feasible_set(self):\n\t\tfeasible_set = np.zeros((0, self.codelength),dtype=int)\n\t\tfor index, x in np.ndenumerate(\n\t\t\tnp.empty(shape=[self.Ncolors] * self.codelength)):\n\t\t\tcombination = np.array(index) + 1\n\t\t\tif self.feasible(combination):\n\t\t\t\tfeasible_set = np.vstack((feasible_set, combination))\n\t\treturn feasible_set", "def all_cases(self):\n return product(*self.all_case_vals())", "def test_invalid_inputs(self):\n f = gtrutils.check_petition_combos\n \n self.assertFalse( f(-1, 1, [], False, False))\n self.assertFalse( f( 0, 1, [], False, False))\n self.assertFalse( f( 1, 0, [], False, False))\n self.assertFalse( f( 1, 1, [-1], False, False))\n self.assertFalse( f( 1,-1, [], False, False))\n self.assertFalse( f( 1, 1, [1], False, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], True, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1,3], True, True)) # n_off_role can never be 1\n\n self.assertFalse( f( 3, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 3, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 5, 1, [6,6], True, False)) # n_off_role can never be 1", "def init_constraints(self):\n for (row, col), curr_value in np.ndenumerate(self.final_values):\n self.possible_values[row][col] = [] # Initialize empty list\n if curr_value == 0: # If the final value is 0 then the position is vacant\n for value in range(1, 10): # Iterate through all possible values (1, 9) and check if they are possible\n if self.__is_valid_value(row, col, value):\n self.possible_values[row][col].append(value) # Append possible values to the corresponding list\n return", "def limit_possible(self, *values):\n self.possible.intersection_update(values)", "def solve(self):\n\n n = 0\n\n # Keep iterating while incrementing the allowed combination length until\n # a combination that can accommodate the defined load has been found.\n while True:\n n += 1\n combos = self.fit_n_bins(n=n)\n if combos:\n return combos", "def _generateResults_combosToBitmasks(self, all, want_combos):\n names = {}\n iter = enumerate(all)\n try:\n i, name = next(iter)\n while True:\n names[name] = (1 << i)\n i, name = next(iter)\n except StopIteration:\n pass\n print \"combosToBitmasks names:\", names\n results = []\n for combo in want_combos:\n mask = 0\n for name in combo:\n if name in names:\n mask |= names[name]\n results.append(mask)\n return results", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def _encode_check_unknown(values, uniques, return_mask=False):\n uniques_set = set(uniques)\n diff = list(set(values) - uniques_set)\n if return_mask:\n if diff:\n valid_mask = [val in uniques_set for val in values]\n else:\n valid_mask = [True] * len(values)\n return diff, valid_mask\n else:\n return diff", "def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def possible_values(self) -> Set[int]:\n return {x for x in SudokuTile.UNIVERSE_OF_TILE_VALUES if\n (x not in self._row) and\n (x not in self._column) and\n (x not in self._box)}", 
"def test_no_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [ 0], False, False))\n\n self.assertFalse( f( 1, 0, [], False, False))\n self.assertFalse( f( 1, 1, [2], False, False))\n self.assertFalse( f( 1, 1, [3], False, False))\n self.assertFalse( f( 1, 1, [4], False, False))\n\n self.assertTrue( f( 1, 1, [], False, False))\n self.assertFalse( f( 1, 2, [], False, False))\n self.assertFalse( f( 1, 3, [], False, False))\n\n self.assertFalse( f( 2, 1, [], False, False))\n self.assertTrue( f( 2, 2, [], False, False))\n self.assertFalse( f( 2, 3, [], False, False))\n\n self.assertFalse( f( 3, 1, [], False, False))\n self.assertFalse( f( 3, 2, [], False, False))\n self.assertTrue( f( 3, 3, [], False, False))\n\n self.assertTrue( f(13,13, [], False, False))\n\n self.assertFalse( f( 1, 1, [0,0,0,3], False, False))\n self.assertFalse( f( 2, 1, [0,0,0,3], False, False))\n self.assertFalse( f( 3, 1, [0,0,0,3], False, False))", "def reduce_puzzle(values):\r\n\r\n ''' Your solution here '''\r\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\r\n stalled = False\r\n while not stalled:\r\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\r\n values = eliminate(values)\r\n values = only_choice(values)\r\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\r\n stalled = solved_values_before == solved_values_after\r\n if len([box for box in values.keys() if len(values[box]) == 0]):\r\n return False\r\n return values", "def solve(self):\n\n n = 0\n\n # Keep iterating while incrementing the allowed combination length until\n # a combination with a capacity identical to the defined load has been\n # found.\n while True:\n n += 1\n combos = self.fit_n_bins(n=n)\n if combos and sum(combos[0]) == self.load:\n return combos", "def fn(vals):\n total = odd = 0 \n for i, x in enumerate(vals): \n if vals[0] == x: \n total += 1\n if i&1: odd += 1\n elif vals[0] ^ x != (1 << n) - 1: return inf\n ans = inf \n if len(vals) <= 2*total <= len(vals)+1: ans = min(ans, odd)\n if len(vals)-1 <= 2*total <= len(vals): ans = min(ans, total - odd)\n return ans", "def zero_comb(num_list):\n return {tuple(sorted(n)) for n in combinations(num_list, 3) if sum(n) == 0}", "def solve(self):\n for x in range(9):\n for y in range(9):\n if self.arr[x][y] == 0:\n for value in range(1,10):\n if self.is_possible_value(x,y,value):\n self.arr[x][y] = value\n self.solve()\n self.arr[x][y] = 0 \n return\n print(np.matrix(self.arr))", "def check_solution(generation):\n # Calcula restrições para indivíduos da geração\n restrs = [restricoes(ind) for ind in generation]\n\n solution = []\n for i, retr in enumerate(restrs):\n if retr == 0:\n solution.append(generation[i])\n return solution, restrs", "def only_choice(values):\n for unit in unitlist:\n for d in '123456789':\n # array of boxes for the digit d\n destinations = [b for b in unit if d in values[b]]\n if len(destinations) == 1:\n values = assign_value(values, destinations[0], d)\n return values", "def find_solutions(self, board: list, x: int, y: int):\n col_nums = [] # empty list for column numbers != 0\n for z in range(9):\n if not board[z][x] == 0:\n col_nums.append(board[z][x]) # appends numbers != 0\n # list containing numbers != 0 for the given row\n row_nums = [n for n in board[y] if not n == 0]\n\n # Finds group for x & y\n x_group = self.find_group(x)\n y_group = self.find_group(y)\n group_nums = [] # empty list containing the cell's group numbers\n for i in 
y_group:\n for j in x_group:\n if not board[i][j] == 0:\n group_nums.append(board[i][j]) # appends numbers != 0\n\n # Set containing all unavailable numbers without duplicates\n numbers = set(col_nums + row_nums + group_nums)\n # Creates list with possible solutions\n possible_solutions = [n for n in range(1, 10) if n not in numbers]\n\n return possible_solutions" ]
[ "0.6063571", "0.5989684", "0.58360726", "0.5805874", "0.57882935", "0.5782648", "0.572063", "0.5671718", "0.56716937", "0.5633513", "0.56065935", "0.5559587", "0.55376446", "0.55248064", "0.5514542", "0.5503843", "0.5489493", "0.54865366", "0.54118717", "0.5406459", "0.53880244", "0.5351923", "0.5323398", "0.53030556", "0.5294594", "0.52806807", "0.5279186", "0.52675146", "0.5262685", "0.52548295" ]
0.730627
0
Deprecated, use make_xml instead
def prepare(*args, **kwargs): LOGGER.warning( "readalongs.api.prepare() is deprecated. Please use make_xml() instead." ) return make_xml(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()", "def from_xml(cls, xml_data, system, id_generator):\r\n xml_object = etree.fromstring(xml_data)\r\n system.error_tracker(\"WARNING: the <{0}> tag is deprecated. Please do not use in new content.\"\r\n .format(xml_object.tag))\r\n\r\n if len(xml_object) == 1:\r\n for (key, val) in xml_object.items():\r\n xml_object[0].set(key, val)\r\n\r\n return system.process_xml(etree.tostring(xml_object[0]))\r\n else:\r\n xml_object.tag = 'sequential'\r\n return system.process_xml(etree.tostring(xml_object))", "def from_xml(cls, xml_data, system, id_generator):\r\n\r\n xml_object = etree.fromstring(xml_data)\r\n system.error_tracker('WARNING: the <{tag}> tag is deprecated. '\r\n 'Instead, use <customtag impl=\"{tag}\" attr1=\"...\" attr2=\"...\"/>. '\r\n .format(tag=xml_object.tag))\r\n\r\n tag = xml_object.tag\r\n xml_object.tag = 'customtag'\r\n xml_object.attrib['impl'] = tag\r\n\r\n return system.process_xml(etree.tostring(xml_object))", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def xml(self):\n raise NotImplementedError('This api does not return xml')", "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def get_xml(self):\n return etree.tostring(self.get_etree())", "def toString(doc):\n return doc.toxml()", "def example_xml40(example_xml_file40):\n return etree.fromstring(example_xml_file40.encode('utf-8'))", "def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))", "def saveToXml(self) -> org.jdom.Element:\n ...", "def create(cls, xml):\n raise Exception('Not Implemented Yet')", "def xml():\n try:\n return Response(render_template(\n 'lti.xml.j2'), mimetype='application/xml'\n )\n except:\n app.logger.error(\"Error with XML.\")\n return return_error('''Error with XML. Please refresh and try again. 
If this error persists,\n please contact support.''')", "def example_xml42(example_xml_file41):\n return etree.fromstring(example_xml_file42.encode('utf-8'))", "def example_xml43(example_xml_file41):\n return etree.fromstring(example_xml_file43.encode('utf-8'))", "def xml(self):\n return lxml.etree.fromstring(self.content, _XML_PARSER)", "def example_xml41(example_xml_file41):\n return etree.fromstring(example_xml_file41.encode('utf-8'))", "def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def new(cls, xml):\n return oxml.parse_xml(xml)", "def test_to_xml_from_xml(self):\n xml_panda = self.test_panda.to_xml()\n new_panda = self.test_panda.from_xml(xml_panda)\n self.assertEqual(self.test_panda, new_panda)", "def adaptXmlToXml(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptXmlToXml(self, *args)", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: 
pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def parsexml(self):\n raise NotImplementedError", "def toXml(prefixes=None, closeElement=1, defaultUri='',\n prefixesInScope=None):", "def saveXml(self, buf: java.lang.StringBuilder) -> None:\n ...", "def xml(self, request):\n raise Exception(\"Not Implemented\")", "def test_write_defaults(self):\n xml = (\n u'<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n u'<DocRoot>'\n u'<Elem1>默认جذ</Elem1>'\n u'<Elem2/>'\n u'</DocRoot>'\n )\n io_string = six.StringIO()\n self.builder.write_doc(io_string)\n if six.PY2:\n self.assertEqual(xml.encode('utf-8'), io_string.getvalue())\n else:\n self.assertEqual(xml, io_string.getvalue())", "def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)", "def xml(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n return Text(string, token).xml" ]
[ "0.6673175", "0.6491031", "0.6446211", "0.637155", "0.6355076", "0.63387907", "0.6337277", "0.62541366", "0.6239761", "0.619156", "0.6185545", "0.61794513", "0.61569715", "0.61033696", "0.609219", "0.6091051", "0.60459286", "0.60458064", "0.6006322", "0.59886265", "0.59737456", "0.59616035", "0.59543043", "0.59062994", "0.5897145", "0.5871679", "0.58492804", "0.5845504", "0.58402807", "0.5833825" ]
0.73191714
0
Verifies that isotropicDistance refuses non binary input images
def testIsotropicDistanceDepthAcceptance(self): self.assertRaises(MambaError, isotropicDistance, self.im8_1, self.im8_2) self.assertRaises(MambaError, isotropicDistance, self.im32_1, self.im8_2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testIsotropicDistance(self):\n (w,h) = self.im1_1.getSize()\n \n self.im1_1.reset()\n drawSquare(self.im1_1, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n \n self.im8_3.reset()\n drawSquare(self.im8_3, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n self.im8_3.setPixel(2, (w//2, h//2))\n isotropicDistance(self.im1_1, self.im8_1)\n (x,y) = compare(self.im8_1, self.im8_3, self.im8_2)\n self.assertTrue(x<0)", "def verify(align_imgs):\n\n g = get_graph()\n with g.as_default():\n # Get input and output tensors\n images_placeholder = g.get_tensor_by_name(\"input:0\")\n embeddings = g.get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = g.get_tensor_by_name(\"phase_train:0\")\n\n # Run forward pass to calculate embeddings\n feed_dict = {images_placeholder: align_imgs,\n phase_train_placeholder: False}\n sess = get_session()\n emb = sess.run(embeddings, feed_dict=feed_dict)\n\n dist = np.sqrt(np.sum(np.square(np.substract(emb[0,:], emb[1,:]))))\n print('distance: %1.4f' % dist)\n\n return '%1.4f' % dist", "def _check_consistency_between_imaging_extractors(self):\n return True", "def verify(image_path, identity, database, model):\n\n encoding = img_to_encoding(image_path, model)\n\n dist = np.sum((encoding - database[identity]) ** 2) ** .5\n\n if dist < .7:\n print(\"It's \" + str(identity) + \", welcome home!\")\n door_open = dist < .7\n else:\n print(\"It's not \" + str(identity) + \", please go away\")\n door_open = dist < .7\n\n\n return dist, door_open", "def verify(image_path, identity, database, model):\r\n \r\n \r\n encoding = img_to_encoding(image_path=image_path, model=model)\r\n \r\n dist = np.linalg.norm(np.subtract(database[identity], encoding))\r\n \r\n if dist<0.7:\r\n print(\"It's \" + str(identity) + \", welcome home!\")\r\n door_open = True\r\n else:\r\n print(\"It's not \" + str(identity) + \", please go away\")\r\n door_open = False\r\n \r\n \r\n return dist, door_open", "def testImageDiffLengthEnforced(self) -> None:\n with self.assertRaises(AssertionError):\n _ = data_types.Result('test', ('win', 'x86'), (1, 2, 3),\n 'build_id')", "def verify_binary_image(self, image_path):\n raise NotImplementedError", "def verify(image_path, identity, database, model):\n\n # Compute the encoding for the image.\n encoding = M.img_to_encoding(image_path, model)\n\n # Compute distance with identity's image\n dist = np.linalg.norm(database[identity] - encoding)\n\n return dist", "def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def test_filter_image():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img0 = cv2.imread(str(f_image))\n img1 = model(f_image)\n\n diff = (img0 - img1).sum()\n\n assert abs(diff) > 0", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def verify(image_path,identity,database,model):\n verify(image_path, identity, database, model):\n encoding=img_to_encoding(image_path,model)\n dist=np.linalg.norm(encoding-database[identity])\n if dist<0.7:\n print(\"It's \"+str(identity)+\",welcome home!\")\n door_open=True\n else:\n print(\"It's not\"+str(identity)+\",please go away\")\n door_open=False\n return dist,door_open", "def diff_image_feature(image0, image1):\n return 0", "def testMissingImage(self):\n self.assertNotIn('no_image', self.data)", "def verify(image_path, database, model):\n\n ### START CODE HERE ###\n\n # Step 1: Compute the encoding for the image. 
Use img_to_encoding() see example above. (≈ 1 line)\n status, encoding = img_to_encoding(image_path, model, resize=True)\n if not status:\n return None, None, encoding\n\n dist = 0\n\n # Step 2: Compute distance with identity's image (≈ 1 line)\n for (name, db_enc) in database.items():\n\n dist += np.linalg.norm(db_enc - encoding)\n\n final_dist = dist / len(database)\n\n # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)\n if final_dist < 0.7:\n print(\"welcome home!\")\n match = True\n else:\n print(\"please go away\")\n match = False\n\n ### END CODE HERE ###\n\n return final_dist, match, encoding", "def __test_similarity(self):\n\n _, test_loader, _ = create_loaders()\n\n false_counter = 0\n for (image, labels) in test_loader:\n\n output_pytorch = self._model(image).detach().numpy()\n\n im = image.numpy().flatten()\n output_manual = self.run_through_model(im)\n\n if np.allclose(output_pytorch, output_manual, rtol=1e-4, atol=1e-4) is not True:\n false_counter += 1\n\n print(f\"Number of mistakes: {false_counter}\")", "def is_different(image1, image2):\n gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)\n gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)\n\n (score, diff) = compare_ssim(gray1, gray2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n thresh = cv2.threshold(diff, 0, 255,\n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n return bool(cnts)", "def check_correctness(classifier_out, mode, image_type):\n labels = image_type.image_data[mode].labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n print(f'Got {num_correct} out of {total} correct: {(num_correct / total) * 100}%')", "def test_wrong_info(self):\r\n i = theano.scalar.basic.int32()\r\n self.assertRaises(NotScalarConstantError, self.validate,\r\n (3, 2, 8, i), (4, 2, 5, 5),\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))\r\n self.assertRaises(NotScalarConstantError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, i),\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))", "def test_antialiasing():\n images = generate_test_images()\n loaded = load_test_images(images)\n print(list(loaded.keys()))\n for description in images.keys():\n assert (images[description] == loaded[description]).all()", "def _check_size(self, img):\n absdiff = num.abs(num.subtract(img.shape, self.expected_size))\n pctdiff = num.true_divide(absdiff, self.expected_size)\n if not num.all(pctdiff <= self.size_tolerance):\n raise StandardError('image size outside form tolerance {} != {}'\n .format(img.shape, self.expected_size))", "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return 
False\n finally:\n img1 = None\n img2 = None", "def test_check_wrong_image(self):\n result = analyzer.check_image_color(\"tests/test_files/non_exists.jpg\")\n self.assertEqual(result, \"Image not found\")", "def _verifyConversion(self, imc_in, imc_out=None):\n if imc_in != const.IMC.GREATER_OF_TWO_HORIZONTAL and \\\n imc_in != const.IMC.MEDIAN_HORIZONTAL and \\\n imc_in != const.IMC.GMRotI50 and \\\n imc_in != const.IMC.RotD50 and \\\n imc_in != const.IMC.RANDOM_HORIZONTAL and \\\n imc_in != const.IMC.HORIZONTAL:\n raise ValueError('unknown IMC %r' % imc_in)", "def verify_fn(img_dir, identity, database, model):\n encoding = encoding_images(img_dir, model)\n # Compute distance with identity's image\n # Note: there are 14 images in eache folders' name, so we should check the equal elemnts of encoding[i] and database[identity][i]\n dist = np.linalg.norm(encoding[0] - database[identity][0])\n # Open the door if dist < 0.7, else don't open\n if dist < 0.7:\n print(\"It's\" + str(identity) + \", welcome home!\")\n door_open = True\n else:\n print(\"It's not \" + str(identity) + \", please go away\")\n door_open = False\n\n return dist, door_open", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def is_isotropic(self):\n return self.fibres is None", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")" ]
[ "0.72642386", "0.619694", "0.6139344", "0.6065631", "0.6022535", "0.60154855", "0.5971038", "0.59305257", "0.5920673", "0.59090304", "0.59090304", "0.589726", "0.58919555", "0.5806894", "0.57667726", "0.57252896", "0.5722722", "0.5716484", "0.5700085", "0.5682196", "0.5666687", "0.56597674", "0.56540227", "0.5643379", "0.56365633", "0.5628015", "0.56230146", "0.561711", "0.5595717", "0.5585494" ]
0.69661176
1
Tests the computation of an isotropic distance
def testIsotropicDistance(self): (w,h) = self.im1_1.getSize() self.im1_1.reset() drawSquare(self.im1_1, (w//2-1, h//2-1, w//2+1, h//2+1), 1) self.im8_3.reset() drawSquare(self.im8_3, (w//2-1, h//2-1, w//2+1, h//2+1), 1) self.im8_3.setPixel(2, (w//2, h//2)) isotropicDistance(self.im1_1, self.im8_1) (x,y) = compare(self.im8_1, self.im8_3, self.im8_2) self.assertTrue(x<0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_calculate_distance():\n\n r1 = np.array([0, 0, 0])\n r2 = np.array([0, 1, 0])\n\n expected_distance = 1\n\n calculated_distance = molecool.calculate_distance(r1, r2)\n\n assert expected_distance == calculated_distance", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def test_distance(self):\n\n def f(a, b):\n if a == b:\n return 0\n if (a in \"UC\" and b in \"UC\") or (a in \"AG\" and b in \"AG\"):\n return 1\n else:\n return 10\n\n # uses identity function by default\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\"), 3)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\"), 5)\n # should use function if supplied\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"C\", f), 1)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"G\", f), 10)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\", f), 21)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\", f), 50)", "def test_tanimoto_distance(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n tanimototo = tanimoto_distance(dist_a, dist_b)\n if i == j:\n assert pytest.approx(tanimototo, 0.0001) == 1\n else:\n assert tanimototo < 1", "def testIsotropicDistanceDepthAcceptance(self):\n self.assertRaises(MambaError, isotropicDistance, self.im8_1, self.im8_2)\n self.assertRaises(MambaError, isotropicDistance, self.im32_1, self.im8_2)", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 
= time.time()\n astropy_dist = [a1.separation(a).rad for a in [a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)", "def test_poincare_distance(self):\n vector_1 = self.vectors['dog.n.01']\n vector_2 = self.vectors['mammal.n.01']\n\n distance = self.vectors.vector_distance(vector_1, vector_2)\n self.assertTrue(np.allclose(distance, 4.5278745))\n\n distance = self.vectors.vector_distance(vector_1, vector_1)\n self.assertTrue(np.allclose(distance, 0))", "def test_detector_distance(i07_nexus: I07Nexus, detector_distance):\n assert i07_nexus.detector_distance == detector_distance", "def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)", "def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0", "def test_distance_function(self):\n if connection.ops.oracle:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n elif connection.ops.spatialite:\n if connection.ops.spatial_version < (5,):\n # SpatiaLite < 5 returns non-zero distance for polygons and points\n # covered by that polygon.\n ref_dists = [326.61, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4891.20, 8071.64, 9123.95]\n htown = City.objects.get(name=\"Houston\")\n qs = Zipcode.objects.annotate(\n distance=Distance(\"poly\", htown.point),\n distance2=Distance(htown.point, \"poly\"),\n )\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance.m, ref, 2)\n\n if connection.ops.postgis:\n # PostGIS casts geography to geometry when distance2 is calculated.\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance2.m, ref, 2)\n\n if not connection.ops.spatialite:\n # Distance function combined with a lookup.\n hzip = Zipcode.objects.get(code=\"77002\")\n self.assertEqual(qs.get(distance__lte=0), hzip)", "def testSymmetry(self):\n for profile1 in self.profiles:\n for profile2 in self.profiles:\n self.assertEqual(profile1.edit_distance(profile2), profile2.edit_distance(profile1))", "def test_cossim(self):\n metrics = SimilarityMetrics()\n test1 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test1,0.0)\n\n test2 = metrics.cosine_similarity(np.asarray([1,-1]),np.asarray([-1,1]))\n np.testing.assert_almost_equal(test2,-1.0)\n\n test3 = metrics.cosine_similarity(np.asarray([1,1]),np.asarray([1,1]))\n np.testing.assert_almost_equal(test3,1.0)", "def test_get_motif_distance(self):\n \n result = get_motif_distance(pybedtools.BedTool(clipper.test_file(\"motif_distance_test_cluster.bed\")),\n pybedtools.BedTool(clipper.test_file(\"motif_distance_test_motif.bed\")))\n \n #hand calculated results from motif distance\n self.assertListEqual(result, [-12,112, 12])", "def 
test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def test_distance_aba(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ABC'), '9')", "def test_dist_itslef(self):\n X = [[0, 10], [4, 2]] # Just some points. I've no idea where on globe.\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))\n\n X = [[34.0522, 118.2437], # Lon Angeles\n [37.7749, 122.4194]] # San Francisco\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))", "def test_EstimateDistances(self):\n d = EstimateDistances(self.al, JC69())\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n # excercise writing to file\n d.writeToFile('junk.txt')\n try:\n os.remove('junk.txt')\n except OSError:\n pass # probably parallel", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n 
try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score12 == 2 * 2/6, \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def test_equals_distance_buildings():\n for i in range(building_count):\n for j in range(building_count):\n if i == j:\n continue\n rust_result = rust_force.calculate_distance_between_two_buildings(\n rust_buildings[i], rust_buildings[j])\n python_result = calculate_distance_between_two_buildings(\n python_figures[i], python_figures[j], python_positions[i], python_positions[j])\n assert rust_result == python_result", "def test_distance_ad(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AD'), '5')", "def test_distance_aed(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AED'), 'NO SUCH ROUTE')", "def test_distances(self):\n distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances('dog.n.01')\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def test_Euclidian_distances(self):\n \n\t\tm1 = models.vgg11()\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2, method=EUCLIDEAN)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 30.2\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\t# biased not implemented yet in layers\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.00\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)", "def test_statistic(self):\n for seed in range(5):\n\n random_state = np.random.RandomState(seed)\n\n for i in range(4, self.test_max_size + 1):\n arr1 
= random_state.rand(i, 1)\n arr2 = random_state.rand(i, 1)\n\n stat = dcor_internals._distance_correlation_sqr_naive(\n arr1, arr2)\n stat_fast = dcor_internals._distance_correlation_sqr_fast(\n arr1, arr2)\n\n self.assertAlmostEqual(stat, stat_fast)", "def test_distance_adc(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ADC'), '13')" ]
[ "0.7066497", "0.6987544", "0.65998495", "0.65983224", "0.6521844", "0.6477065", "0.645209", "0.6424145", "0.63212407", "0.6281343", "0.62577516", "0.62530804", "0.6249498", "0.6235426", "0.61601436", "0.61583465", "0.61443377", "0.6097818", "0.6092339", "0.6088725", "0.6084576", "0.60606205", "0.5996007", "0.59937054", "0.59816414", "0.5967202", "0.5925537", "0.58987683", "0.5895447", "0.5854488" ]
0.7764714
0
Verifies that the edge is correctly drawn
def testDrawEdge(self): (w,h) = self.im8_1.getSize() for thick in range(10): self.im8_1.reset() drawEdge(self.im8_1, thick) self.im8_3.fill(255) drawSquare(self.im8_3, (thick, thick, w-1-thick, h-1-thick), 0) (x,y) = compare(self.im8_1, self.im8_3, self.im8_2) self.assertTrue(x<0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True", "def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True", "def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True", "def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True", "def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.bottom >= screen_rect.bottom or self.rect.top <= -1:\n\t\t\treturn True", "def test_edges(self):\n\n edge_list = self.g.edges()\n self.assertEqual(42, len(edge_list))\n\n # p1 p3 and p3 p1 are valid edges\n t1 = ('p1', 'p3')\n self.assertTrue(t1 in edge_list)\n\n t2 = ('p3', 'p1')\n self.assertTrue(t2 in edge_list)\n\n made_up = ('z1', 'q123')\n self.assertFalse(made_up in edge_list)\n\n return None", "def isEdge(self,x,y):\r\n return self.matr[x][y]", "def test_case20(self):\n\n result = self.graph1.isEdge(\"supervisor1\",\"student5\")\n self.assertFalse(result)", "def test_case19(self):\n\n result = self.graph1.isEdge(\"supervisor5\",\"student1\")\n self.assertFalse(result)", "def test_case21(self):\n\n result= self.graph1.isEdge(\"supervisor2\",\"student1\")\n\n self.assertFalse(result)", "def isEdge(self,x,y):\n\t\treturn self._matr[x][y]", "def test_case22(self):\n \n result = self.graph1.isEdge(\"supervisor1\",\"student1\")\n\n self.assertTrue(result)", "def IsEdge(self, p_int, p_int_1):\n ...", "def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]", "def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n print('There is NO edge from ' + str(start) + ' to ' + str(end))", "def check_regularity(edges):\n for a, b in edges:\n counter_a = 0\n counter_b = 0\n for x, y in edges:\n if a == x or a == y:\n counter_a += 1\n if b == x or b == y:\n counter_b += 1\n assert (counter_a > 0) and (counter_b > 0)\n if (counter_a == 1) or (counter_b == 1):\n raise Exception(\"Boundary is not closed.\")\n if (counter_a > 2) or (counter_b > 2):\n raise Exception(\"More than two edges share a node.\")", "def validate_graph(self) -> bool:\n return True", "def draw_edges(self):\n pass", "def valid_ray(self, row, col):\n # if row nor col is at an edge space, returns False\n if row != 0 and row != 9 and col != 0 and col != 9:\n return False\n # ensures no corner spaces have been selected\n if row == 0 or row == 9:\n if col > 8 or col < 1:\n return False\n if col == 0 or col == 9:\n if row > 8 or row < 1:\n return False\n return True", "def validate(self):\r\n if self.eid is None:\r\n if self._inV is None:\r\n raise ValidationError('in vertex must be set before saving new edges')\r\n if self._outV is 
None:\r\n raise ValidationError('out vertex must be set before saving new edges')\r\n super(Edge, self).validate()", "def test_isobutene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n image.add_line((500, 400), (587, 450))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]],\n [[587, 450]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]],\n [[500, 400, 587, 450]]\n ])\n )", "def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def isEdge(self, x, y):\n return y in self._dictOut[x]", "def check_corners(self, vertices, corners):\n assert_allclose(vertices['ul'], corners[0])\n assert_allclose(vertices['ur'], corners[1])\n assert_allclose(vertices['lr'], corners[2])\n assert_allclose(vertices['ll'], corners[3])", "def edge_ground(X):\n gradient_x = img_conv(X, kernel_sobel_x)\n gradient_y = img_conv(X, kernel_sobel_x.transpose())\n mag = (gradient_x ** 2.0 + gradient_y ** 2.0) ** 0.5\n is_edge = mag > 1.0\n return is_edge.astype('f')", "def checkInBound(self,value,checkEdge):\n assert(checkEdge==0 or checkEdge==1)\n if checkEdge==0: # width\n assert(value>=0 and value<self.w)\n else:\n assert(value>=0 and value<self.h)", "def isEdge(self,x,y):\n\t\treturn y in self._dict[x]" ]
[ "0.697433", "0.68805933", "0.6687636", "0.6635503", "0.66265905", "0.65967", "0.6589389", "0.6577165", "0.65077263", "0.64738965", "0.6442234", "0.64329076", "0.64071053", "0.636453", "0.635672", "0.6261035", "0.6224357", "0.6167584", "0.608596", "0.6079706", "0.60442454", "0.6037041", "0.6035435", "0.6030654", "0.6019541", "0.6016916", "0.60060376", "0.6", "0.5999982", "0.59650826" ]
0.76946634
0
Verifies the saturated (ceiling) addition for 32bit images
def testCeilingAdd(self): (w,h) = self.im32_1.getSize() self.im32_1.fill(0xffffff80) for i in range(256): self.im32_3.fill(i) ceilingAdd(self.im32_1, self.im32_3, self.im32_2) vol = computeVolume(self.im32_2)//(w*h) value = min(0xffffff80+i, 0xffffffff) self.assertTrue(vol==value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testCeilingAddConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n ceilingAddConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def test_bit_add_overflow_saturate(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_SATURATE, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def conditional_guard(src, dst):\n int64_count = 0\n float64_count = 0\n float16_count = 0\n if src in int64_types or dst in int64_types:\n int64_count = 1\n if src in float64_types or dst in float64_types:\n float64_count = 1\n if src in float16_types or dst in float16_types:\n float16_count = 1\n if float16_count > 0:\n print(\"#ifdef cl_khr_fp16\")\n if float64_count > 0:\n #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be\n print(\"#ifdef cl_khr_fp64\")\n return 1 + float16_count\n elif int64_count > 0:\n print(\"#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)\")\n return 1 + float16_count\n return float16_count", "def needspadding(self):\n return self.datasize % 2 != 0", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)", "def test_bit_add_overflow_wrap(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def _verify(self, dimension):\n value = self.texture.__getattribute__(dimension)\n while value > 1:\n div_float = float(value) / 2.0\n div_int = int(div_float)\n if not (div_float == div_int):\n raise Exception('image %s is %d, which is not a power of 2' % (\n dimension, self.texture.__getattribute__(dimension)))\n value = div_int", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0", "def test_bigSum():\n\n assert bigSum() == 20000000100000000", "def testFloorSubConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n floorSubConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value 
= max(0x80-i, 0)\n self.assertTrue(vol==value)", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def longdouble_lte_float64():\n return np.longdouble(2**53) == np.longdouble(2**53) + 1", "def testCeiling(self):\n\t\tself.assertTrue(calcFlopWins(2, 3) <= 1.0)", "def _cutoff32(value):\n return value % 0x100000000", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def is_perfect_square():", "def test_bit_add_bit_size_out_of_range(self):\n ops = [bitwise_operations.bit_add(self.test_bin_zeroes, 0, 41, 1, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def less_compare_float32(data_x, data_y):\n shape_inputs = get_shape(data_x)\n # minimun num of float32 2**(-126)\n data_min = akg.lang.ascend.broadcast(tvm.const(2**(-126), dtype=\"float32\"), shape_inputs, \"float32\")\n data_zero = akg.lang.ascend.broadcast(dc.zero_const(\"float32\"), shape_inputs, \"float32\")\n res_sub = topi.subtract(data_y, data_x)\n res_min = topi.minimum(res_sub, data_min)\n res_max = topi.maximum(res_min, data_zero)\n # max num of float32 is 2**126\n # but cce can only support 2**62, so use 62 * 62 * 2 to adaptor 126\n res_mul_fierst = topi.multiply(res_max, tvm.const(2**62, dtype=\"float32\"))\n res_mul_second = topi.multiply(res_mul_fierst, tvm.const(2**62, dtype=\"float32\"))\n res = topi.multiply(res_mul_second, tvm.const(2**2, dtype=\"float32\"))\n\n return res", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def is_32_bit_int_compatible( i ) :\r\n\r\n #\r\n # can we convert the input to an integer value ? \r\n #\r\n\r\n try : \r\n\r\n ## debug:\r\n ## print \"compat: \", i, truncate_pyint_to_i32_interval(i)\r\n\r\n if i == truncate_pyint_to_i32_interval( i ) : \r\n ## # debug \r\n ## print \"compat: TRUE\", i, truncate_pyint_to_i32_interval(i)\r\n return True\r\n\r\n except :\r\n\r\n ## # debug : \r\n ## print \"compat: FALSE\", i, truncate_pyint_to_i32_interval(i)\r\n pass\r\n \r\n # else ... 
\r\n return False", "def test_bit_add_overflow_fail(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def test_bit_add_bit_size_signed(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 254, True, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([253] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def uInt32Compatible(integer):\n return integer < 0x100000000 and integer >= 0", "def extEucBin(x, y):\n g = 1\n while isEven(x) and isEven(y):\n x, y, g = x >> 1, y >> 1, g << 1\n u, v, A, B, C, D = x, y, 1, 0, 0, 1\n while True:\n while isEven(u):\n u >>= 1\n if isEven(A) and isEven(B):\n A, B = A >> 1, B >> 1\n else:\n A, B = (A + y) >> 1, (B - x) >> 1\n while isEven(v):\n v >>= 1\n if isEven(C) and isEven(D):\n C, D = C >> 1, D >> 1\n else:\n C, D = (C + y) >> 1, (D - x) >> 1\n if u >= v:\n u, A, B = u - v, A - C, B - D\n else:\n v, C, D = v - u, C - A, D - B\n if u == 0:\n return (C, D, g * v)", "def check_bc(R,D,p,mn):\n R -= (R/D>=p)*np.floor(R/D)*D\n R -= (R/D<=mn)*np.floor(R/D)*D\n return R", "def testFloorSub(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n self.im32_3.fill(i)\n floorSub(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = max(0x80-i, 0)\n self.assertTrue(vol==value, \"%d: %d %d\" % (i,vol, value))", "def _correct_images(images):\n # From the MNIST website: \"Pixels are organized row-wise. Pixel values are 0 to 255. 0 means\n # background (white), 255 means foreground (black).\"\n # The dataset does not transform the image such that 255 is black, so do that here.\n dtype = _assert_dtype(images)\n max_val = 255 if dtype == dtypes.uint8 else 1.0\n return max_val - images" ]
[ "0.6719554", "0.58146805", "0.56908935", "0.5687286", "0.5670073", "0.5638138", "0.5523233", "0.5491617", "0.5490418", "0.5481001", "0.5478708", "0.5466016", "0.5438659", "0.54138035", "0.5375235", "0.5356348", "0.5356348", "0.5345921", "0.5326352", "0.53262323", "0.53206027", "0.5293966", "0.5271111", "0.52705616", "0.52468956", "0.5230261", "0.5189829", "0.5174068", "0.51695395", "0.5143433" ]
0.70995
0
Verifies the saturated (ceiling) constant addition for 32bit images
def testCeilingAddConst(self): (w,h) = self.im32_1.getSize() self.im32_1.fill(0xffffff80) for i in range(256): ceilingAddConst(self.im32_1, i, self.im32_2) vol = computeVolume(self.im32_2)//(w*h) value = min(0xffffff80+i, 0xffffffff) self.assertTrue(vol==value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testCeilingAdd(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n self.im32_3.fill(i)\n ceilingAdd(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)", "def conditional_guard(src, dst):\n int64_count = 0\n float64_count = 0\n float16_count = 0\n if src in int64_types or dst in int64_types:\n int64_count = 1\n if src in float64_types or dst in float64_types:\n float64_count = 1\n if src in float16_types or dst in float16_types:\n float16_count = 1\n if float16_count > 0:\n print(\"#ifdef cl_khr_fp16\")\n if float64_count > 0:\n #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be\n print(\"#ifdef cl_khr_fp64\")\n return 1 + float16_count\n elif int64_count > 0:\n print(\"#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)\")\n return 1 + float16_count\n return float16_count", "def testItConstantAdd(self):\n\n\t\toutput = calc.it(self.Z,self.c,self.max_iteration)\n\t\tnotError = (self.I == output).all()\n\t\tself.assertEqual(True, notError)", "def test_bit_add_overflow_saturate(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_SATURATE, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def testFloorSubConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n floorSubConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = max(0x80-i, 0)\n self.assertTrue(vol==value)", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def needspadding(self):\n return self.datasize % 2 != 0", "def _cutoff32(value):\n return value % 0x100000000", "def _check_redshift(self, red):\n if 
np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def test_asymptotic_32():\r\n\r\n #TODO: consider adding the optimization of crossentropy into the current\r\n # mode for the purpose of running this test\r\n\r\n for dtype in 'float32', 'float64':\r\n if dtype == 'float32':\r\n x = tensor.fmatrix()\r\n x2 = tensor.fvector()\r\n else:\r\n x = tensor.dmatrix()\r\n x2 = tensor.dvector()\r\n y = tensor.lvector()\r\n\r\n c = categorical_crossentropy(softmax(x + x2), y)\r\n f = theano.function([x, y, x2], [c.sum(),\r\n tensor.grad(c.sum(), x)], mode='FAST_RUN')\r\n if 0:\r\n for i, n in enumerate(f.maker.fgraph.toposort()):\r\n print i, n\r\n\r\n xval = numpy.zeros((5, 5), dtype=dtype).astype(dtype)\r\n x2val = numpy.zeros(5, dtype=xval.dtype).astype(dtype)\r\n for i in xrange(100):\r\n cval, gxval = f(xval, numpy.arange(5), x2val)\r\n xval -= 100.3 * gxval\r\n #print cval, gxval\r\n assert cval == 0 # no problem going to zero error\r\n\r\n #what about when x gets really big?\r\n\r\n xval = numpy.zeros((5, 5), dtype=dtype)\r\n x2val = numpy.zeros(5, dtype=xval.dtype)\r\n for i in xrange(100):\r\n\r\n cval, gxval = f(xval, numpy.arange(5), x2val)\r\n xval += 100000.3 * gxval\r\n #print cval, gxval\r\n\r\n assert cval > 61750000\r\n assert gxval[0, 0] == -1.0\r\n assert gxval[0, 1] == 0.25", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0", "def less_compare_float32(data_x, data_y):\n shape_inputs = get_shape(data_x)\n # minimun num of float32 2**(-126)\n data_min = akg.lang.ascend.broadcast(tvm.const(2**(-126), dtype=\"float32\"), shape_inputs, \"float32\")\n data_zero = akg.lang.ascend.broadcast(dc.zero_const(\"float32\"), shape_inputs, \"float32\")\n res_sub = topi.subtract(data_y, data_x)\n res_min = topi.minimum(res_sub, data_min)\n res_max = topi.maximum(res_min, data_zero)\n # max num of float32 is 2**126\n # but cce can only support 2**62, so use 62 * 62 * 2 to adaptor 126\n res_mul_fierst = topi.multiply(res_max, tvm.const(2**62, dtype=\"float32\"))\n res_mul_second = topi.multiply(res_mul_fierst, tvm.const(2**62, dtype=\"float32\"))\n res = topi.multiply(res_mul_second, tvm.const(2**2, dtype=\"float32\"))\n\n return res", "def longdouble_lte_float64():\n return np.longdouble(2**53) == np.longdouble(2**53) + 1", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_bigSum():\n\n assert bigSum() == 20000000100000000", "def test_bit_add_overflow_wrap(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def _verify(self, dimension):\n value = self.texture.__getattribute__(dimension)\n while value > 1:\n div_float = float(value) / 2.0\n div_int = int(div_float)\n if not (div_float == div_int):\n raise Exception('image %s is %d, which is not a power of 2' % (\n dimension, self.texture.__getattribute__(dimension)))\n value = div_int", "def test_bit_add_bit_size_out_of_range(self):\n ops = [bitwise_operations.bit_add(self.test_bin_zeroes, 0, 41, 1, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def check_bc(R,D,p,mn):\n R -= (R/D>=p)*np.floor(R/D)*D\n R -= (R/D<=mn)*np.floor(R/D)*D\n return R", "def testMulRealConstDepthAcceptance(self):\n 
self.assertRaises(MambaError, mulRealConst, self.im1_1, 1.0, self.im8_2)\n self.assertRaises(MambaError, mulRealConst, self.im32_1, 1.0, self.im1_2)", "def testCeiling(self):\n\t\tself.assertTrue(calcFlopWins(2, 3) <= 1.0)", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def is_32_bit_int_compatible( i ) :\r\n\r\n #\r\n # can we convert the input to an integer value ? \r\n #\r\n\r\n try : \r\n\r\n ## debug:\r\n ## print \"compat: \", i, truncate_pyint_to_i32_interval(i)\r\n\r\n if i == truncate_pyint_to_i32_interval( i ) : \r\n ## # debug \r\n ## print \"compat: TRUE\", i, truncate_pyint_to_i32_interval(i)\r\n return True\r\n\r\n except :\r\n\r\n ## # debug : \r\n ## print \"compat: FALSE\", i, truncate_pyint_to_i32_interval(i)\r\n pass\r\n \r\n # else ... \r\n return False", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_bit_add_bit_size_signed(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 254, True, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([253] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def is_perfect_square():", "def uInt32Compatible(integer):\n return integer < 0x100000000 and integer >= 0", "def test_bit_add_overflow_fail(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)" ]
[ "0.7092009", "0.60995674", "0.5991026", "0.57602704", "0.575369", "0.57189256", "0.5710188", "0.5662257", "0.5640272", "0.5598037", "0.5539307", "0.55210054", "0.54802877", "0.54787415", "0.54771805", "0.54771805", "0.54765064", "0.54759747", "0.54268837", "0.5373671", "0.53585124", "0.5338456", "0.5293783", "0.5283739", "0.52819145", "0.52817297", "0.5274498", "0.52396524", "0.5230143", "0.52230006" ]
0.7191471
0
Verifies the saturated (floor) subtraction for 32bit images
def testFloorSub(self): (w,h) = self.im32_1.getSize() self.im32_1.fill(0x80) for i in range(256): self.im32_3.fill(i) floorSub(self.im32_1, self.im32_3, self.im32_2) vol = computeVolume(self.im32_2)//(w*h) value = max(0x80-i, 0) self.assertTrue(vol==value, "%d: %d %d" % (i,vol, value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testFloorSubConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n floorSubConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = max(0x80-i, 0)\n self.assertTrue(vol==value)", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def test_bit_subtract_overflow_saturate(self):\n ops = [\n bitwise_operations.bit_subtract(self.test_bin_ones, 0, 8, 255, False, aerospike.BIT_OVERFLOW_SATURATE, None)\n ]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def testCeilingAdd(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n self.im32_3.fill(i)\n ceilingAdd(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def less_compare_float32(data_x, data_y):\n shape_inputs = get_shape(data_x)\n # minimun num of float32 2**(-126)\n data_min = akg.lang.ascend.broadcast(tvm.const(2**(-126), dtype=\"float32\"), shape_inputs, \"float32\")\n data_zero = akg.lang.ascend.broadcast(dc.zero_const(\"float32\"), shape_inputs, \"float32\")\n res_sub = topi.subtract(data_y, data_x)\n res_min = topi.minimum(res_sub, data_min)\n res_max = topi.maximum(res_min, data_zero)\n # max num of float32 is 2**126\n # but cce can only support 2**62, so use 62 * 62 * 2 to adaptor 126\n res_mul_fierst = topi.multiply(res_max, tvm.const(2**62, dtype=\"float32\"))\n res_mul_second = topi.multiply(res_mul_fierst, tvm.const(2**62, dtype=\"float32\"))\n res = topi.multiply(res_mul_second, tvm.const(2**2, dtype=\"float32\"))\n\n return res", "def check_image_invert(image_data, border_width = 30):\n \n _, avg_intensity_borders, avg_intensity_inside = \\\n _auto_threshold_borders(image_data,border_width = border_width);\n \n # if image borders are darker than the mean image, it's a surface tension\n # image:\n if(avg_intensity_inside > avg_intensity_borders):\n return False;\n # else, it's a shadowgraph image:\n else:\n return True;", "def testCeilingAddConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n ceilingAddConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def _verify(self, dimension):\n value = self.texture.__getattribute__(dimension)\n while value > 1:\n div_float = float(value) / 2.0\n div_int = int(div_float)\n if not (div_float == div_int):\n raise Exception('image %s is %d, which is not a power of 2' % (\n dimension, 
self.texture.__getattribute__(dimension)))\n value = div_int", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def conditional_guard(src, dst):\n int64_count = 0\n float64_count = 0\n float16_count = 0\n if src in int64_types or dst in int64_types:\n int64_count = 1\n if src in float64_types or dst in float64_types:\n float64_count = 1\n if src in float16_types or dst in float16_types:\n float16_count = 1\n if float16_count > 0:\n print(\"#ifdef cl_khr_fp16\")\n if float64_count > 0:\n #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be\n print(\"#ifdef cl_khr_fp64\")\n return 1 + float16_count\n elif int64_count > 0:\n print(\"#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)\")\n return 1 + float16_count\n return float16_count", "def canonical_reordering_sign_euclidean(bitmap_a, bitmap_b):\n a = bitmap_a >> 1\n sum_value = 0\n while a != 0:\n sum_value = sum_value + count_set_bits(a & bitmap_b)\n a = a >> 1\n if (sum_value & 1) == 0:\n return 1\n else:\n return -1", "def test_bit_subtract_overflow_wrap(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([254] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def test_bit_subtract_bit_size_signed(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 156, True, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([99] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def __hsl_threshold(input, hue, sat, lum):\r\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\r\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def test_saturation_mixing_ratio():\n p = 999. * units.mbar\n t = 288. 
* units.kelvin\n assert_almost_equal(saturation_mixing_ratio(p, t), .01068, 3)", "def __hsl_threshold(input, hue, sat, lum):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def _cutoff32(value):\n return value % 0x100000000", "def test_bit_subtract_inbetween_bytes(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 4, 8, 255, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([240] * 1 + [15] * 1 + [255] * 3)\n assert bins[self.five_255_bin] == expected_result", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def test_filter_image():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img0 = cv2.imread(str(f_image))\n img1 = model(f_image)\n\n diff = (img0 - img1).sum()\n\n assert abs(diff) > 0", "def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)", "def diff_image_feature(image0, image1):\n return 0", "def _correct_images(images):\n # From the MNIST website: \"Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means\n # background (white), 255 means foreground (black).\"\n # The dataset does not transform the image such that 255 is black, so do that here.\n dtype = _assert_dtype(images)\n max_val = 255 if dtype == dtypes.uint8 else 1.0\n return max_val - images", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_compute_image_sharpness(self):\n yuv_full_scale = 1023.0\n chart_file = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules',\n 'its', 'test_images', 'ISO12233.png')\n chart = cv2.imread(chart_file, cv2.IMREAD_ANYDEPTH)\n white_level = numpy.amax(chart).astype(float)\n sharpness = {}\n for j in [2, 4, 8]:\n blur = cv2.blur(chart, (j, j))\n blur = blur[:, :, numpy.newaxis]\n sharpness[j] = (yuv_full_scale *\n its.image.compute_image_sharpness(blur /\n white_level))\n self.assertTrue(numpy.isclose(sharpness[2]/sharpness[4],\n numpy.sqrt(2), atol=0.1))\n self.assertTrue(numpy.isclose(sharpness[4]/sharpness[8],\n numpy.sqrt(2), atol=0.1))", "def valid_pixel_coordinates(u, v, IMAGE_HEIGHT, IMAGE_WIDTH):\n return (u >= 0 and v >= 0 and v < IMAGE_HEIGHT and u < IMAGE_WIDTH)", "def check_for_inversion(cxr_img_norm):\n\n # Define image rim\n rim_thickness = max(np.shape(cxr_img_norm)) // 20\n rim_array = [\n list(cxr_img_norm[:rim_thickness, :].flatten()),\n list(cxr_img_norm[:, :rim_thickness].flatten()),\n list(cxr_img_norm[-rim_thickness:, :].flatten()),\n list(cxr_img_norm[:, -rim_thickness:].flatten())]\n\n rim_list = [pixel for rim in rim_array for pixel in rim]\n\n # Compare mean of rim to mean of whole image\n img_mean = np.mean(cxr_img_norm)\n rim_mean = np.mean(np.array(rim_list))\n\n inversion_check = (rim_mean > img_mean)\n\n return inversion_check" ]
[ "0.6624845", "0.6108797", "0.5919725", "0.5898519", "0.5795399", "0.5761699", "0.5750941", "0.5718273", "0.57015765", "0.5672458", "0.55940855", "0.5587078", "0.55136657", "0.5511485", "0.5511485", "0.5492086", "0.545562", "0.54514277", "0.54314", "0.54248077", "0.5393601", "0.5391192", "0.5387793", "0.53828937", "0.53631", "0.53606486", "0.53473204", "0.5345267", "0.5343868", "0.5332805" ]
0.6482305
1
Verifies the saturated (floor) constant subtraction for 32bit images
def testFloorSubConst(self): (w,h) = self.im32_1.getSize() self.im32_1.fill(0x80) for i in range(256): floorSubConst(self.im32_1, i, self.im32_2) vol = computeVolume(self.im32_2)//(w*h) value = max(0x80-i, 0) self.assertTrue(vol==value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def testCeilingAddConst(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n ceilingAddConst(self.im32_1, i, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def testFloorSub(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0x80)\n for i in range(256):\n self.im32_3.fill(i)\n floorSub(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = max(0x80-i, 0)\n self.assertTrue(vol==value, \"%d: %d %d\" % (i,vol, value))", "def less_compare_float32(data_x, data_y):\n shape_inputs = get_shape(data_x)\n # minimun num of float32 2**(-126)\n data_min = akg.lang.ascend.broadcast(tvm.const(2**(-126), dtype=\"float32\"), shape_inputs, \"float32\")\n data_zero = akg.lang.ascend.broadcast(dc.zero_const(\"float32\"), shape_inputs, \"float32\")\n res_sub = topi.subtract(data_y, data_x)\n res_min = topi.minimum(res_sub, data_min)\n res_max = topi.maximum(res_min, data_zero)\n # max num of float32 is 2**126\n # but cce can only support 2**62, so use 62 * 62 * 2 to adaptor 126\n res_mul_fierst = topi.multiply(res_max, tvm.const(2**62, dtype=\"float32\"))\n res_mul_second = topi.multiply(res_mul_fierst, tvm.const(2**62, dtype=\"float32\"))\n res = topi.multiply(res_mul_second, tvm.const(2**2, dtype=\"float32\"))\n\n return res", "def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)", "def testCeilingAdd(self):\n (w,h) = self.im32_1.getSize()\n \n self.im32_1.fill(0xffffff80)\n for i in range(256):\n self.im32_3.fill(i)\n ceilingAdd(self.im32_1, self.im32_3, self.im32_2)\n vol = computeVolume(self.im32_2)//(w*h)\n value = min(0xffffff80+i, 0xffffffff)\n self.assertTrue(vol==value)", "def _cutoff32(value):\n return value % 0x100000000", "def conditional_guard(src, dst):\n int64_count = 0\n float64_count = 0\n float16_count = 0\n if src in int64_types or dst in int64_types:\n int64_count = 
1\n if src in float64_types or dst in float64_types:\n float64_count = 1\n if src in float16_types or dst in float16_types:\n float16_count = 1\n if float16_count > 0:\n print(\"#ifdef cl_khr_fp16\")\n if float64_count > 0:\n #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be\n print(\"#ifdef cl_khr_fp64\")\n return 1 + float16_count\n elif int64_count > 0:\n print(\"#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)\")\n return 1 + float16_count\n return float16_count", "def test_bit_subtract_overflow_saturate(self):\n ops = [\n bitwise_operations.bit_subtract(self.test_bin_ones, 0, 8, 255, False, aerospike.BIT_OVERFLOW_SATURATE, None)\n ]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def _verify(self, dimension):\n value = self.texture.__getattribute__(dimension)\n while value > 1:\n div_float = float(value) / 2.0\n div_int = int(div_float)\n if not (div_float == div_int):\n raise Exception('image %s is %d, which is not a power of 2' % (\n dimension, self.texture.__getattribute__(dimension)))\n value = div_int", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def test_saturation_mixing_ratio():\n p = 999. * units.mbar\n t = 288. * units.kelvin\n assert_almost_equal(saturation_mixing_ratio(p, t), .01068, 3)", "def check_bc(R,D,p,mn):\n R -= (R/D>=p)*np.floor(R/D)*D\n R -= (R/D<=mn)*np.floor(R/D)*D\n return R", "def canonical_reordering_sign_euclidean(bitmap_a, bitmap_b):\n a = bitmap_a >> 1\n sum_value = 0\n while a != 0:\n sum_value = sum_value + count_set_bits(a & bitmap_b)\n a = a >> 1\n if (sum_value & 1) == 0:\n return 1\n else:\n return -1", "def test_central(self):\n x = np.array([-100, -2, 0, 0, 1, 1.1])\n self.assertEqual(npinterval.half_sample_mode(x), 0)", "def test_bit_subtract_bit_size_signed(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 156, True, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([99] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def check_image_invert(image_data, border_width = 30):\n \n _, avg_intensity_borders, avg_intensity_inside = \\\n _auto_threshold_borders(image_data,border_width = border_width);\n \n # if image borders are darker than the mean image, it's a surface tension\n # image:\n if(avg_intensity_inside > avg_intensity_borders):\n return False;\n # else, it's a shadowgraph image:\n else:\n return True;", "def test_bit_subtract_overflow_wrap(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([254] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_bit_subtract_inbetween_bytes(self):\n ops = 
[bitwise_operations.bit_subtract(self.five_255_bin, 4, 8, 255, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([240] * 1 + [15] * 1 + [255] * 3)\n assert bins[self.five_255_bin] == expected_result", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def __hsl_threshold(input, hue, sat, lum):\r\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\r\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def valid_pixel_coordinates(u, v, IMAGE_HEIGHT, IMAGE_WIDTH):\n return (u >= 0 and v >= 0 and v < IMAGE_HEIGHT and u < IMAGE_WIDTH)", "def is_perfect_square():", "def __hsl_threshold(input, hue, sat, lum):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def check_for_inversion(cxr_img_norm):\n\n # Define image rim\n rim_thickness = max(np.shape(cxr_img_norm)) // 20\n rim_array = [\n list(cxr_img_norm[:rim_thickness, :].flatten()),\n list(cxr_img_norm[:, :rim_thickness].flatten()),\n list(cxr_img_norm[-rim_thickness:, :].flatten()),\n list(cxr_img_norm[:, -rim_thickness:].flatten())]\n\n rim_list = [pixel for rim in rim_array for pixel in rim]\n\n # Compare mean of rim to mean of whole image\n img_mean = np.mean(cxr_img_norm)\n rim_mean = np.mean(np.array(rim_list))\n\n inversion_check = (rim_mean > img_mean)\n\n return inversion_check", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass" ]
[ "0.6310723", "0.62938845", "0.62113756", "0.5969369", "0.59030825", "0.5886565", "0.58667827", "0.58567953", "0.5739565", "0.57389843", "0.57019556", "0.5626554", "0.55466545", "0.55359733", "0.5475242", "0.54310983", "0.5407184", "0.54054695", "0.53816265", "0.53612334", "0.53420126", "0.53202605", "0.5284511", "0.5273144", "0.5270601", "0.52666533", "0.52561104", "0.5252852", "0.52364546", "0.52364546" ]
0.6950812
0
Verifies that mulRealConst refuses binary input images
def testMulRealConstDepthAcceptance(self): self.assertRaises(MambaError, mulRealConst, self.im1_1, 1.0, self.im8_2) self.assertRaises(MambaError, mulRealConst, self.im32_1, 1.0, self.im1_2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_on_merlin_image_binary(self):\n im = diffread(TEST_MIB)\n self.assertEqual(im.shape, (256, 256))\n self.assertEqual(im.dtype, np.dtype(\">u2\"))", "def testMulRealConst(self):\n self.im8_1.fill(1)\n \n self.im8_3.fill(1)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=False)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_3.fill(2)\n mulRealConst(self.im8_1, 1.6, self.im8_2, nearest=True)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(10)\n self.im8_3.fill(15)\n mulRealConst(self.im8_1, 1.5, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n \n self.im32_1.fill(1000)\n self.im32_3.fill(1500)\n mulRealConst(self.im32_1, 1.5, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)\n \n self.im8_1.fill(200)\n self.im8_3.fill(255)\n self.im32_3.fill(260)\n mulRealConst(self.im8_1, 1.3, self.im8_2)\n (x,y) = compare(self.im8_3, self.im8_2, self.im8_3)\n self.assertTrue(x<0)\n mulRealConst(self.im8_1, 1.3, self.im32_2)\n (x,y) = compare(self.im32_3, self.im32_2, self.im32_3)\n self.assertTrue(x<0)", "def check_bit_exactness(input_raw_file):\n (t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)\n (t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)\n (t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)\n (t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)\n\n if filecmp.cmp(f1, f2, shallow=True):\n print(\"NN interpolation on GPU is bit exact with CPU\")\n if filecmp.cmp(f3, f4, shallow=True):\n print(\"Bilinear interpolation on GPU is bit exact with CPU\")", "def test_wrong_info(self):\r\n i = theano.scalar.basic.int32()\r\n self.assertRaises(NotScalarConstantError, self.validate,\r\n (3, 2, 8, i), (4, 2, 5, 5),\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))\r\n self.assertRaises(NotScalarConstantError, self.validate,\r\n (3, 2, 8, 8), (4, 2, 5, i),\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))", "def verify_binary_image(self, image_path):\n raise NotImplementedError", "def needs_correction(self, var):\n global matmul_registry\n if var in matmul_registry:\n return True\n else:\n return False", "def test_errors_for_unequal_image_size() -> None:\n cam = Camera(imgsz=(100, 200), f=(10, 10))\n xcam = Matlab(imgsz=(100, 100), fc=(10, 10))\n with pytest.raises(ValueError):\n Converter(xcam, cam)", "def test_image_rw(self):\n from ..image import Image\n from ..io.image import read_image, write_image\n shape = (5,5)\n pix = np.random.uniform(size=shape)\n ivar = np.random.uniform(size=shape)\n mask = np.random.randint(0, 3, size=shape)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0')\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n\n #- Check output datatypes\n self.assertEqual(img2.pix.dtype, np.float64)\n self.assertEqual(img2.ivar.dtype, np.float64)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- Rounding from keeping np.float32 on disk means they aren't equal\n self.assertFalse(np.all(img1.pix == img2.pix))\n self.assertFalse(np.all(img1.ivar == img2.ivar))\n\n #- But they should be close, and identical after float64->float32\n self.assertTrue(np.allclose(img1.pix, img2.pix))\n self.assertTrue(np.all(img1.pix.astype(np.float32) == img2.pix))\n self.assertTrue(np.allclose(img1.ivar, img2.ivar))\n 
self.assertTrue(np.all(img1.ivar.astype(np.float32) == img2.ivar))\n\n #- masks should agree\n self.assertTrue(np.all(img1.mask == img2.mask))\n self.assertEqual(img1.readnoise, img2.readnoise)\n self.assertEqual(img1.camera, img2.camera)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- should work with various kinds of metadata header input\n meta = dict(BLAT='foo', BAR='quat', BIZ=1.0)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0', meta=meta)\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img2.meta[key], 'meta[{}] not propagated'.format(key))\n\n #- img2 has meta as a FITS header instead of a dictionary;\n #- confirm that works too\n write_image(self.testfile, img2)\n img3 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img3.meta[key], 'meta[{}] not propagated'.format(key))", "def _verify(self, dimension):\n value = self.texture.__getattribute__(dimension)\n while value > 1:\n div_float = float(value) / 2.0\n div_int = int(div_float)\n if not (div_float == div_int):\n raise Exception('image %s is %d, which is not a power of 2' % (\n dimension, self.texture.__getattribute__(dimension)))\n value = div_int", "def mod_check(x, y):\r\n if (as_tensor_variable(x).dtype in complex_dtypes or\r\n as_tensor_variable(y).dtype in complex_dtypes):\r\n # Currently forbidden.\r\n raise scal.Mod.complex_error\r\n else:\r\n return mod(x, y)", "def test_imibread(self):\n gen = imibread(TEST_MIB)\n arr = next(gen)\n self.assertEqual(arr.shape, (256, 256))\n self.assertEqual(arr.dtype, np.dtype(\">u2\"))", "def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img", "def test_decode_bits(self):\r\n for bitvec in ten_bitvecs:\r\n corr, num_errs = golay.decode_bits(bitvec)\r\n if corr is None:\r\n self.assertEqual(num_errs, 4)\r\n else:\r\n self.assertEqual(((corr + bitvec) % 2).sum(), num_errs)", "def test_binary_reg_fn():\n inputs = Variable(torch.Tensor([0, .5, 1]))\n outputs = binary_reg_fn(inputs).data\n expected = torch.Tensor([0.0029409, 1, 0.0029409])\n assert is_close(outputs, expected).all(), \\\n \"{} != {}\".format(outputs.tolist(), expected.tolist())", "def test_bit_not_bit_size_too_large(self):\n ops = [bitwise_operations.bit_not(self.five_255_bin, 0, 41, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def test_make_binary_and_fp(self):\n output_mask = boundary_mask(df=os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 
'sample_b_mask_inner.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def _is_scalar(shape):\n return F.shape_mul(shape) == 1", "def _check_binary_data(data):\n if not np.array_equal(data, data.astype(bool)):\n raise ValueError(\n \"This mechanism works with binary data, \"\n \"but input is not binary.\")", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def test_mismatched_dims_error(test_output_dir):\n brainreg_args = get_default_brainreg_args(\n mismatched_dims_data_dir, test_output_dir\n )\n sys.argv = brainreg_args\n\n with pytest.raises(LoadFileException) as e:\n brainreg_run()\n\n assert (\n \"File failed to load with \"\n \"imio. Ensure all image files contain the \"\n \"same number of pixels. Full traceback above.\" in e.value.message\n )", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))", "def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):\n shape_len = len(shape_a)\n inp_src_dtype = src_dtype.lower()\n k_block_size = cce.BLOCK_REDUCE\n check_list = (\"float16\")\n\n if inp_src_dtype not in check_list:\n raise RuntimeError(\"Dtype of input only support float16\")\n\n if shape_len != len(shape_b):\n raise RuntimeError(\"length of a and b are not equal\")\n\n if shape_len < 2:\n raise RuntimeError(\"shape length for batch matmul must large than 2\")\n\n if shape_len == 2:\n raise RuntimeError(\n \"batch matmul not support shape length 2, if shape length equal 2, use matmul!\")\n\n if shape_a[:shape_len - 2] != shape_b[:shape_len - 2]:\n raise RuntimeError(\"batch size of a and b are not equal\")\n\n is_gevm = bool((shape_a[-2] == 1) or (shape_a[-1] == 1))\n is_gemv = bool((shape_b[-2] == 1) or (shape_b[-1] == 1))\n\n if trans_a:\n m_shape = shape_a[shape_len - 1]\n km_shape = shape_a[shape_len - 2]\n else:\n m_shape = shape_a[shape_len - 2]\n km_shape = shape_a[shape_len - 1]\n\n if trans_b:\n kn_shape = shape_b[shape_len - 1]\n n_shape = shape_b[shape_len - 2]\n else:\n kn_shape = shape_b[shape_len - 2]\n n_shape = shape_b[shape_len - 
1]\n\n if m_shape == 1:\n if n_shape == 1:\n raise RuntimeError(\"input shape M and N can't both be 1\")\n\n if km_shape != kn_shape:\n raise RuntimeError(\"reduce axis not same\")\n\n if m_shape % cce.BLOCK_IN != 0 and m_shape != 1:\n raise RuntimeError(\n \"input shape M should be 1 or multiple of %d\" % cce.BLOCK_IN)\n\n if m_shape != 1:\n if km_shape % k_block_size != 0:\n raise RuntimeError(\n \"input shape K1 should be multiple of %d\" % cce.BLOCK_IN)\n\n if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:\n raise RuntimeError(\n \"input shape N should be 1 or multiple of %d\" % cce.BLOCK_IN)\n\n shape_bias_length = len(shape_bias)\n\n if shape_bias_length > 0:\n if shape_bias_length == 1:\n if is_gevm or is_gemv:\n if shape_bias[0] != m_shape * n_shape:\n raise RuntimeError(\"broadcast case shape bias for gemv must be equal m*n\")\n else:\n if shape_bias[0] != n_shape:\n raise RuntimeError(\"broadcast bias shape must be equal to shape n\")\n elif shape_bias_length == shape_len:\n out_shape = [i for i in shape_a[:-2]] + [m_shape, n_shape]\n if [i for i in shape_bias] != out_shape:\n raise RuntimeError(\"non broadcast bias shape must be same as output shape\")\n else:\n raise RuntimeError(\"unsupport input shape now for batch bias case\")", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def will_data_fit(bit_num, image, bits_per_pix):\r\n max_bits = reduce(lambda x, y: x * y, image.size) * bits_per_pix\r\n return bit_num <= max_bits", "def ext_test(image,message,num_bits):\n return decode_ext(encode_ext(image,message,num_bits),num_bits)\\\n == message", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE" ]
[ "0.65523493", "0.6411468", "0.6128225", "0.5862283", "0.5744814", "0.55602735", "0.55312604", "0.54814464", "0.546288", "0.54556775", "0.54417485", "0.54311913", "0.54021156", "0.53700346", "0.53652495", "0.53612983", "0.5352316", "0.53403133", "0.53360397", "0.53285086", "0.5315784", "0.53150904", "0.5308452", "0.53082156", "0.53082156", "0.5305076", "0.5305004", "0.5297695", "0.52737176", "0.5228966" ]
0.6963398
0
DEVELOPMENT ONLY Saves a screenshot of the captcha (out/captcha.png) and reads from file (out/captcha) the captcha text to input.
def handle_captcha(self): self.webdriver.save_screenshot('./out/captcha.png') sleep(20) with open('./out/captcha', 'r') as f: try: self.webdriver.find_element_by_xpath("//input[@aria-label='Type the text you hear or see']").send_keys(f.read()) except: log.error('Captcha input failed. Possibly incorrect captcha?') raise self.webdriver.find_element_by_xpath('//*[@id="identifierNext"]').click() sleep(4) self.webdriver.find_element_by_css_selector("input[type=password]").send_keys(self.bot.getPassword())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_captcha_manual(gid):\n image = auth.get_captcha_image(gid)\n # FIXME: Use Python's temp file interface.\n image.save(\"./test.png\")\n webbrowser.open_new_tab(\"./test.png\")\n text = input('solve_captcha --->')\n return text", "def gen_captcha(**kwargs):\n from PIL import ImageFile\n from PIL import Image\n from PIL import ImageFont\n from PIL import ImageDraw\n from PIL import ImageFilter\n import random\n from PIL import ImageFile as pyImageFile\n import sys\n sys.modules['ImageFile'] = pyImageFile\n from io import StringIO, BytesIO\n # CHAR_BIT=(4,5,6,7,8)\n # CHAR_TYPE=(1,2,3)\n #随机选择字符位数和类型.\n # text=getstr( random.choice(CHAR_BIT), random.choice(CHAR_TYPE))\n text = kwargs.get('text', None)\n fnt_sz = kwargs.get('size', DEFAULT_IMAGE_SIZE)\n bkground = kwargs.get('bkground', DEFAULT_BG)\n font_color = kwargs.get('font_color', DEFAULT_FONT_COLOR)\n distortion = kwargs.get('distortion', DEFAULT_DISTORTION)\n addWidth = kwargs.get('addWidth', None)\n addHeight = kwargs.get('addHeight', None)\n\n period = distortion[0]\n amplitude = distortion[1]\n offset = distortion[2]\n\n## outFile = StringIO()\n outFile = BytesIO()\n\n DATA_PATH = os.path.abspath(os.path.dirname(__file__))\n FONT_PATH = DATA_PATH + '/fonts'\n\n # select font for captcha\n ALL_FONTS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12')\n rand_font = random.choice(ALL_FONTS)\n \"\"\"font = ImageFont.truetype(FONT_PATH+'/font%s.ttf'%rand_font, fnt_sz)\"\"\"\n font = ImageFont.truetype(FONT_PATH + '/font' + rand_font + '.ttf', fnt_sz)\n\n #依据需求认定图片大小\n # textSize =[165,50]\n textSize = [kwargs.get('width', 165), kwargs.get('height', 50)]\n factTextSize = font.getsize(text)\n\n #如果定义尺寸小于实际尺寸则用实际的尺寸\n if factTextSize[0] > textSize[0]:\n textSize[0] = factTextSize[0]\n if factTextSize[1] > textSize[1]:\n textSize[1] = factTextSize[1]\n#------------------------------render background1 -----------------------\n image = Image.new(\n 'RGB', (textSize[0] + addWidth, textSize[1] + addHeight), bkground)\n image.paste(bkground)\n#------------------------------render Text2 ------------------------\n draw = ImageDraw.Draw(image)\n alignment = (random.uniform(0, 1), random.uniform(0, 1))\n x = int((image.size[0] - textSize[0]) * alignment[0] + 0.5)\n y = int((image.size[1] - textSize[1]) * alignment[1] + 0.5)\n\n draw.text((x, y), text, font=font, fill=font_color)\n#--------------new add line i值越大线越粗------------------------\n width, height = image.size\n for i in range(0, 3):\n draw.line(((0, height / 1 + i), (width, height / 8 + i)), fill=128)\n\n#------------------------------render Distortion -----------------------\n r = 1\n xPoints = image.size[0] //r + 2\n yPoints = image.size[1] //r + 2\n\n # Create a list of arrays with transformed points\n xRows = []\n yRows = []\n for j in range(yPoints):\n xRow = []\n yRow = []\n for i in range(xPoints):\n x, y = getTransform(i * r, j * r, amplitude, period, offset)\n\n # Clamp the edges so we don't get black undefined areas\n x = max(0, min(image.size[0] - 1, x))\n y = max(0, min(image.size[1] - 1, y))\n\n xRow.append(x)\n yRow.append(y)\n xRows.append(xRow)\n yRows.append(yRow)\n\n # Create the mesh list, with a transformation for\n # each square between points on the grid\n mesh = []\n for j in range(yPoints - 1):\n for i in range(xPoints - 1):\n mesh.append((\n # Destination rectangle\n (i * r, j * r,\n (i + 1) * r, (j + 1) * r),\n # Source quadrilateral\n (xRows[j][i], yRows[j][i],\n xRows[j + 1][i], yRows[j + 1][i],\n xRows[j + 1][i + 1], yRows[j + 1][i + 
1],\n xRows[j][i + 1], yRows[j][i + 1]),\n ))\n\n img = image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n # save the image to a file\n img.save(outFile, format='jpeg')\n outFile.seek(0)\n # img.save(\"captchas.jpg\") #测试用,正式系统请删除.\n src = outFile.read()\n size = len(src)\n sys.modules['ImageFile'] = ImageFile\n return {'text': text, 'src': src, 'size': size}", "def extractCaptcha(x, y, nameInfix=None, debug=False):\n\n\tif nameInfix == None:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(datetime.datetime.now().isoformat()) + \".png\"\n\telse:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(nameInfix) + \".png\"\n\n\treturn extractScreenPart(x-50, y+5, 170, 60, name=captchaName, debug=debug)", "def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()", "def gen_captcha(text, fnt, fnt_sz, file_name, fmt='JPEG'):\n # randomly select the foreground color\n fgcolor = random.randint(0,0xffff00)\n # make the background color the opposite of fgcolor\n bgcolor = fgcolor ^ 0xffffff\n # create a font object \n font = ImageFont.truetype(fnt,fnt_sz)\n # determine dimensions of the text\n dim = font.getsize(text)\n # create a new image slightly larger that the text\n im = Image.new('RGB', (dim[0]+5,dim[1]+5), bgcolor)\n d = ImageDraw.Draw(im)\n x, y = im.size\n r = random.randint\n # draw 100 random colored boxes on the background\n for num in range(100):\n d.rectangle((r(0,x),r(0,y),r(0,x),r(0,y)),fill=r(0,0xffffff))\n # add the text to the image\n d.text((3,3), text, font=font, fill=fgcolor)\n im = im.filter(ImageFilter.EDGE_ENHANCE_MORE)\n # save the image to a file\n im.save(file_name, format=fmt)", "def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data", "def handle_captcha(thread_call, thread_r):\n import subprocess\n\n iden = thread_r['captcha']\n\n subprocess.call(['open', reddit_url + 'captcha/' + iden])\n thread_call['captcha'] = input(\"Captcha (enclose in quotes):\")\n thread_call['iden'] = iden\n\n request = session.post(reddit_url + 'api/submit', data=thread_call, cookies=cookie)\n thread_r = request.json()['json']['data']\n print request.json()\n if len(thread_r['errors']) > 0:\n debug_printer.pprint(thread_r)", "def obtain_image_captcha(self, file_path):\n id_answer = self.post_image_task(file_path)\n if not id_answer:\n message = f\"Unable to obtain response for request of captcha from 2Captcha\"\n print(message)\n return None\n\n try:\n captcha_id = int(id_answer)\n except ValueError:\n message = f\"Error in captcha request from 2Captcha: {id_answer}\"\n print(message)\n return None\n\n recaptcha_answer = self.get_image_response(captcha_id)\n if not recaptcha_answer:\n message = f\"Unable to obtain response for captcha image solution from 2Captcha\"\n print(message)\n return None\n\n print(f\"Output from 2Captcha {recaptcha_answer}\")\n return recaptcha_answer", "def captcha_to_text(self, imagepath):\n return 
pytesseract.image_to_string(Image.open(imagepath))", "def askForCaptcha(self, url):\n try:\n import webbrowser\n wikipedia.output(u'Opening CAPTCHA in your web browser...')\n if webbrowser.open(url):\n return wikipedia.input(\n u'What is the solution of the CAPTCHA that is shown in '\n u'your web browser?')\n else:\n raise\n except:\n wikipedia.output(u'Error in opening web browser: %s'\n % sys.exc_info()[0])\n wikipedia.output(\n u'Please copy this url to your web browser and open it:\\n %s'\n % url)\n return wikipedia.input(\n u'What is the solution of the CAPTCHA at this url ?')", "def corp_image(self):\n try:\n # Open image\n image_to_crop = Image.open(self.captcha_image_filename, 'r')\n # Crop image\n image = image_to_crop.crop((-1, 8, 65, 22))\n # Save image\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise(error)", "def solve_image_captcha(self, captcha_tmp_path):\n # Get solution and apply it\n for i in range(1, 4):\n print(f\"Attempt #{i} for recaptcha solution\")\n solution = self.obtain_image_captcha(captcha_tmp_path)\n print(f'this {solution}')\n if solution and ERROR not in solution.upper():\n break\n\n if solution is None or ERROR in solution.upper():\n if not solution:\n message = f\"2Captcha service didn't return a response for the captcha\"\n else:\n message = f\"Error in captcha solution from 2Captcha: {solution}\"\n return None\n\n print(\"Captcha solution: {}\".format(solution))\n return solution", "def image_to_string(self):\n img = Image.open(self.cropped_captcha_filename)\n config = '--psm 10 --oem 1 -c tessedit_char_whitelist=0123456789+?'\n try:\n return pytesseract.image_to_string(img, config=config)\n except pytesseract.pytesseract.TesseractNotFoundError:\n raise(\"Tesseract не установлен!\")\n exit(-1)", "def get_captcha_reply(captcha):\n def get_char_at(pos, captcha):\n char_chars = [line[pos-1:pos] for line in captcha.split(b'\\n')]\n key = ''.join([ str(s, 'ascii') for s in char_chars])\n if key == ' | ':\n return get_char_at(pos+2, captcha)\n if key == ' | .\\\\ ':\n return get_char_at(pos+2, captcha)\n return chars[key]\n\n pos = 1\n\n a, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info(\"a=%d\" % a)\n\n op, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('op=%s' % op)\n\n b, size = get_char_at(pos, captcha)\n pos += size\n pwn.log.info('b=%d' % b)\n \n if op == '-':\n return a - b\n if op == '*':\n return a * b\n if op == '/':\n return a / b\n if op == '+':\n return a + b\n pwn.log.error(\"Ops not found (%s)\" % op)", "def get_captcha_key(self, captcha_image_url):\n\n if self.interactive:\n print('Open CAPTCHA image url in your browser and enter it below: ',\n captcha_image_url)\n captcha_key = raw_input('Enter CAPTCHA key: ')\n return captcha_key\n else:\n raise VkAuthError(\n 'Captcha is required. 
Use interactive mode to enter it '\n 'manually')", "def submit(request):\n if request.POST:\n form = CaptchaForm(request.POST, request.FILES)\n if form.is_valid():\n image = request.FILES['singleImage']\n extension = image.name.split('.')[1]\n hashname = random.getrandbits(128)\n with open(os.path.join(settings.STATIC_ROOT, \"tmp/%s.%s\" % (hashname, extension)), \"w+\") as imagePath:\n imagePath.write(image.read())\n\n ctx = RequestContext(request, {\"hash\":hashname, \"extension\":extension})\n template = loader.get_template(\"wainz/submission_details.html\")\n\n return HttpResponse(template.render(ctx))\n else:\n form = CaptchaForm()\n\n return render_to_response(\"wainz/submit.html\", dict(form=form), context_instance = RequestContext(request))", "def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode", "def twocaptcha_solver():\n SITE_URL = get_site_settings()[1]\n SITE_KEY = get_site_settings()[0] # osrs site key\n API_KEY = get_user_settings()[2] # api key read from settings.ini\n if not API_KEY:\n raise ValueError(\"No API key was found in settings.ini.\")\n\n s = requests.Session()\n\n # here we post and parse site key to 2captcha to get captcha ID\n try:\n captcha_id = s.post(f\"http://2captcha.com/in.php?key={API_KEY}\"\n f\"&method=userrecaptcha&googlekey={SITE_KEY}\"\n f\"&pageurl={SITE_URL}\").text.split('|')[1]\n except IndexError:\n print(\"You likely don't have a valid 2captcha.com API key with funds\"\n \" in your settings.ini file. Fix and re-run the program.\")\n\n # then we parse gresponse from 2captcha response\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n print(\"Solving captcha...\")\n while 'CAPCHA_NOT_READY' in recaptcha_answer:\n sleep(6)\n recaptcha_answer = s.get(\n f\"http://2captcha.com/res.php?key={API_KEY}\"\n f\"&action=get&id={captcha_id}\").text\n try:\n recaptcha_answer = recaptcha_answer.split('|')[1]\n except IndexError:\n print(\"2captcha failed to solve this one.. 
Returning a blank response \"\n \"If the program fails to continue, please msg Gavin with error.\")\n recaptcha_answer = ''\n else:\n return recaptcha_answer", "def generate_image(self, img, seednum=None):\n r = self.csettings['R']\n if self.csettings['auto_cleanup']:\n clean_old_entries(self.csettings['captchas_dir'])\n\n cs = self.csettings\n imagesize = cs['imagesize']\n fontdir = path.join(cs['captchaconf_dir'], 'fonts')\n fontnames = [path.join(fontdir, x) for x in listdir(fontdir) ]\n\n for dummy in range(self.csettings['iterations']):\n posnew = 7\n if dummy != 0:\n cs.generate_solution()\n # render characters\n for c in self.csettings['solution']:\n fgimage = Image.new('RGB', imagesize, cs['fgcolor'])\n font = ImageFont.truetype(r.choice(fontnames), r.randrange(*cs['minmaxheight']))\n charimage = Image.new('L', font.getsize(' %s ' % c), '#000000')\n draw = ImageDraw.Draw(charimage)\n draw.text((0,0), ' %s' % c, font=font, fill='#ffffff')\n if cs['eraser']:\n eraserline = ( 0, r.choice(range(0, charimage.size[1])), \n charimage.size[0], r.choice(range(0, charimage.size[1])))\n draw = ImageDraw.Draw(charimage)\n draw.line(eraserline, width=cs['eraser_width'] , fill='#000000')\n charimage = charimage.rotate(r.randrange(*cs['minmaxrotations']), expand=1,\n resample=Image.BILINEAR)\n charimage = charimage.crop(charimage.getbbox())\n maskimage = Image.new('L', imagesize)\n ypos = r.randrange(*cs['minmaxvpos'])\n maskimage.paste(charimage, \n (posnew, ypos, \n charimage.size[0]+posnew, \n charimage.size[1]+ypos)\n )\n img = Image.composite(fgimage, img, maskimage)\n posnew += charimage.size[0] + r.randrange(*cs['minmaxkerning'])\n\n # draw line(s)\n for dummy in range(cs.get('num_lines')):\n linex = r.choice( range(2, cs['minmaxheight'][1]) )\n minmaxliney = ( cs['minmaxvpos'][0], \n cs['minmaxvpos'][1] + cs['minmaxheight'][0])\n linepoints = [linex, r.randrange(*minmaxliney)]\n while linex < posnew:\n linex += r.randrange(*cs['minmaxheight']) * 0.8\n linepoints.append(linex)\n linepoints.append(r.randrange(*minmaxliney))\n draw = ImageDraw.Draw(img)\n draw.line(linepoints, width=cs['line_width']\n , fill=cs['fgcolor'])\n return img", "def post_image_task(self, file_path):\n url = 'http://2captcha.com/in.php'\n input_file = {'file': open(file_path, 'rb')}\n data = {'key': self.api_key, 'method': 'post', 'json': 1}\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, files=input_file, data=data)\n id_answer = self.handle_id_answer(response.text)\n\n if not finished:\n return False\n\n return id_answer", "def decoding_the_captcha(captcha, l1=7):\n im = Image.open(captcha)\n im = im.convert(\"RGB\")\n p1 = im.load()\n\n # Filtering the black dots\n for x in range(im.size[0]):\n for y in range(im.size[1]):\n if (p1[x, y][0] < l1) and (p1[x, y][1] < l1) \\\n and (p1[x, y][2] < l1):\n p1[x, y] = (0x80, 0x80, 0x80, 255)\n\n im.save(\"output.png\")\n im.close()", "def create_captcha_image(self, chars, color, background, warp=False, **kwargs):\n\n rotate_from, rotate_to = kwargs.get('rotate_range', (-5, 5))\n left_rate, width_rate = kwargs.get('left_rate', 0.1), kwargs.get('width_rate', 0.4)\n y_low_rate, y_up_rate = kwargs.get('dy_rate_range', (-0.15, 0.15))\n\n image = Image.new('RGB', (self._width, 
self._height), background)\n draw = Draw(image)\n\n def _draw_character(c):\n font = random.choice(self.truefonts)\n w, h = draw.textsize(c, font=font)\n\n dx = random.randint(0, 4)\n dy = random.randint(0, 6)\n im = Image.new('RGBA', (w + dx, h + dy))\n Draw(im).text((dx, dy), c, font=font, fill=(25, 25, 25, 25))\n\n # rotate\n im = im.crop(im.getbbox())\n im = im.rotate(random.uniform(rotate_from, rotate_to), Image.BILINEAR, expand=1)\n\n # warp\n if warp:\n dx = w * random.uniform(0.1, 0.5)\n dy = h * random.uniform(0.2, 0.3)\n x1 = int(random.uniform(-dx, dx))\n y1 = int(random.uniform(-dy, dy))\n x2 = int(random.uniform(-dx, dx))\n y2 = int(random.uniform(-dy, dy))\n w2 = w + abs(x1) + abs(x2)\n h2 = h + abs(y1) + abs(y2)\n data = (\n x1, y1,\n -x1, h2 - y2,\n w2 + x2, h2 + y2,\n w2 - x2, -y1,\n )\n im = im.resize((w2, h2))\n im = im.transform((w, h), Image.QUAD, data)\n return im\n\n images = []\n for c in chars:\n images.append(_draw_character(c))\n\n text_width = sum([im.size[0] for im in images])\n\n width = max(text_width, self._width)\n image = image.resize((width, self._height))\n\n average = int(text_width / len(chars))\n rand = int(average * left_rate) #int(0.25 * average)\n offset = int(average * width_rate) #int(average * 0.1)\n\n for im in images:\n w, h = im.size\n mask = im.convert('L').point(table)\n image.paste(im, (offset, int((self._height - h) / 2 + random.uniform(y_low_rate, y_up_rate)*self._height)), mask)\n offset = offset + w + random.randint(-rand, 0)\n\n if width > self._width:\n image = image.resize((self._width, self._height))\n\n return image", "def get(self):\n try:\n imageFilename = random.choice(os.listdir(self.cacheDir))\n imagePath = os.path.join(self.cacheDir, imageFilename)\n with open(imagePath) as imageFile:\n self.image = imageFile.read()\n except IndexError:\n raise GimpCaptchaError(\"CAPTCHA cache dir appears empty: %r\"\n % self.cacheDir)\n except (OSError, IOError):\n raise GimpCaptchaError(\"Could not read Gimp captcha image file: %r\"\n % imageFilename)\n\n self.answer = imageFilename.rsplit(os.path.extsep, 1)[0]\n self.challenge = self.createChallenge(self.answer)\n\n return (self.image, self.challenge)", "def writerep_general(contact_link, i):\n\n b = browser.Browser()\n print \"In writerep_general, opening contact_link\", contact_link\n b.open(contact_link)\n\n def get_challenge():\n ''' find captchas'''\n labels = b.find_nodes('label', lambda x: x.get('for') == 'HIP_response')\n if labels: return labels[0].string\n \n def fill_inhofe_lgraham(f):\n \"\"\"special function to fill in forms for inhofe and lgraham\"\"\"\n if DEBUG: print \"Filling special inhofe or lgraham form\"\n f.fill_all(A01=i.prefix, B01=i.fname, C01=i.lname, D01=i.addr1, E01=i.addr2, F01=i.city,\n G01=i.state, H01=i.zip5, H02=i.phone, H03=i.phone, I01=i.email, J01=\"Communications\", K01=i.full_msg)\n f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print \"f filled and ready to submit: \", f\n \n def fill_form(f):\n ''' f is a form '''\n\n f.fill_name(i.prefix, i.fname, i.lname)\n if DEBUG: print \"in fill_form, filling addr\"\n f.fill_address(i.addr1, i.addr2)\n if DEBUG: print \"in fill_form, filling phone\"\n f.fill_phone(i.phone)\n if DEBUG: print \"in fill_form, filling textarea\"\n textareacontrol = f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print 'filled textareacontrol' , textareacontrol\n if DEBUG: print \"in fill_form, filling all\"\n\n if DEBUG: print \"Printing all controls\"\n for c in f.controls:\n if DEBUG: print \"control: \", c.name, 
\" type: \", c.type\n \n f.fill_all(city=i.city, zipcode=i.zip5, zip4=i.zip4, state=i.state.upper(),\n email=i.email,\n issue=['TECH', 'GEN', 'OTH'],\n subject=i.subject, reply='yes',\n Re='issue', #for billnelson\n newsletter='noAction', aff1='Unsubscribe',\n MessageType=\"Express an opinion or share your views with me\")\n\n # page has one required control that has no name. so we need to fill it in\n if (i.dist == 'SD-00' or 'coburn' in b.url):\n empty_controls = [c for c in f.controls if not c.value]\n for c in empty_controls:\n if DEBUG: print f.fill('OTH', control=c)\n\n \n\n\n # Solve captchas. I included this here because it was placed here by Aaron,\n # but I haven't found a captcha that it works on. -NKF\n challenge = get_challenge()\n if challenge:\n print \"Found challenge!\"\n try:\n solution = captchasolver.solve(challenge)\n except Exception, detail:\n print >> sys.stderr, 'Exception in CaptchaSolve', detail\n print >> sys.stderr, 'Could not solve:\"%s\"' % challenge,\n \n if DEBUG: print \"f filled and ready to submit to \", b.url, \"\\n\", f\n #return b.open(f.click())\n \n \n\n # max loops\n k = 6\n\n # needed this from some weird error that I forgot to document.\n # we only want to do the WYR form once,\n # so it's a flag so we don't choose this one again. \n completedWyrForm = False\n for cnt in range(1,k):\n # todo, place newurl into cache\n if DEBUG: print \"Loop \", cnt, \":\\n\", b.url, \"\\n\" #, b.page, \"\\n Done with page \", cnt, \"\\n\\n\"\n\n # check if this is a refresh page\n # to do: see if we can get javascript window.location refreshes\n # (would require some smart parsing or using a javascript interpreter module)\n if 'http-equiv=\"refresh\"' in b.page:\n if DEBUG: print \"Redirect to a new page:\"\n newurl = r_refresh.findall(b.page)[0]\n newurl = newurl.replace(' ', '%20')\n newurl = newurl.replace('&amp;', '&')\n if DEBUG: print \"\\nNewurl:\", newurl\n try:\n b.open(newurl)\n continue #next loop\n except:\n print \"Failed to open url \", newurl, \" error: \", traceback.print_exc()\n\n # some pages have multiple forms on them.\n # For example, there may be a search tool in the sidebar.\n # or there may be forms which are hidden by not displayed by the css.\n # try to see what we can grab out the page, then we'll decide which one's the best to try\n textareaform = get_form(b, lambda f: f.find_control_by_type('textarea'))\n zipform = get_form(b, lambda f: f.has(name='zip'))\n verificationform = get_form(b, lambda f: 'formproc' in f.action)\n nameform = get_form(b, lambda f: 'wrep_const' in f.action) #see AL-06 for an example, has zip form in page too\n wyrform = get_form(b, lambda f: f.find_control_by_id('state') and f.find_control_by_name('zip') and f.find_control_by_name('zip4')) #get_form(b, not_signup_or_search)\n indexform = get_form(b, lambda f: f.has(name='Re')) # see billnelson for example\n\n #choose which form we want to use\n form = None\n if textareaform:\n if DEBUG: print \"textareaform\"\n form = textareaform\n elif wyrform and not completedWyrForm:\n if DEBUG: print \"wyrform\"\n form = wyrform\n completedWyrForm = True\n elif nameform:\n if DEBUG: print \"step2 contact form with name\"\n form = nameform\n elif zipform:\n if DEBUG: print \"zipform\"\n form = zipform\n elif verificationform:\n if DEBUG: print \"verification form\"\n form = verificationform\n elif indexform:\n if DEBUG: print \"index form\"\n form = indexform\n\n #if no redirect and no form was found, just return. 
can go no further\n if not form:\n return b.page\n \n \n #to do, add back in captcha solver\n if form.find_control_by_name('captcha') or form.find_control_by_name('validation'):\n if DEBUG: print \"captcha found\"\n #raise Captcha\n return b.page\n else:\n if DEBUG: print \"no captcha found\"\n\n #try:\n if DEBUG: print \"going to fill_form from \", b.url, \" now \\n\", form, \"\\n End form\", cnt, \"\\n\"\n if \"inhofe\" in contact_link or \"lgraham\" in contact_link:\n fill_inhofe_lgraham(form)\n else:\n fill_form(form) #, aggressive=True)\n\n try:\n nextpage = b.open(form.click())\n except:\n print \"caught an http error\"\n print \"Failed to submit form for url \", b.url, \" error: \", traceback.print_exc()\n return \"Failed to submit form for url \"+ b.url+ \" error: \"+ traceback.format_exc()\n\n \n # Now, look for common errors or confirmations.\n foundError = False\n thanked = False\n if DEBUG: print \"Looking for errors in page \" #, b.page\n \n errorStr = getError(b.page)\n if errorStr:\n if DEBUG: print \"Found error: \", errorStr, \" done with \", contact_link\n foundError = True\n\n if DEBUG: print \"Looking for thank you in page: \"# , nextpage.lower()\n confirmations=[cstr for cstr in confirmationStrings if cstr in nextpage.lower()]\n\n if len(confirmations) > 0:\n print 'thanked, done with ', contact_link\n thanked = True\n\n successUrls = ['https://mulvaneyforms.house.gov/submit-contact.aspx']\n if b.url in successUrls:\n thanked = True\n\n if thanked or foundError:\n return nextpage\n\n if DEBUG: print \"Tried \", k, \"times, unsuccessfully, to fill form\"\n return b.page\n #raise UnsuccessfulAfter5Attempts(b.page) ", "def solve_captcha(self):\n # Switch to the Captcha's iframe\n captcha = CapatchaSolver(self.driver)\n while True:\n self.driver.switch_to.frame(self.driver.find_element_by_tag_name(\"iframe\"))\n captcha.solve_captcha()\n # Check if we passed the captcha part by checking the page title\n wait = WebDriverWait(self.driver, 10)\n try:\n wait.until_not(EC.title_is(consts.BLOCKED))\n break\n except TimeoutException:\n self.driver.refresh()", "def generateText(self, imageInputPath):\n self.logger.info(\"Processing the image {0}\".format(imageInputPath))\n imageInputFullPath = self.config['tempPath'] + imageInputPath\n imageInputWithoutExtension = os.path.splitext(imageInputPath)[0]\n outputPath = self.config['outputPath'] + imageInputWithoutExtension + \".txt\"\n outputPath = self.config['outputPath'] + imageInputWithoutExtension\n \n tessConfig = ('--oem 1 --psm 3')\n # Run tesseract OCR on image\n pytesseract.pytesseract.run_tesseract(imageInputFullPath, outputPath, extension=self.config['outputFormat'], lang=\"eng\", config=tessConfig)\n self.logger.debug(\"Processed the image {0}, OutputPath {1}\".format(imageInputPath, outputPath))\n \n # tessConfig = ('-l eng --oem 1 --psm 3')\n # tessConfig = ('--oem 1 --psm 3 -c textonly_pdf=1')\n # # Read image from disk\n # im = cv2.imread(imageInputFullPath, cv2.IMREAD_COLOR)\n # text = pytesseract.image_to_string(im, config=tessConfig)\n # with open(outputPath, mode=\"w\", encoding=\"utf8\") as outFile:\n # outFile.write(text)", "def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n 
print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False", "async def enter_captcha(self, url, sid):\n raise VkCaptchaNeeded(url, sid)", "def post(self):\n code = request.form.get('captcha-code')\n username = request.form.get('username')\n password = request.form.get('password')\n # creating dictionary for following logic\n ctx = {'captcha': True, 'username': username}\n\n # captcha inserted/not inserted\n if code:\n logger.info(f'User {username} logged in, step 2')\n # FIXME Remove False after function check_code is created\n # captcha valid/invalid\n if dbhandler.check_code(ctx['username'], code):\n logger.info(f'User {username} successfully logged in')\n set_current_user(username)\n return redirect(url_for('index'), 200)\n else:\n logger.warning(f'User {username} posted wrong captcha')\n return render_template(self.template_name, error='Incorrect captcha code', **ctx)\n\n # user valid/non valid\n user = dbhandler.search_user(username, password)\n if user:\n logger.info(f'User {username} logged in, step 1')\n return render_template(self.template_name, **ctx)\n\n logger.warning(f'User {username} posted wrong password')\n return render_template(self.template_name, error='Incorrect username or password')", "def generate_image(self, chars, with_dots=True, with_curve=True, warp=None):\n warp = warp or True\n background = random_color(238, 255)\n color = random_color(0, 200, random.randint(220, 255))\n im = self.create_captcha_image(chars, color, background, warp=warp)\n if with_dots:\n self.create_noise_dots(im, color)\n if with_curve:\n self.create_noise_curve(im, color)\n im = im.filter(ImageFilter.SMOOTH)\n return im" ]
[ "0.71884316", "0.6698693", "0.65424967", "0.6215375", "0.61891127", "0.6079916", "0.60580266", "0.5914424", "0.5824995", "0.5824887", "0.58242536", "0.57925165", "0.5688282", "0.5589822", "0.5572178", "0.55524635", "0.5550549", "0.529425", "0.52441746", "0.5242615", "0.5227137", "0.5218244", "0.52117807", "0.51904285", "0.5174823", "0.51298565", "0.51130664", "0.5067716", "0.50583863", "0.50570047" ]
0.7391811
0
Shows info related to the dataframes wrangled in this pipeline.
def _show_info(self):\n        dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n        dataframe.printSchema()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_frame_info(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.info())", "def show(dfs):\n\n for df in dfs:\n print('{} -> {}'.format(df[0], df[1]))", "def show_hdf(self):\n self._walk()", "def display_df_info(df, df_name, max_rows=None, max_columns=None):\n # Head\n display(HTML('<h4>{name}</h4>'.format(\n name=df_name)))\n with pd.option_context('display.max_rows', max_rows, 'display.max_columns', max_columns):\n display(df)\n\n # Attributes\n display(HTML(\"<h4>Data attributes</h4>\"))\n display_df = pd.DataFrame.from_dict(\n {'Null counts': df.isnull().sum(), 'Data types': df.dtypes, 'Unique values': df.nunique()})\n display(display_df)", "def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)", "def show_csv_info(self):\n print()\n display(HTML(self.csv_dataframe.head(10).to_html()))", "def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''):\n info = info_prefix\n if shape:\n info = f'{info}Shape = {df.shape}'\n if cols:\n info = f'{info} , Cols = {df.columns.tolist()}'\n print(info)\n if return_info:\n return info", "def info(self):\n print(\n \"\"\"\n Factory holds {0} unique plots\n \"\"\".format(\n len(self.plots)\n )\n )\n for i, plot in enumerate(self.plots):\n print(\"\\t\\tPlot {0} holds {1} unique datasets\".format(i, len(plot)))\n for j, dataset in enumerate(plot):\n print(\n \"\\t\\t\\tDataset {0} holds {1} datapoints\".format(\n j, len(dataset[\"x\"])\n )\n )\n\n print()\n return", "def show_data(df):\n printmd(str(\"The Data contains **\" + str(df.shape[0])+ '** rows.'))\n printmd(\"*__Sample of the data :__*\")\n display(df.head(n=5))\n print(\"\")\n print(\"\")", "def show(self, notebook=notebook_display):\n print(\"\\nCluster Ensemble:\")\n if notebook is True:\n display(self._df)\n elif notebook is False:\n print(self._df)\n self.massrich_parameters()", "def display(df, urls=False):\n dft = df\n if urls:\n dft = df.copy()\n if df.index.name == 'class_label':\n dft = dft.reset_index()\n if 'index' in dft.columns:\n dft = dft.drop(['index'], axis=1)\n if 'class_label' in dft.columns:\n dft['class_label'] = dft.class_label.apply(lambda x: url(int(x)))\n if df.index.name == 'class_label':\n dft.set_index('class_label')\n disp(dft)", "def disp(df):\n display(HTML(df.to_html(index=False)))", "def print_results_by_model(df, dropped_columns=[]):\n dropped_columns.extend([\"interactive_err_data\"])\n\n dfs = split_df_by_model(df)\n for df_ in dfs:\n print(df_.name)\n display(df_.drop(columns=[col for col in dropped_columns if col in df_]))", "def app():\n # Add title to the page\n st.title(\"Welcome to the Data Info page\")\n\n # Add subheader for the section\n st.subheader(\"View Data\")\n\n # Load the dataset\n X, y = load_data()\n df = pd.concat([X, y], axis=1)\n\n # Create an expansion option to check the data\n with st.expander(\"View data\"):\n st.dataframe(df)\n\n # Create a section to columns values\n # Give subheader\n st.subheader(\"Columns Summary:\")\n\n # Create a checkbox to get the summary.\n if st.checkbox(\"View Summary\"):\n 
st.dataframe(df.describe())\n\n # Create multiple check box in row\n col_name, col_dtype, col_data = st.columns(3)\n\n # Show name of all dataframe\n with col_name:\n if st.checkbox(\"Column Names\"):\n st.dataframe(df.columns)\n\n # Show datatype of all columns \n with col_dtype:\n if st.checkbox(\"Columns data types\"):\n dtypes = df.dtypes.apply(lambda x: x.name)\n st.dataframe(dtypes)\n \n # Show data for each columns\n with col_data: \n if st.checkbox(\"Columns Data\"):\n col = st.selectbox(\"Column Name\", list(df.columns))\n st.dataframe(df[col])\n \n # Add image for your data describtion.\n #st.image(\"./images/iris_classification_model.jpg\")\n\n # Add info about your dataset\\\n # st.write(\"Data Info\")\n\n # Add the link to you dataset\n # st.markdown(\"\"\"\n # <p style=\"font-size:24px\">\n # <a \n # href=\"https://github.com/ShishirShekhar/car-price-prediction/blob/main/about.py\"\n # target=_blank\n # style=\"text-decoration:none; color:red\"\n # >Dataset\n # </a> \n # </p>\n # \"\"\", unsafe_allow_html=True\n # )", "def show_dataframe(self, df, **kwargs):\n show_index = False\n if 'show_index' in kwargs:\n show_index = kwargs['show_index']\n\n exceed_limit = len(df) > self.max_result\n header_buf = StringIO(\"\")\n if show_index:\n idx_name = str(df.index.name) if df.index.name is not None else \"\"\n header_buf.write(self.normalizeColumn(idx_name) + \"\\t\")\n header_buf.write(self.normalizeColumn(str(df.columns[0])))\n for col in df.columns[1:]:\n header_buf.write(\"\\t\")\n header_buf.write(self.normalizeColumn(str(col)))\n header_buf.write(\"\\n\")\n\n body_buf = StringIO(\"\")\n rows = df.head(self.max_result).values if exceed_limit else df.values\n rowNumber = len(rows)\n index = df.index.values\n for idx, row in zip(index, rows):\n if show_index:\n body_buf.write(\"%html <strong>{}</strong>\".format(idx))\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(row[0])))\n for cell in row[1:]:\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(cell)))\n # don't print '\\n' after the last row\n rowNumber -=1\n if rowNumber != 0:\n body_buf.write(\"\\n\")\n body_buf.seek(0)\n header_buf.seek(0)\n print(\"%table \" + header_buf.read() + body_buf.read())\n body_buf.close()\n header_buf.close()\n if exceed_limit:\n print(\"\\n%html <font color=red>Results are limited by {}.</font>\".format(self.max_result))", "def print_test_df(smp):\n print(\"Overview of smp dataframe: \\n\", smp.head())\n print(\"Info about smp dataframe:\\n\")\n smp.info()\n print(\"Dataypes of columns: \\n\", smp.dtypes)\n print(\"Datapoints per SMP File: \\n\", smp[\"smp_idx\"].value_counts())\n print(\"First row: \\n\", smp.iloc[0])\n print(\"Force at first row: \\n\", smp[[\"mean_force\", \"var_force\", \"min_force\", \"max_force\"]].iloc[0])\n print(\"Amount of datapoints with a force > 40: \", len(smp[smp[\"max_force\"] > 40]))\n print(\"Was S31H0117 found in the dataframe? 
\", any(smp.smp_idx == idx_to_int(\"S31H0117\")))\n print(\"Only S31H0117 data: \\n\", smp[smp[\"smp_idx\"] == idx_to_int(\"S31H0117\")].head())", "def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)", "def show_dmvs(dmvs):\n\n for df in dmvs:\n print('{} ->-> {}'.format(df[0], df[1]))", "def show_data():", "def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))", "def df():\n fs.df()", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def print_info(df):\n\n # Data statistics\n # Number of total samples\n print('There are {n_samples} samples in total.'.format(n_samples=len(list(df.index.get_level_values(0).unique()))))\n\n # Count the different types of labels\n unique = df['label'].unique()\n count = []\n\n for label in unique:\n count.append(len(df.index.get_level_values(0)[df['label'] == label].unique()))\n\n count_dict = {unique[i]: count[i] for i in range(len(unique))}\n count_dict_percentage = {\n unique[i]: np.round(count[i] / len(list(df.index.get_level_values(0).unique())), decimals=2)\n for i in range(len(unique))}\n\n print('The types and counts of different labels : \\n {count_dict}'.format(count_dict=count_dict))\n print('The types and counts of different labels as percentage of the total data'\n ' : \\n {count_dict}'.format(count_dict=count_dict_percentage))", "def mdisplay(dfs: List[DataFrame], names:List[str]=[]):\n \n html_str = ''\n if names:\n html_str += ('<tr>' + \n ''.join(f'<td style=\"text-align:center\">{name}</td>' for name in names) + \n '</tr>')\n html_str += ('<tr>' + \n ''.join(f'<td style=\"vertical-align:top\"> {df.to_html(index=False)}</td>' \n for df in dfs) + \n '</tr>')\n html_str = f'<table>{html_str}</table>'\n html_str = html_str.replace('table','table style=\"display:inline\"')\n display_html(html_str, raw=True)", "def showLevels(self):\n\n pa = 'EUR_USD GBP_USD AUD_USD USD_CAD USD_CHF NZD_USD'.split(' ')\n gr = 'D H4 H1 M30 M15'.split(' ')\n for i in xrange(len(pa)):\n dfs = p.DataFrame()\n for j in xrange(len(gr)):\n try:\n training = self.viewTraining(pa[i], gr[j])\n df = training[0]\n manifest = training[1]\n dfs = dfs.combine_first(manifest.set_index('timeframe'))\n plot(df.get_values())\n except: \n ''\n try:\n dfs['timeframe'] = dfs.index # save the lost field before calling set_index()\n print dfs.set_index('forecast').sort(ascending=False)\n except: ''\n dfp = p.read_csv('/ml.dev/bin/data/oanda/ticks/{0}/{0}-M5.csv'.format(pa[i])).sort(ascending=True).tail(50).ix[:,'closeAsk']\n plot(dfp)\n title('{0} Forecast'.format(pa[i]))\n legend(gr)\n show();\n #break", "def info(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, 
\"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")", "def show(self): # pragma: no cover\n if self.data is None:\n raise AttributeError(\"The data must be deconvolved first !\")\n self.data.show()", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def info(self):\n print 'A= ', self.application\n print 'C= ', self.city\n print 'D= ', self.dataset.shape", "def show_df_by_tags(df, tags):\n return st.dataframe(filter_df(df, tags)) if not 'Expert' in df.columns else st.dataframe(filter_df(df, tags), height=150, width=450)" ]
[ "0.68191516", "0.65245867", "0.6203311", "0.61643773", "0.6033394", "0.59863055", "0.5783366", "0.57670814", "0.5765709", "0.5762325", "0.5701758", "0.5682693", "0.5662552", "0.56528604", "0.56318945", "0.5623627", "0.56216586", "0.5618758", "0.56068605", "0.55752116", "0.5569756", "0.5567077", "0.5566187", "0.5566033", "0.5563489", "0.55400544", "0.55265874", "0.55134547", "0.5500225", "0.5469364" ]
0.739165
0
parse the table containing personal info
def get_personal_info(self, table):\n        json_result = {}\n        row_list = table.xpath('./tr[position() > 1]')\n        for row in row_list:\n            row_key = row.xpath('./td[1]/b/text()')\n            if row_key:\n                row_key = row_key[0]\n            else:\n                raise ProfileException("Failed to get key of personal info")\n            row_value = row.xpath('./td[2]/text()')\n            if (len(row_value) != 0) & hasattr(row_value[0], 'strip'):\n                row_value = row_value[0].strip()\n            else:\n                raise ProfileException("Failed to get value of personal info")\n            json_result.update({row_key: row_value})\n        if json_result:\n            return json_result\n        else:\n            raise ProfileException("Failed to get personal info table(row list is empty)")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseRow(self, row):\n\n data = {}\n for i in self.index:\n data[self.headers[i].value] = row[i].value\n # initialize parser\n username = re.findall('(\\w+)\\s(\\w+)', self.request.POST['user'])[0]\n self.user = User.objects.get(first_name=username[0], \n last_name=username[1]\n )\n self.parser(self.user, data)", "def parse_info_page(raw_page):\n student_id = etree.HTML(raw_page).xpath('//table[@style=\"width:100%; margin-top:30px;\"]/tr[3]/td[2]/text()')\n if student_id:\n return student_id[0].strip()\n else:\n raise ProfileException(\"Failed to get student id\")", "def parse(self):\n mp = {}\n cells = self.row.find_all(\"td\")\n\n for cell in cells:\n if \"visible-mobile\" in cell.attrs[\"class\"]:\n continue\n title = self._get_cell_title(cell)\n content = cell.find(\"span\", class_=\"table-responsive__inner\")\n\n if title == \"name\":\n mp.update(self._parse_name(cell, content))\n elif title == \"fraktion\":\n fraktion, klub = self._parse_abbreviation(content)\n mp[\"political_affiliation\"] = klub + \" (\" + fraktion + \")\"\n elif title == \"wahlkreis\":\n mp[\"wahlkreis\"] = content.text.strip()\n elif title == \"bundesland\":\n mp[\"state\"] = self._parse_abbreviation(content)[1]\n\n return mp", "def parse_results(table):\n results = []\n\n # FILL IN THE BLANK: Read each row from the table and append it to\n # `results` as a list. Be sure to also get the `href` value of the link\n # to the profile and include as the last element of each result list.\n\n return results", "def parse_gulde_people():\n # set url\n url = 'https://web-scraping-demo.zgulde.net/people' \n agent = 'codeup ds germain'\n # query\n response = requests.get(url, headers={'User-Agent': agent}) \n # soup\n soup = BeautifulSoup(response.text) \n # raw list of people\n people = soup.find_all('div', {'class':'person'}) \n # list of dicts for dataframe\n info_list = [] \n # parse each person\n for person in people: \n # grab name\n name = person.h2.text \n # grab more info\n quote, email, phone, address = person.find_all('p') \n # fix info\n quote, email, phone, address = quote.text.strip(), email.text, phone.text, address.text.strip()\n # set regex for address fix\n regexp = r'\\s{2,}' \n # fix address\n address = re.sub(regexp, ' ', address) \n # create dict\n person_dict = {'name':name, 'quote':quote, 'email':email, \n 'phone':phone, 'address':address} \n # add dict to list\n info_list.append(person_dict) \n # return dataframe\n return pd.DataFrame(info_list)", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def parse(self):\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n url = CostOfLiving.URL.format(self.city)\r\n req = Request(url, headers=hdr)\r\n page = urlopen(req)\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n self.table = soup.find(\"table\", attrs={\"class\": \"data_wide_table\"})", "def parse_profile_list_page(page):\n page_tree = etree.HTML(page)\n profile_list = page_tree.xpath(\n '//table[@class=\"table_header\"]/tr[position() > 4 and position() < (last() - 3)]/td/node()[1]')\n if profile_list:\n profile_list[0] = \"main\"\n return [text.strip() for text in profile_list]\n else:\n raise ProfileException(\"Failed to get profile list\")", "def parse_table(soup, start_gen, end_gen):\n pokes = []\n for cell in 
soup.find_all(\"td\", attrs={'style': None}):\n for name in cell.find_all(\"a\"):\n pokes.append(name.string)\n\n start_index = pokes.index(GEN_STARTS_WITH[start_gen])\n end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1\n\n # Doesn't have to be ordered, just personal preference.\n unique_list = OrderedSet(pokes[start_index:end_index])\n\n if start_gen != end_gen:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.\")\n else:\n print(f\"{len(unique_list)} Pokémon from gen {start_gen} were fetched.\")\n\n pkmn_string = ', '.join(unique_list)\n\n for key, value in NIDORAN_CASE.items():\n # Handling of Nidoran male/female symbols.\n pkmn_string = pkmn_string.replace(key, value)\n\n return pkmn_string", "def _parse_html_status(table):\n dfs = pandas.read_html(str(table))\n df = dfs[0]\n # moodle pads the end of the table with empty rows\n df = df.dropna(how='all', axis=0)\n\n cols = list(df.columns)\n\n mapping = {\n '.*First name.*Surname.*': 'Name',\n 'Username.*': 'Username',\n 'Status.*': 'Status',\n 'Grade.*': 'Grade',\n }\n\n for oldname, newname in mapping.items():\n for i, colname in enumerate(cols):\n cols[i] = re.sub(oldname, newname, colname)\n\n df.columns = cols\n df = df.set_index('Username')\n\n return df", "def read_personal_data(self):\n self._filename = self.input_filename()\n try:\n new_list = pd.read_csv(\n self._filename,\n sep=\"\\s+\",\n names=['index'] + self.columns,\n index_col=['index'],\n parse_dates=['birthday'],\n dtype={'id':'object', 'grade':'object'}\n )\n\n self.merge_list(new_list)\n except pd.errors.EmptyDataError as e:\n print(f'The file is empty [{e!r}].')", "def get_personal_info(line_objs):\n result = []\n start = True\n for line in line_objs:\n line_label = line.get('label')\n line_category = line.get('category')\n if line_label == 'title':\n if line_category == 'personal_info':\n start = True\n continue\n else:\n start = False\n if start:\n result.append(line)\n try:\n max_height = max([line.get('wh')[1] for line in result])\n except:\n max_height = max([line.get('wh')[1] for line in line_objs])\n track_candicate_name = False\n for line in result:\n height = line.get('wh')[1]\n if height == max_height and not track_candicate_name:\n for word in profile_words.keys():\n if word in line.get(\"text\"):\n continue\n line['label'] = 'candicate_name'\n track_candicate_name = True\n else:\n line['label'] = 'description'\n line['category'] = 'personal_info'\n return result", "def process_data(data):\n # Table Name from Json file\n table_name = data['table_name']\n\n # No of Column\n column_count = data['column_count']\n\n # No of Row\n row_count = data['row_count']\n\n # Table columns schema from Json file\n column_properties = data['column_properties']\n\n # Get the row row_properties\n row_properties = data['row_properties']\n return table_name, column_count, row_count, column_properties, row_properties", "def parse_sp500_wiki_page(html_page):\n soup = BeautifulSoup(html_page, 'html.parser')\n table = soup.find(\"table\", {\"id\": \"constituents\"})\n \n data = []\n column_names = [col_name.text.strip() for col_name in table.find_all('th')]\n for row in table.find_all('tr'):\n data_row = [col_name.text.strip() for col_name in row.find_all('td')]\n if data_row:\n data.append(data_row)\n return data, column_names", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # 
print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def extractionTitlePrincipals(cur, conn):\n fh = open(pathTitlePrincipals)\n reader = csv.reader(fh, delimiter = '\\t')\n firstLine = True\n idActor_list = []\n idJugar = 1\n for row in reader:\n if firstLine : firstLine = False # Read header\n else :\n if (row[3]=='actor' or row[3]=='actress'): #only record actors\n idTitulo = int(row[0][2:])\n idActor = int(row[2][2:])\n idActor_list.append(idActor)\n idJugar +=1\n # print(jugarInsert.format(idJugar, idTitulo, idActor))\n # REGISTER DATA IN JUGAR TABLE\n cur.execute(jugarInsert.format(idJugar, idTitulo, idActor))\n conn.commit()\n return idActor_list", "def get_person(self, url: str) -> dict:\n person = self.read_html(url)\n\n return {\n # TODO: There's a better way of doing this.\n self._columns[0]: person.select_one(\"h1\").text.strip(),\n self._columns[1]: person.select_one(\".party-membership--party\").text,\n self._columns[2]: \"; \".join(\n [a.text for a in person.select('[href^=\"tel:\"]')]\n ),\n self._columns[3]: \"; \".join(\n [a.text for a in person.select(\".email-address a\")]\n ),\n self._columns[4]: \"; \".join(\n [a.text.strip() for a in person.select(\".contact-actions__twitter\")]\n ),\n }", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def _parse_entry_table(self,obj,padding=0):\n data = ''\n\n for item in obj.data:\n line = self.parse(item,padding)\n tokens = line.split('\\n')\n tokens[0] = '%s [0x%x] {' % (tokens[0][:-1],int(item.offset))\n data += '\\n'.join(tokens) + '\\n'\n\n return data", "def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. 
SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]", "def parse_ipac_table(table_file):\n file_lines = table_file.readlines()\n if len(file_lines) < 5:\n raise ValueError(\"No images found!\")\n \n columns = file_lines[0].replace(\"|\",\" \").split()\n \n # Each row in the table starting at index 4 is metadata for a new image / observation\n metadatas = []\n for image_data in file_lines[4:]:\n line = image_data.replace(\"|\",\" \").split()\n obsdate_idx = columns.index(\"obsdate\")\n tmp = line[obsdate_idx] + \" \" + line[obsdate_idx+1]\n del line[obsdate_idx+1]\n line[obsdate_idx] = tmp\n metadatas.append(dict(zip(columns, line)))\n \n return metadatas", "def parse_table(self, table_name):\n table_offset = self.catalog.get(table_name)\n if not table_offset:\n logging.error(f\"Could not find table {table_name} in DataBase\")\n return\n table_offset = table_offset * self.page_size\n table = self._tables_with_data.get(table_offset)\n if not table:\n table_def = self._table_defs.get(table_offset)\n if table_def:\n table = TableObj(offset=table_offset, val=table_def)\n logging.info(f\"Table {table_name} has no data\")\n else:\n logging.error(f\"Could not find table {table_name} offset {table_offset}\")\n return\n access_table = AccessTable(table, self.version, self.page_size, self._data_pages, self._table_defs)\n return access_table.parse()", "def lineup_user(self, userid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/standings.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/playerInfo.phtml?pid=' + userid, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n info = list()\r\n for i in soup.find_all('td', {'class': 'name_cont'}):\r\n info.append(i.text.strip())\r\n return info", "def get_table_metadata(self, table_data):\n table_id = table_data[\"id\"]\n game_server = table_data[\"gameserver\"]\n game_name = table_data[\"game_name\"]\n table_url = f\"{self.base_url}/{game_server}/{game_name}?table={table_id}\"\n resp = self.fetch(table_url)\n game_progress_match = re.search('updateGameProgression\\\":([\\d]+)}', resp)\n if game_progress_match:\n game_progress = game_progress_match[1]\n else:\n game_progress = \"\"\n num_moves_match = re.search('move_nbr\":\"([^\"]*)\"', resp)\n if num_moves_match:\n num_moves = num_moves_match[1]\n else:\n num_moves = \"\"\n current_player_match = re.search('active_player\":\"([^\"]*)\"', resp)\n if current_player_match:\n current_player = current_player_match[1]\n else:\n current_player = \"\"\n return game_progress, num_moves, current_player, table_url", "def _parse_result_page(self, page):\n items = []\n table = list(page.findall(\".//table[@id='browse']\"))[0]\n for row in (x for x in list(table.findall('tr'))[1:]\n if len(x.getchildren()) != 1):\n item = self._parse_item_row(row)\n items.append(item)\n return items", "def current_person(self):\n d = self.people_table_data[self.row_i]\n\n # \"fullname\", \"lunaid\", \"age\", \"dob\", \"sex\", \"lastvisit\", \"maxdrop\", \"studies\",\n info = dict(zip(self.person_columns, d))\n info[\"pid\"] = d[8] # pid not shown\n\n # dont get fname and lname from table\n # could word split, but need to be accurate at least for edit module\n if self.sql:\n res = self.sql.query.get_name(pid=info[\"pid\"])\n info[\"fname\"] = res[0][0]\n info[\"lname\"] = res[0][1]\n return 
info\n # # main model\n # self.checkin_button.setEnabled(False)\n # print('people table: subject selected: %s' % d[8])\n # self.render_person(pid=d[8], fullname=d[0], age=d[2],\n # sex=d[4], lunaid=d[1])\n # self.render_schedule(ScheduleFrom.PERSON)", "def get_table_info(line):\n\n COMMENT_EXPR = '-- Name: '\n TYPE_EXPR = '; Type: '\n SCHEMA_EXPR = '; Schema: '\n OWNER_EXPR = '; Owner: '\n TABLESPACE_EXPR = '; Tablespace: '\n\n temp = line.strip('\\n')\n type_start = get_all_occurrences(TYPE_EXPR, temp)\n schema_start = get_all_occurrences(SCHEMA_EXPR, temp)\n owner_start = get_all_occurrences(OWNER_EXPR, temp)\n tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)\n if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:\n return (None, None, None, None)\n name = temp[len(COMMENT_EXPR) : type_start[0]]\n type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]\n schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]\n if not tblspace_start:\n tblspace_start.append(None)\n owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]\n return (name, type, schema, owner)", "def _parse_table(text):\n\n text = str(text)\n try:\n text = text.split(\"<pre>\")[1]\n text = text.split(\"</pre>\")[0]\n text = text.split(\"To save this output\")[0]\n lines = text.split(\"\\n\")\n except Exception as exc:\n raise NNDCRequestError(f\"Unable to parse text:\\n{exc}\\n{text}\")\n table = {}\n headers = None\n for line in lines:\n tokens = line.split(\"\\t\")\n tokens = [t.strip() for t in tokens]\n if len(tokens) <= 1:\n continue\n if headers is None:\n headers = tokens\n headers = _parse_headers(headers)\n for header in headers:\n table[header] = []\n else:\n if len(tokens) != len(headers):\n raise NNDCRequestError(\n \"Too few data in table row\\n\"\n + f' Headers: \"{headers}\"\\n'\n + f' Row: \"{tokens}\"'\n )\n for header, token in zip(headers, tokens):\n table[header].append(token)\n return table", "def _parse_table(res, key_index, value_index):\n data = OrderedDict()\n for sel in res.xpath('//tr'):\n columns = sel.xpath('td')\n if len(columns) == value_index+1:\n key = ''.join(columns[key_index].xpath('.//text()').extract())\n key = base.helpers.slugify(key.strip())\n value = ''.join(columns[value_index].xpath('.//text()').extract())\n value = value.strip()\n if key and value:\n data[key] = value\n return data", "def _parse_name(self, cell, cell_content):\n mp_page = cell_content.find(\"a\").attrs[\"href\"]\n\n full_name = cell_content.text.strip()\n name, *title = full_name.split(\",\")\n last, *first = name.split(\" \")\n\n id_ = mp_page[mp_page.find(\"PAD_\") + 4 : mp_page.rfind(\"/\")]\n url = re.sub(\"index.shtml$\", \"\", mp_page)\n\n first_name = \" \".join(first).rstrip(\",\").strip()\n last_name = last.strip()\n title = \",\".join(title).strip()\n\n return {\n \"id\": id_,\n \"url\": url,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"title\": title,\n }" ]
[ "0.59491", "0.5947452", "0.58929724", "0.5722304", "0.5655687", "0.5653452", "0.5643614", "0.56298906", "0.5590057", "0.5525687", "0.55215514", "0.5480727", "0.5425067", "0.5399843", "0.53760237", "0.5370106", "0.5361874", "0.53501594", "0.53389573", "0.5324406", "0.5318426", "0.5316898", "0.52961683", "0.52817994", "0.5276599", "0.5266132", "0.5258646", "0.523449", "0.523047", "0.5230427" ]
0.68037164
0
parse the table containing the course table
def get_course_table(self, table):\n        json_result = {}\n        row_list = table.xpath('.//table[@id = "s_course"]/tr[position() > 1]')\n        for row in row_list:\n            session = row.xpath('./td[1]/text()')\n            course_full_code_list = row.xpath('.//a[starts-with(@href, "javascript:course_popup")]/text()')\n            course_name_list = row.xpath('.//font[@style = "font-size:7pt;"]/text()')\n            course_list = []\n            if len(course_full_code_list) != len(course_name_list):\n                # year course design project would be count twice\n                if ("Design Project" == course_name_list[0]) & (len(course_full_code_list) + 1 == len(course_name_list)):\n                    course_name_list = course_name_list[1:]\n                else:\n                    raise ProfileException(\n                        "Error: unmatched lists. course code list:", course_full_code_list,\n                        "\n course name list:", course_name_list)\n            for i, full_code in enumerate(course_full_code_list):\n                if re.match(re.compile('\w{3}\d{3}[YH]1\s+[SFY]'), full_code) is None:\n                    raise ProfileException("Illegal course code!:" + full_code)\n                course_list.append({\n                    "courseName": course_name_list[i],\n                    "courseCode": full_code[0:6],\n                    "courseTime": full_code[-1],\n                    "courseLength": full_code[6:8]\n                })\n            # there is a empty session\n            if session:\n                json_result.update({session[0]: course_list})\n        if json_result:\n            return json_result\n        else:\n            raise ProfileException("Failed to get course_table table(row list is empty)")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self):\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n url = CostOfLiving.URL.format(self.city)\r\n req = Request(url, headers=hdr)\r\n page = urlopen(req)\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n self.table = soup.find(\"table\", attrs={\"class\": \"data_wide_table\"})", "def _parse_table(value):\n lines = value.split('\\n')\n header = None\n rows = []\n\n for l in lines:\n if l.startswith('+-'):\n pass\n elif l.startswith('|'):\n columns = [c.strip() for c in l.split('|')[1:-1]]\n if header is None:\n header = columns\n else:\n row = {}\n for i, c in enumerate(columns):\n if len(header)-1 <= i:\n row[i] = c\n else:\n row[header[i]] = c\n rows.append(row)\n return rows", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def _parse(self, course: NavigableString) -> ParseType:\n\n info = {\"link\": \"\", \"icon\": \"\", \"title\": \"\", \"description\": \"\",\n \"counts\": {}}\n\n info[\"link\"] = course.a[\"href\"]\n info[\"icon\"] = course.a.img[\"src\"]\n\n description: NavigableString = course.a.div\n info[\"title\"] = description.div.get_text()\n info[\"description\"] = description.p.get_text()\n\n counts: NavigableString = course.find(\"div\", {\"class\": \"courseCounts\"})\n counts_data: ResultSet = counts.find_all(\"li\")\n for data in counts_data:\n name: str = data.span.get_text().lower()\n val: str = data.find(\"p\").get_text()\n info[\"counts\"][name] = int(val.replace(\",\", \"\"))\n\n return info", "def parseSection(self, response):\n sel = Selector(response)\n sections = sel.xpath('//table[@class=\"sections responsive\"]//tr[not(@class=\"headers\")]')\n for s in sections:\n item = CourseItem(response.request.meta[\"item\"])\n item['section'] = s.xpath('@data-section-id').get().strip()\n item['instructors'] = s.css('.instructor::text').get()\n if item['instructors'] != None:\n item['instructors'].strip()\n item['instructors'] = [x.strip() for x in re.split(',', item['instructors'])]\n item['syllabus'] = s.css('.syllabus a::attr(href)').get()\n if item['syllabus'] != None:\n item['syllabus'].strip()\n return item\n \n\n \"\"\"\n Ignore the code below this. 
I was trying to get\n the times, days, and number registered from the class sections\n \"\"\"\n #times = s.xpath('//td[@class=\"time\"]/text()').get().strip()\n #times = re.split('-', times)\n #starttime = times[0]\n #endtime = times[1]\n #endt = dt.datetime.strptime(endtime, '%H:%M%p')\n # TODO: Check if \"am\"/\"pm\" from endt, & if endt hour is greater/less than startt \n #startt = dt.datetime.strptime(starttime, '%H:%M')\n #days = s.xpath('//td[@class=\"days\"]/text()').get().strip()\n #days = re.split(',', days)\n #numdays = len(days]\n \n #cap = s.xpath('//td[@class=\"registered\"]//a/text()').get().strip()\n #cap = re.split(' of ', cap.strip())\n #item['capacity'] = cap[1]", "def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def parse_datafile(datafile):\n lines = datafile.readlines()[1:] # ignore first line\n datafile.close()\n\n course_list = []\n for i, line in enumerate(lines):\n line = line.strip()\n if not line or line[0] in ('#', '-', '_'): # ignore lines that begin with '#', '-' or '_'\n continue\n cells = line.split('|')\n try:\n # put stripped content of each cell in corresponding variable\n full_semester, cid, creditpts, name, grade = tuple(map(lambda cell: cell.strip(), cells))\n semester = full_semester[-1]\n year = full_semester[:-1]\n if grade != '???':\n course_list.append(\n Course(semester, int(year), cid, int(creditpts), name, int(grade)))\n except:\n print(ParseError(path.basename(datafile.name)))\n exit(1)\n\n return course_list", "def table_parsing(self):\n table_count=0\n if self.table: \n for tebil in self.table:\n json_list=[]\n try:\n table_caption = wtp.parse(str(tebil)).tables[0].caption\n table_folder_name=remove_markup(str(table_caption))\n table_folder_name=table_folder_name.lower()\n table_folder_name=table_folder_name.strip()\n except Exception as e:\n print('Exception: table folder name or out of list in table', str(e))\n continue \n if table_caption:\n try:\n self.revision_page_folder_path=os.path.join(self.rd_folder_path_table,self.page_folder)\n if not os.path.exists(self.revision_page_folder_path):\n os.mkdir(self.revision_page_folder_path)\n table_folder_name=table_folder_name.strip('\\n')\n revision_table_folder_path=os.path.join(self.revision_page_folder_path,table_folder_name)\n revision_table_folder_path=revision_table_folder_path.strip()\n if not os.path.exists(revision_table_folder_path):\n os.mkdir(revision_table_folder_path)\n except Exception as e:\n print('Exception: revision table folder', str(e))\n continue\n table_count=table_count+1\n json_list.append(str(tebil))\n json.dump(json_list, open(os.path.join(revision_table_folder_path, 
self.revision_id_parent + '_' + self.revision_id_current + \".json\"), \"w\"))\n print('Table caption: ', table_folder_name)\n table_count=table_count+1 \n return table_count", "def _parse_table(text):\n\n text = str(text)\n try:\n text = text.split(\"<pre>\")[1]\n text = text.split(\"</pre>\")[0]\n text = text.split(\"To save this output\")[0]\n lines = text.split(\"\\n\")\n except Exception as exc:\n raise NNDCRequestError(f\"Unable to parse text:\\n{exc}\\n{text}\")\n table = {}\n headers = None\n for line in lines:\n tokens = line.split(\"\\t\")\n tokens = [t.strip() for t in tokens]\n if len(tokens) <= 1:\n continue\n if headers is None:\n headers = tokens\n headers = _parse_headers(headers)\n for header in headers:\n table[header] = []\n else:\n if len(tokens) != len(headers):\n raise NNDCRequestError(\n \"Too few data in table row\\n\"\n + f' Headers: \"{headers}\"\\n'\n + f' Row: \"{tokens}\"'\n )\n for header, token in zip(headers, tokens):\n table[header].append(token)\n return table", "def parseTable(chart):\n rowelems = chart.find_all('tr')\n rows = [rowelem.find_all('td') for rowelem in rowelems]\n data = [[elem.get_text() for elem in row] for row in rows]\n return(data)", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def isccp4table(self, text):\n #\n # See e.g. http://www.ccp4.ac.uk/dist/html/loggraphformat.html\n # for format of TABLES\n #\n # Note that this regular expression accommodates slight deviations\n # by making the \"closing\" \":\" of the $TABLE line optional.\n # This is done for consistency with loggraph's behaviour.\n #\n # Set up regular expression for entire table\n # This is the \"strict\" form of the table\n table = self.compile(\n \"isccp4table\",\n r\" *\\$TABLE ?:([^:]*):?[ \\n]+\\$(GRAPHS|SCATTER)[^:]*(:[^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$\",\n ).search(text)\n result = dict()\n if table:\n result[\"rawtable\"] = table.group(0)\n result[\"title\"] = table.group(1).strip()\n result[\"type\"] = table.group(2).strip()\n result[\"graphs\"] = table.group(3)\n result[\"columns\"] = table.group(4)\n result[\"text\"] = table.group(5)\n result[\"data\"] = table.group(6)\n result[\"nlines\"] = table.group(0).count(\"\\n\")\n return result\n # If there wasn't a match then try a simpler match\n # This relaxes some of the rules in the format definintion\n table = self.compile(\n \"isccp4simplertable\",\n r\" *\\$TABLE ?:([^\\n]*)\\n+\\$(GRAPHS|SCATTER)[^:]*(:[^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$\",\n ).search(text)\n if table:\n result[\"rawtable\"] = table.group(0)\n result[\"title\"] = table.group(1).strip()\n result[\"type\"] = table.group(2).strip()\n result[\"graphs\"] = table.group(3)\n result[\"columns\"] = table.group(4)\n result[\"text\"] = table.group(5)\n result[\"data\"] = table.group(6)\n result[\"nlines\"] = table.group(0).count(\"\\n\")\n return result\n return result", "def _createCoursesTable(self):\n\t\tcommand = \"\"\"CREATE TABLE courses (ID INTEGER PRIMARY KEY,\n\t\t\tname TEXT,\n\t\t\tauthor_id INTEGER,\n\t\t\tdescription TEXT\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)", "def get_semester_course_data(self, url, semester):\n print(f\"Obtaining and indexing information for {semester}\")\n html = 
self.call_url_and_get_html_object(url)\n tables = html.findall(\".//table[@class='courseListing basicTable courseListingSetWidths']\")\n\n # Parse html to get course offering data\n for table in tables:\n fields = table.findall(\".//td\")\n spans = table.findall(\".//span\")\n course_number = str(spans[1].text.strip())\n title = str(fields[4].text).strip()\n professor = str(fields[6].text).strip()\n status = str(fields[0].text)\n crn = str(fields[1].text)\n\n # Add course offering data to dictionary of course classes\n if course_number not in self.course_dict.keys():\n # If course doesn't already exist in dictionary keys, instantiate class of it\n self.course_dict[course_number] = Course(\n title=title, semester=semester,\n professor=professor, crn=crn, status=status)\n else:\n self.course_dict[course_number].add_instance_of_course(\n semester, professor, crn, status)", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def parse_dept(url):\n\n x = lxml.html.parse(url)\n course_roots = x.xpath('//*[@class=\"course\"]')\n\n subcourse_list = []\n\n for course_root in course_roots:\n\n day_elems = course_root.xpath('div//*[@class=\"mtgpat\"]')\n time_elems = course_root.xpath('div//*[@class=\"mtg_time\"]')\n\n for j in range(0, len(day_elems)):\n\n if day_elems[j].text == u'\\xa0' or 'TBA' in day_elems[j].text:\n day_list = []\n else:\n day_list = convert_day(day_elems[j].text)\n if time_elems[j].text == u'\\xa0' or 'TBA' in time_elems[j].text:\n hour_list = []\n else:\n hour_list = convert_time(time_elems[j].text)\n\n subcourse = [day_list, hour_list]\n subcourse_list.append(subcourse)\n\n return subcourse_list", "def parse_table(table):\n rows = table.find_all('tr')\n if not rows:\n raise ValueError(\"No rows for table\")\n pages = []\n table_tag = \"<table>\"\n tbl_headers = get_tbl_headers(rows)\n table_tag += \"<tr>\"\n for header in tbl_headers.keys():\n table_tag += conf.ADD_TH_TAG(header)\n table_tag += \"</tr>\"\n for row in rows:\n cols = row.find_all('td')\n if not cols:\n continue\n for page_name in cols[0].find_all('a'):\n if not page_name:\n continue\n pages.append(page_name.text)\n table_tag += '<tr>'\n for header, col in tbl_headers.items():\n try:\n table_tag += f\"<td>{preprocess_data(f'{header} : {cols[col].text}')} \\t</td>\"\n except IndexError:\n pass\n table_tag += '</tr>'\n table_tag += '</table>'\n if 
conf.DOWNLOAD_IMAGES:\n download_images(pages)\n return table_tag", "def parse_table(self, table_name):\n table_offset = self.catalog.get(table_name)\n if not table_offset:\n logging.error(f\"Could not find table {table_name} in DataBase\")\n return\n table_offset = table_offset * self.page_size\n table = self._tables_with_data.get(table_offset)\n if not table:\n table_def = self._table_defs.get(table_offset)\n if table_def:\n table = TableObj(offset=table_offset, val=table_def)\n logging.info(f\"Table {table_name} has no data\")\n else:\n logging.error(f\"Could not find table {table_name} offset {table_offset}\")\n return\n access_table = AccessTable(table, self.version, self.page_size, self._data_pages, self._table_defs)\n return access_table.parse()", "def preprocess_courses_corpus():\n soup = None\n with open('courses_corpus.html', 'r') as infile:\n content = infile.read()\n\n soup = BeautifulSoup(content, 'html.parser')\n\n docid = 0\n data = {}\n data['documents'] = []\n\n main_table = soup.find_all(\"div\", attrs={'class': 'courseblock'})\n for course in main_table:\n docid += 1\n title = course.find_all('p', attrs={'class':'courseblocktitle noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblocktitle noindent'}))!=0 else ''\n description = (course.find_all('p', attrs={'class':'courseblockdesc noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblockdesc noindent'}))!=0 else '') + ' ' + (course.find_all('p', attrs={'class':'courseblockextra noindent'})[0].text if len(course.find_all('p', attrs={'class':'courseblockextra noindent'}))!=0 else '')\n\n data['documents'].append({\n 'docId' : docid,\n 'title' : title.strip(),\n 'description' : description.strip()\n })\n\n with open('courses_data.json', 'w') as outfile:\n json.dump(data, outfile)", "def parse(self):\n for index in range(len(self.columns)):\n if index in self.columns:\n self.parsed_table[self.columns[index].col_name_str] = []\n if not self.table.linked_pages:\n return self.create_empty_table()\n for data_chunk in self.table.linked_pages:\n original_data = data_chunk\n parsed_data = parse_data_page_header(original_data, version=self.version)\n\n last_offset = None\n for rec_offset in parsed_data.record_offsets:\n # Deleted row - Just skip it\n if rec_offset & 0x8000:\n last_offset = rec_offset & 0xfff\n continue\n # Overflow page\n if rec_offset & 0x4000:\n # overflow ptr is 4 bits flags, 12 bits ptr\n rec_ptr_offset = rec_offset & 0xfff\n # update last pointer to pointer without flags\n last_offset = rec_ptr_offset\n # The ptr is the offset in the current data page. 
we get a 4 byte record_pointer from that\n overflow_rec_ptr = original_data[rec_ptr_offset:rec_ptr_offset + 4]\n overflow_rec_ptr = struct.unpack(\"<I\", overflow_rec_ptr)[0]\n record = self._get_overflow_record(overflow_rec_ptr)\n if record:\n self._parse_row(record)\n continue\n # First record is actually the last one - from offset until the end of the data\n if not last_offset:\n record = original_data[rec_offset:]\n else:\n record = original_data[rec_offset:last_offset]\n last_offset = rec_offset\n if record:\n self._parse_row(record)\n return self.parsed_table", "def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail", "def build_courses_from_rows(self, rowset):\n class_id_re = re.compile(\"[A-Z]+&* [0-9]+\")\n courses = []\n course_learning_outcomes = []\n for row in rowset:\n if not class_id_re.fullmatch(row[0].strip()):\n continue\n # If credit is numeric assign it to lower and upper credit bound\n # Otherwise, split the credit range and assign\n try:\n lowercb = float(row[2])\n uppercb = float(row[2])\n except ValueError:\n if \"-\" in row[2]:\n bounds = row[2].split(\"-\")\n lowercb = float(bounds[0])\n uppercb = float(bounds[1])\n else:\n lowercb = None\n uppercb = None\n \n course = models.Course(id=row[0].strip(),\n label=row[1].strip(\" or\"),\n lower_credit_bound=lowercb,\n upper_credit_bound=uppercb)\n course.save()\n\n outcome_string = row[3]\n clo_content = re.findall(\"[0-9]+\", outcome_string)\n for outcome in clo_content:\n core_learning_outcome = models.CoreLearningOutcome.objects.get(\n id=int(\n outcome))\n try:\n models.CourseLearningOutcome.objects.get(\n course=course,\n learning_outcome=core_learning_outcome)\n break\n except models.CourseLearningOutcome.DoesNotExist:\n course_learning_outcome = models.CourseLearningOutcome(\n course=course,\n learning_outcome=core_learning_outcome)\n course_learning_outcome.save()\n \n return (courses, course_learning_outcomes)", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def table_parser(table_files, study, outdir, timepoint=None, dtype=\"wide\",\n auto_type=False):\n # Welcome\n print(\"Starting tables 
parsing...\")\n\n # Check inputs\n if dtype not in (\"wide\", \"long\"):\n raise ValueError(\"Unexpected data type '{0}'.\".format(dtype))\n\n # Parse all the tables\n tables = []\n with progressbar.ProgressBar(max_value=len(table_files),\n redirect_stdout=True) as bar:\n for cnt, path in enumerate(table_files):\n\n # Open the TSV table\n with open(path, \"rt\") as open_file:\n raw_table = open_file.readlines()\n header = raw_table[0].rstrip(\"\\n\").split(\"\\t\")\n table_content = []\n for row in raw_table[1:]:\n row = row.rstrip(\"\\n\").split(\"\\t\")\n if auto_type:\n raise NotImplementedError(\n \"The automatic typing of columns has not been yet \"\n \"implemented.\")\n table_content.append(row)\n\n # Generate the final structure\n table = {}\n qname = os.path.basename(path).replace(\".tsv\", \"\")\n center = DEFAULT_CENTER\n if timepoint is None:\n timepoint = DEFAULT_TIMEPOINT\n for row_cnt, row in enumerate(table_content):\n assessment_id = \"{0}_q{1}_{2}\".format(\n study.lower(), qname, timepoint)\n subject = row[0].replace(\"sub-\", \"\")\n if dtype == \"wide\":\n assessment_id = \"{0}_{1}\".format(\n assessment_id, row_cnt + 1)\n assessment_id = \"{0}_{1}\".format(assessment_id, subject)\n\n # Create assessment structure\n assessment_struct = {\n \"identifier\": assessment_id,\n \"timepoint\": timepoint}\n\n # Build the subject questionnaires structure for this timepoint\n subj_questionnaires = {\n \"Questionnaires\": OrderedDict(),\n \"Assessment\": assessment_struct\n }\n\n # Fill the questionnaire structure\n qdata = OrderedDict()\n for question, answer in zip(header, row):\n question = question.decode(\"utf-8\", \"ignore\").encode(\n \"utf-8\")\n answer = answer.decode(\"utf-8\", \"ignore\").encode(\"utf-8\")\n qdata[question] = answer\n subj_questionnaires[\"Questionnaires\"][qname] = qdata\n\n # Add this questionnaire to the patient data\n if center not in table:\n table[center] = {}\n if subject not in table[center]:\n table[center][subject] = []\n table[center][subject].append(subj_questionnaires)\n\n # Saving result\n save_parsing(table, outdir, study, \"tables-{0}\".format(qname))\n tables.extend(glob.glob(\n os.path.join(outdir, \"tables-{0}*.json\".format(qname))))\n\n # Update progress bar\n bar.update(cnt)\n\n # Goodbye\n print(\"Done.\")\n\n return tables", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def parse_course(browser, college, course_node):\n # open the course details\n course_handle = course_node.find_element_by_class_name('course')\n course_handle.click()\n wait_for_load(browser)\n\n title = course_node.find_element_by_class_name('courseID').text\n m = re.match(r'([A-Z&]+) *(\\d+)', title)\n if not m:\n logging.warning('Unable to parse title: %s', title)\n return\n\n dept = m.group(1)\n code = m.group(2)\n name = titlecase.titlecase(\n course_node.find_element_by_class_name('courseTitle').text)\n creds = float(course_node.find_element_by_class_name('courseCredits').text)\n tags = parse_tags(course_node)\n prerequisites = parse_prerequisites(course_node)\n\n # close the course details\n course_handle.click()\n wait_for_load(browser)\n\n return Course(\n titlecase.titlecase(college), dept, code, name, creds, tags,\n prerequisites)", "def make_datatable(self):\r\n\r\n data = []\r\n\r\n for course in self.get_courses():\r\n gdir = course.id.course\r\n data.append([course.display_name, course.id.to_deprecated_string()]\r\n + self.git_info_for_course(gdir))\r\n\r\n return dict(header=[_('Course Name'), _('Directory/ID'),\r\n _('Git Commit'), _('Last Change'),\r\n _('Last Editor')],\r\n title=_('Information about all courses'),\r\n data=data)", "def _parse_table(res, key_index, value_index):\n data = OrderedDict()\n for sel in res.xpath('//tr'):\n columns = sel.xpath('td')\n if len(columns) == value_index+1:\n key = ''.join(columns[key_index].xpath('.//text()').extract())\n key = base.helpers.slugify(key.strip())\n value = ''.join(columns[value_index].xpath('.//text()').extract())\n value = value.strip()\n if key and value:\n data[key] = value\n return data", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab" ]
[ "0.59884536", "0.5907242", "0.5891754", "0.5856169", "0.58113074", "0.57813877", "0.57237667", "0.56541145", "0.56348234", "0.5606098", "0.56006235", "0.55713445", "0.55658233", "0.55501825", "0.5547375", "0.55394775", "0.55043435", "0.54974276", "0.54566413", "0.5454764", "0.5451167", "0.5436621", "0.54283136", "0.5403054", "0.5398238", "0.5360706", "0.53232217", "0.5280274", "0.52676976", "0.5259653" ]
0.67425555
0
Get a selection of integers for syscalls, by using the C preprocessor. This relies on the system headers defining the numbers as macros.
def get_syscall_numbers(fns): import subprocess p = subprocess.Popen(["cc", "-E", "-"], stdin=subprocess.PIPE, stdout=subprocess.PIPE) p.stdin.write("#include <sys/syscall.h>\n".encode()) for fn in fns: p.stdin.write(("SYS_%s\n" % fn).encode()) p.stdin.close() lns = list(p.stdout) ns = [int(x) for x in lns[-len(fns):]] return ns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__' : 'x64',\n '__i386__' : 'ia32',\n '__arm__' : 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn", "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__': 'x64',\n '__i386__': 'ia32',\n '__arm__': 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn", "def get_socket_ids() -> List[int]:\n socket_id_list = []\n for cpu_id in cpu_ids():\n api_file = open('/sys/devices/system/cpu/cpu' + str(cpu_id) + '/topology/physical_package_id')\n socket_id_list.append(int(api_file.readline().strip()))\n return list(set(socket_id_list))", "def cc_macros():\n\n try:\n p = subprocess.Popen(shlex.split(CC) + ['-dM', '-E', '-'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError:\n print('''Configure error: No acceptable C compiler found!\n\n Please make sure you have a C compiler installed on your system and/or\n consider adjusting the CC environment variable if you installed\n it in a non-standard prefix.\n ''')\n sys.exit()\n\n p.stdin.write('\\n')\n out = p.communicate()[0]\n\n out = str(out).split('\\n')\n\n k = {}\n for line in out:\n lst = shlex.split(line)\n if len(lst) > 2:\n key = lst[1]\n val = lst[2]\n k[key] = val\n return k", "def cc_macros():\n\n try:\n p = subprocess.Popen(shlex.split(CC) + ['-dM', '-E', '-'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError:\n print('''Configure error: No acceptable C compiler found!\n\n Please make sure you have a C compiler installed on your system and/or\n consider adjusting the CC environment variable if you installed\n it in a non-standard prefix.\n ''')\n sys.exit()\n\n p.stdin.write('\\n')\n out = p.communicate()[0]\n\n out = str(out).split('\\n')\n\n k = {}\n for line in out:\n lst = shlex.split(line)\n if len(lst) > 2:\n key = lst[1]\n val = lst[2]\n k[key] = val\n return k", "def cpu_ids() -> List[int]:\n api_file = open('/sys/devices/system/cpu/present', 'r')\n\n cpu_id_tmp = re.findall('\\d+|-', api_file.readline().strip())\n cpu_id_list = []\n for i in range(len(cpu_id_tmp)):\n if cpu_id_tmp[i] == '-':\n for cpu_id in range(int(cpu_id_tmp[i - 1]) + 1, int(cpu_id_tmp[i + 1])):\n cpu_id_list.append(int(cpu_id))\n else:\n cpu_id_list.append(int(cpu_id_tmp[i]))\n return cpu_id_list", "def get_cpu_compiler_arguments():\n return ['-I', str(_get_hoomd_include_path()), '-O3']", "def get_arg_select(arg_list):\n\n arg_select_string = ''\n argument_list = (itypes.rs1, itypes.rs2, itypes.rd, itypes.rm,\n itypes.imm12lo, itypes.imm12hi, itypes.imm12,\n itypes.imm20, itypes.shamt,\n itypes.shamtw)\n arg_select_list = ['0' for i in range(len(argument_list))]\n\n for i in range(len(arg_list)):\n for j in range(len(argument_list)):\n if arg_list[i] == argument_list[j]:\n if arg_select_list[j] == '1':\n pass\n else:\n arg_select_list[j] = '1'\n\n else:\n if arg_select_list[j] == '1':\n pass\n else:\n arg_select_list[j] = '0'\n arg_select_string += arg_select_list[j]\n\n return Signal(intbv(int(arg_select_string[-10:], 2))[10:])", "def _node_macros(self, node):\n return {\n '{$CPU_COUNT}': str(node.cpu),\n }", "def get_ops():\n li = [\"EOF\",\"ADD\",\"SUB\",\"MUL\",\"DIV\",\"POW\",\"BITAND\",\"BITOR\",\"CMP\",\"GET\", \\\n \"SET\",\"NUMBER\",\"STRING\",\"GGET\",\"GSET\",\"MOVE\",\"DEF\",\"PASS\", \\\n 
\"JUMP\",\"CALL\",\"RETURN\",\"IF\",\"DEBUG\",\"EQ\",\"LE\",\"LT\",\"DICT\", \\\n \"LIST\",\"NONE\",\"LEN\",\"LINE\",\"PARAMS\",\"IGET\",\"FILE\",\"NAME\", \\\n \"NE\",\"HAS\",\"RAISE\",\"SETJMP\",\"MOD\",\"LSH\",\"RSH\",\"ITER\",\"DEL\", \\\n \"REGS\",\"BITXOR\", \"IFN\", \"NOT\", \"BITNOT\"]\n dic = {}\n for i in li:\n dic[i] = li.index(i)\n return dic", "def find_ioctls(p, dispatch_addr):\n\t\n\timport pyvex\n\timport simuvex\n\timport claripy\n\ts = p.factory.blank_state(addr=dispatch_addr)\n\tpg = p.factory.path_group(s)\n\n\tgeneric_reg_vals = set()\n\tval_addr = {}\n\tsteps = 0\n\twhile len(pg.active) > 0 and steps < 25:\n\t\tfor i in pg.active:\n\t\t\t\tif not idaapi.isLoaded(i.addr):\n\t\t\t\t\tprint('Non mapped value for addr: {}'.format(hex(i.addr)))\n\t\t\t\t\tcontinue\n\t\t\t\tprint('step: {}, addr: {}'.format(steps, hex(i.addr)))\n\t\t\t\tfor reg in i.state.arch.default_symbolic_registers:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tval = i.state.se.eval(getattr(i.state.regs, reg))\n\t\t\t\t\t\t#Always use first occurrence\n\t\t\t\t\t\tgeneric_reg_vals.add(val)\n\t\t\t\t\t\tif val not in val_addr:\n\t\t\t\t\t\t\tval_addr[val] = i.addr\n\t\t\t\t\texcept simuvex.SimUnsatError:\n\t\t\t\t\t\tprint(\"failed to get {}\".format(reg))\n\t\t\t\t\texcept claripy.errors.ClaripyZeroDivisionError:\n\t\t\t\t\t\tprint(\"failed to get {}\".format(reg))\n\t\tpg.step()\n\t\tsteps += 1\n\tdevice_codes = {}\n\t\t\n\tgeneric_reg_vals = filter(lambda x: 0xfff0 > ((x >> 16) & 0xffff) > 0x10, generic_reg_vals)\n\tfor i in generic_reg_vals:\n\t\ttry:\n\t\t\tdevice_codes[((i >> 16) & 0xffff)] += 1\n\t\texcept KeyError:\n\t\t\tdevice_codes[((i >> 16) & 0xffff)] = 1\n\n\tif len(device_codes.keys()) == 0:\n\t\treturn []\n\tprint('potential device codes: {}'.format(device_codes))\n\tlikely_device_code = max(device_codes, key=device_codes.get)\n\tprint \"Likely device code: 0x%X\" % (likely_device_code,)\n\t\n\tout = []\n\tfor i in generic_reg_vals:\n\t\taddr = val_addr[i]\n\t\tif (i >> 16) & 0xffff == likely_device_code:\n\t\t\tout.append((addr, i))\n\treturn out", "def __gettid():\r\n try:\r\n import platform\r\n if not platform.system().startswith('Linux'):\r\n raise ValueError\r\n syscalls = {\r\n 'i386': 224, # unistd_32.h: #define __NR_gettid 224\r\n 'x86_64': 186, # unistd_64.h: #define __NR_gettid 186\r\n }\r\n import ctypes\r\n tid = ctypes.CDLL('libc.so.6').syscall(syscalls[platform.machine()])\r\n except:\r\n tid = -1\r\n return tid", "def determineNumberOfCPUs():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError,NotImplementedError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError,ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n 
res = 0\n for pd in pseudoDevices:\n if expr.match(pd) != None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def get_equations(system: str):\r\n if system == S_OPEN_2:\r\n return open2\r\n elif system == S_OPEN_1:\r\n return open1\r\n elif system == S_CLOSED_REVERSIBLE:\r\n return reversible\r\n elif system == S_ONLY_FORWARD:\r\n return irreversible\r\n elif system == S_OPEN_2_FEEDBACK:\r\n return open2_feedback\r\n elif system == TEST_SYSTEM:\r\n return test_system\r\n else:\r\n raise Exception(\"No such system found :%s\" % system)", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def get_isolate_cpus(self):\n\n command = \"cat /proc/cpuinfo | grep processor | awk '{print $NF}'\"\n out = run_and_getout(command)\n str_out = out.decode(self.default_code).replace('\\n', ' ').strip()\n str_out = str(str_out)\n if str_out[0] == \"0\":\n return str_out[2:]\n else:\n return str_out", "def get_gpio(request):\n lines=[]\n append=False\n with open(tasmotadir + \"/sonoff/sonoff_template.h\", \"r\") as f:\n for line in f:\n if append==True:\n split = line.split('//')[0]\n subbed = sub('[\\\\s+,;}]','', split)\n lines.append(subbed)\n if 'UserSelectablePins' in line:\n append=True\n if '}' in line:\n append=False\n gpios={}\n for num, gpio in enumerate(lines):\n gpios[gpio] = num\n return(gpios[request])", "def _get_sys_per_cpu_times():\r\n cpus = []\r\n f = open('/proc/stat', 'r')\r\n # get rid of the first line who refers to system wide CPU stats\r\n try:\r\n f.readline()\r\n for line in f.readlines():\r\n if line.startswith('cpu'):\r\n values = line.split()[1:8]\r\n values = tuple([float(x) / _CLOCK_TICKS for x in values])\r\n entry = nt_sys_cputimes(*values[:7])\r\n cpus.append(entry)\r\n return cpus\r\n finally:\r\n f.close()", "def _get_iops(self, report):\n match = re.search(\"iops\\=(\\d+)\", report)\n if match:\n return int(match.group(1))", "def read_sysparam():\n global sys_n, sys_t, sys_f\n fp = open(\"system.param\",\"r\")\n n = fp.readline()\n t = fp.readline()\n f = fp.readline()\n fp.close()\n n = n.rstrip('\\r\\n')\n t = t.rstrip('\\r\\n')\n f = f.rstrip('\\r\\n')\n pr = re.search(\"(n)\\s(\\d)\", n)\n if pr==None:\n print(\"Corrupted system.param(n). About to quit.\")\n sys.exit()\n else:\n sys_n = pr.group(2)\n pr = re.search(\"(t)\\s(\\d)\", t)\n if pr==None:\n print(\"Corrupted system.param(t). About to quit.\")\n sys.exit()\n else:\n sys_n = pr.group(2)\n pr = re.search(\"(f)\\s(\\d)\", f)\n if pr==None:\n print(\"Corrupted system.param(t). 
About to quit.\")\n sys.exit()\n else:\n sys_n = pr.group(2)", "def describe_operating_systems():\n pass", "def angr_find_ioctls(bin, dispatch_addr):\n\t\n\ttry:\n\t\timport angr\n\texcept ImportError:\n\t\tprint \"Please install angr to continue, see: https://github.com/andreafioraldi/angr-win64-wheels\"\n\t\treturn\n\t\n\tp = angr.Project(bin, auto_load_libs=False)\n\tprint('loaded binary in angr')\n\tioctls = find_ioctls(p, dispatch_addr)\n\treturn ioctls", "def get_icc_flags():\n cc_flags = ['-g']\n cc_flags += ['-D_GLIBCXX_USE_CXX11_ABI=0']\n cc_flags += ['-std=c++11']\n if CAD_DEBUG:\n cc_flags += ['-O0']\n else:\n # cc_flags += ['-O3']\n # cc_flags += ['-xHost']\n # cc_flags += ['-axSSE4.2,AVX,AVX2,CORE-AVX512']\n # cc_flags += ['-qopt-zmm-usage=high']\n # only the flag '-fast' is found to vectorize the box kernels properly\n cc_flags += ['-fast']\n cc_flags += ['-qopt-zmm-usage=high']\n if CAD_OPENMP:\n cc_flags += ['-qopenmp']\n return cc_flags", "def determineNumberOfCPUs():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # http://code.google.com/p/psutil/\n try:\n import psutil\n return psutil.NUM_CPUS\n except (ImportError, AttributeError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudoDevices:\n if expr.match(pd) != None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def test_cgc_random_syscall_handling_native_interface():\n\n binary = os.path.join(bin_location, \"tests\", \"cgc\", \"KPRCA_00011\")\n pov_file = os.path.join(bin_location, \"tests_data\", \"cgc_povs\", \"KPRCA_00011_POV_00000.xml\")\n output_file = os.path.join(bin_location, \"tests_data\", \"cgc_povs\", \"KPRCA_00011_stdout.txt\")\n add_options = {\n angr.options.UNICORN_HANDLE_CGC_RECEIVE_SYSCALL,\n angr.options.UNICORN_HANDLE_CGC_RANDOM_SYSCALL,\n angr.options.UNICORN_HANDLE_SYMBOLIC_ADDRESSES,\n angr.options.UNICORN_HANDLE_SYMBOLIC_CONDITIONS,\n angr.options.UNICORN_HANDLE_SYMBOLIC_SYSCALLS,\n }\n\n rand_syscall_data = {\n \"random\": [\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n 
(65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n ]\n }\n with open(output_file, \"rb\") as fh:\n output_bytes = fh.read()\n\n trace_cgc_with_pov_file(\n binary,\n \"tracer_cgc_receive_unicorn_native_interface_rx_bytes\",\n pov_file,\n output_bytes,\n add_options=add_options,\n syscall_data=rand_syscall_data,\n )", "def getThreads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())", "def go_c_enumerate():\n for i,k in enumerate(range(1,5)):\n print(i, k)", "def parse_defines(self):\n for line in self.header.splitlines():\n if line.lower().startswith(\"#define\"):\n _, line = line.strip().split(None, 1) # remove #define\n if \" \" in line:\n symbol, value = line.split(None, 1)\n if value.isdigit():\n value = int(value)\n elif value.startswith(\"0x\"):\n value = int(value, 16)\n elif value in self.types:\n self.types[symbol] = self.types[value]\n else:\n symbol = line\n value = \"\"\n self.constants[symbol] = value\n return self.constants", "def get_opcode_mode(op):\n op_str = f\"{op:05}\"\n DE = int(op_str[-2:])\n C = int(op_str[2])\n B = int(op_str[1]) \n A = int(op_str[0]) \n\n return A, B, C, DE", "def get_current_cpu_flags() -> Set[str]:\n\n cpuinfo = get_cpuinfo()\n flags = extract_cpu_flags(cpuinfo)\n return flags" ]
[ "0.50889546", "0.49972665", "0.4936319", "0.49223414", "0.4902251", "0.4890649", "0.4839331", "0.48086706", "0.47911236", "0.47417554", "0.47330827", "0.46965626", "0.46034023", "0.46000037", "0.45939147", "0.4557752", "0.45424187", "0.45383772", "0.45346913", "0.4531515", "0.4520843", "0.45147955", "0.45052874", "0.44946712", "0.44944587", "0.44835857", "0.44737652", "0.44726747", "0.44696486", "0.44331625" ]
0.70829827
0
Returns a digest of a secret you want to store in memory
def hash_secret(secret): hashed = hashlib.sha256(secret.encode('UTF-8')).hexdigest() return hashed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def get_secret(self):\n return Buffer.from_mpz(self._secret_key)", "def secretstore():\n pass", "def create_temporary_secret():\n return uuid.uuid4().hex", "def _compute_key(secret_key=None):\n if secret_key is None:\n secret_key = settings.SECRET_KEY\n if isinstance(secret_key, six.string_types):\n secret_key = secret_key.encode()\n return SHA256.new(bytearray(secret_key)).digest()", "async def get_secret(app: Sanic, secret_key: str, passphrase: str) -> str:\n\n data = await app.db.secrets.find_one({'secret_key': secret_key})\n await app.db.secrets.find_one({'secret_key': secret_key})\n if not data:\n raise exceptions.InvalidSecretKeyException()\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n if sign != data['signature']:\n raise exceptions.InvalidPassphraseException()\n\n await app.db.secrets.delete_one({'secret_key': secret_key})\n\n encrypted = data['secret'].encode()\n cipher = fernet.Fernet(key)\n if data.get('ttl'):\n try:\n secret = cipher.decrypt(encrypted, ttl=data['ttl']).decode()\n except fernet.InvalidToken:\n raise exceptions.InvalidSecretKeyException()\n else:\n secret = cipher.decrypt(encrypted).decode()\n\n return secret", "def make_secret(length=SecretLength.GOOGLE_AUTH):\n if hasattr(length, \"value\"):\n length = length.value\n\n return token_bytes(length)", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def secret(self):\n return self._secret", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def generate_sharedsecret(self):\n return self._get_shared_secret(self.public_key)", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def generate_sharedsecret_bytes(self):\n return number_to_string(\n self.generate_sharedsecret(),\n self.private_key.curve.order)", "def getSecret(self):\n\n with open(self._secret_file) as f:\n secret=f.readline().rstrip()\n \n return secret", "def shared_secret(self):\n return self.__shared_secret", "def secret_hash(data):\n\n passwords_hash = hashlib.md5(data.encode(\"UTF-8\")).hexdigest()\n \n return passwords_hash", "def secret() -> None:\n pass", "def secret():\n pass", "async def add_secret(app: Sanic, secret: str, passphrase: str, ttl: Optional[int]) -> str:\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n secret_key = secrets.token_hex(16)\n\n cipher = fernet.Fernet(key)\n encrypted = cipher.encrypt(secret.encode()).decode()\n\n expires = None\n if ttl:\n expires = datetime.utcnow() + timedelta(seconds=ttl)\n\n await app.db.secrets.insert_one({\n 'secret': encrypted,\n 'secret_key': secret_key,\n 'signature': sign,\n 'expires': expires, # for mongo index\n 'ttl': ttl, # for fernet check\n })\n\n return secret_key", "def value(cls):\n return cls.query().fetch()[0].secret", "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"secret\")", "def secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"secret\")", "def client_secret(self) -> str:", "def get_shared_secret(self):\n shared_secret = 
self.charm_config.get(\"shared-secret\")\n saved_shared_secret = self.kv.get(\"shared-secret\")\n if not shared_secret:\n if saved_shared_secret:\n return saved_shared_secret\n else:\n shared_secret = self.random_string(16)\n self.kv.set(\"shared-secret\", shared_secret)\n return shared_secret", "def hash_secret(secret: str) -> Tuple[bytes, bytes]:\n secret_bytes = secret.encode(SECRET_ENCODING)\n secret_salt = os.urandom(CRYPTO_SALT_LEN)\n secret_hash = hash_secret_raw(secret_bytes, secret_salt, **CRYPTO_PARAMS)\n\n return secret_hash, secret_salt", "def access_secret(self):\n return copy(self._access_secret)" ]
[ "0.7134875", "0.6946913", "0.6946709", "0.68133456", "0.67842025", "0.6751108", "0.6749412", "0.67133594", "0.66987425", "0.6670669", "0.6663457", "0.66460395", "0.66460395", "0.66328174", "0.660878", "0.6516705", "0.65102446", "0.64714366", "0.6459151", "0.6428858", "0.6423833", "0.63949215", "0.63949215", "0.63949215", "0.63819504", "0.63819504", "0.6360688", "0.6320339", "0.63057595", "0.6280677" ]
0.70165616
1
Verifies that a value in the current config (hashed) corresponds to the value passed as parameter (unhashed)
def verify_secret(prop_name, value): hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest() has_must_be = RUN_CONFIG.get(prop_name) return hashed == has_must_be
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid(self, user_specific_config: Any, factor: str) -> bool:", "def check(secret: bytes, b64str: str, values: Dict[str, Any]) -> bool:\n return check_with_reason(secret, b64str, values)[0]", "def verify_config_params(attack_config):\n _check_config(attack_config, _VALID_CONFIG_CHECKLIST)", "def check_value(self, value):", "def validate_uv_check_value_exists(self, password:str)->bool:\n if self._storage.get_uv_value() is None:\n salt = os.urandom(16)\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),\n length=32,salt=salt,iterations=100000,backend=default_backend())\n temp_key = base64.urlsafe_b64encode(kdf.derive(password.encode(\"UTF-8\")))\n fernet = Fernet(temp_key)\n token = fernet.encrypt(os.urandom(32))\n self._storage.set_uv_value(salt + token)\n return True\n return self.validate_uv_check_value(password)", "def test_hash_params():\n assert (\n hash_params(\n {\n \"name\": \"my-name\",\n \"labels\": {\n \"label1\": \"label\",\n },\n }\n )\n == \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n )", "def verify():", "def check_hashable(self, setup):\n try:\n hash(setup)\n except TypeError as e:\n raise AssertionError(f\"setup object is not hashable:\\n{setup}\") from e", "def validate(self, key, val):\n return True", "def validate(self, key, val):\n return True", "def check_config(config):\n rq = {\"name\", \"description\", \"region\", \"user\", \"instance_type\",\n \"base_image\", \"uploads\", \"commands\"}\n diff = rq - set(config.keys())\n if diff:\n raise(BadConfigFile(\"Missing keys {} in config\".format(diff)))", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def check_config(config):\n pass", "def honeypot_equals(val):\n expected = getattr(settings, 'HONEYPOT_VALUE', '')\n if callable(expected):\n expected = expected()\n return val == expected", "def _check_config(self):", "def valid(self):\n return self.hash.to_int('little') < self.target", "def validate_uv_check_value(self, password:str)->bool:\n check_val = self._storage.get_uv_value()\n salt = check_val[:16]\n token = check_val[16:]\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),\n length=32,salt=salt,iterations=100000,backend=default_backend())\n temp_key = base64.urlsafe_b64encode(kdf.derive(password.encode(\"UTF-8\")))\n fernet = Fernet(temp_key)\n try:\n fernet.decrypt(token)\n except InvalidToken:\n auth.debug(\"User verification failed\")\n return False\n return True", "def __hash__(self):\n return self.value.__hash__()", "def check_config(cfg):", "def _verify_hash(self, read_bytes):\n if self.hash is None:\n raise QuiltException(\"Hash missing - need to build the package\")\n _check_hash_type_support(self.hash.get('type'))\n digest = hashlib.sha256(read_bytes).hexdigest()\n if digest != self.hash.get('value'):\n raise QuiltException(\"Hash validation failed\")", "def verify_config(dut, **kwargs):\n st.log(\"KWARGS -- {}\".format(kwargs))\n cli_type = st.get_ui_type(dut, **kwargs)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n output = show(dut, cli_type)\n st.log(\"OUTPUT === {}\".format(output))\n supported_params = [\"state\", \"polling_interval\", \"collector_name\", \"collectors_cnt\", \"collector_ip\", \"port\",\n \"agent_id\"]\n if output:\n if not kwargs.get(\"data\"):\n st.error(\"VERIFY DATA NOT PROVIDED ...\")\n return False\n verify_data = kwargs.get(\"data\") if isinstance(kwargs.get(\"data\"), list) else [kwargs.get(\"data\")]\n for data in verify_data:\n if cli_type == 
'klish': data.pop(\"collector_name\", None)\n for key in data:\n if key not in supported_params:\n st.log(\"Unsupported params {}\".format(key))\n return False\n if key not in [\"collector_name\", \"collector_ip\", \"port\"]:\n if str(data[key]) != str(output[key]):\n st.log(\"Verification failed for {} with {}, hence checking other values ...\".format(data[key], output[key]))\n return False\n else:\n is_found = 0\n for collector_data in output[\"collectors\"]:\n if str(data[key]) != str(collector_data[key]):\n is_found = 1\n st.log(\"Verification failed for {} with {}\".format(data[key], collector_data[key]))\n else:\n is_found = 0\n break\n if is_found >= 1:\n st.log(\"Verification failed ...\")\n return False\n st.log(\"Verification successful ...\")\n return True\n else:\n st.error(\"Show output not found ...\")\n return False", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def _validate_hash(data, shasum):\n from hashlib import sha1\n digest = sha1(data).hexdigest()\n if digest == shasum:\n return True\n else:\n print('Invalid shasum, got: {} , expected: {}'.format(digest, shasum))\n return False", "def _check_valid_config(self):\n default_keys = self.default_config.keys()\n current_keys = self.config.keys()\n\n if default_keys != current_keys:\n msg = f\"Config must have the following keys : {list(default_keys)}\"\n self.logger.critical(msg)\n sys.exit(0)", "def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))", "def verifyImageVerification( imageVerification ):\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"", "def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)", "def compare_with_encrypted(model_config, param_config):\n for key, model_val in model_config.items():\n param_val = param_config.get(key, 'missing')\n if isinstance(model_val, str) and (model_val.startswith('$encrypted$') or param_val.startswith('$encrypted$')):\n assert model_val.startswith('$encrypted$') # must be saved as encrypted\n assert len(model_val) > len('$encrypted$')\n else:\n assert model_val == param_val, 'Config key {0} did not match, (model: {1}, input: {2})'.format(key, model_val, param_val)", "def __hash__(self):\n return hash(self.value)", "def fixed(o):\n try:\n hash(o)\n except TypeError:\n return False\n return True" ]
[ "0.61623335", "0.6038576", "0.6019497", "0.5998033", "0.59090203", "0.5885619", "0.58353555", "0.5834025", "0.58322227", "0.58322227", "0.58189595", "0.5798566", "0.57869095", "0.5744254", "0.5738265", "0.5719662", "0.5696878", "0.56657153", "0.56617755", "0.5657235", "0.561154", "0.5588371", "0.5582994", "0.55748445", "0.5572674", "0.55492485", "0.5548413", "0.55479246", "0.55390954", "0.55385596" ]
0.74857116
0
Returns the index of the rotation point >>> find_rotation(['play', 'xebra', 'bat', 'cat', 'dog']) 2
def find_rotation(arr): # edge case: already sorted if arr[0] < arr[-1]: return 0 for idx, item in enumerate(arr): # if the first letter of next item is 'lower' than first letter of item, # rotation point is index of next item if arr[idx+1][0] < item[0]: return idx+1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_rotation_point( word_list ):", "def test_finds_rotation_point_in_long_list(self):\n result = find_rotation_point(long_words_list)\n self.assertEqual(result, 6)", "def find_rotation_efficient(arr):\n # edge case: already sorted\n if arr[0] < arr[-1]:\n return 0\n\n low = 0\n high = len(arr)-1\n\n # when high is one greater than low, high will be rotation index\n while high - low > 1:\n\n # start guessing at middle\n guess_index = low + (high - low) / 2\n\n # rotation is left\n if arr[guess_index] < arr[low]:\n high = guess_index\n\n # rotation is right\n else:\n low = guess_index\n\n return high", "def test_finds_rotation_point_in_short_list(self):\n result = find_rotation_point(short_words_list)\n self.assertEqual(result, 3)", "def get_key_from_rot(rotation):\n if rotation < -67.5: \n return -90\n elif rotation < -22.5: \n return -45\n elif rotation < 22.5: \n return 0\n elif rotation < 67.5: \n return 45\n else:\n return 90", "def test_returns_single_phrase_if_len_one(self):\n result = find_rotation_point([\"nathan\"])\n self.assertEqual(result, 0)", "def _get_first_index(self, degree):\n if degree < 1:\n raise ValueError('degree must be 1 or greater')\n lo = 0\n hi = len(self._degreesDesc) - 1\n\n while lo < hi:\n mid = (lo + hi + 1) // 2\n if degree < self._degreesDesc[mid]:\n lo = mid\n else:\n hi = mid - 1\n\n if degree == self._degreesDesc[hi] and hi != -1:\n return hi\n else:\n return hi + 1", "def rotated_array_search(input_list, number):\n start = 0 \n end = len(input_list) - 1\n\n if len(input_list) == 0:\n return \"\\nThe list is empty\\n\"\n\n print(\"(target for search, index of target):\")\n return number, array_search(start, end, input_list, number)", "def idx(self):\n if self._idx is None:\n self._idx = list(np.where(self.polar_angle < self.polar_max)[0])\n return self._idx", "def rotated_array_search(input_list, number):\n if not number or not input_list:\n return -1\n\n return rotated_array_search_recursive(input_list, number, 0, len(input_list)-1)", "def __find_next_position_in_degrees(self, movement: int) -> int:\n next_position = (\n self.counterpoint[-1].scale_element.position_in_degrees\n + movement\n )\n return next_position", "def GetRotorPosition(self):\n position = self._position + 1\n return position", "def rotated_array_search(input_list, target):\n if target is None or input_list is None:\n return -1\n\n return rotated_array_search_recursive(input_list, target, 0, len(input_list)-1)", "def heading_idx(self):\n if self.heading > 0:\n idx = self.heading * 180\n else:\n idx = 360 + self.heading * 180\n return int(idx - 1)", "def find_knee_index(data, theta):\n try:\n if not isinstance(data, np.ndarray):\n raise AttributeError(\"The given data should be of type numpy array\")\n\n if not isinstance(theta, float) or not isinstance(theta, np.float64):\n raise AttributeError(\"The given angle should be of type float or np.float64\")\n\n data_shape = data.shape\n\n # make rotation matrix\n cos_fun = np.cos(theta)\n sin_fun = np.sin(theta)\n rotation_matrix = np.array(((cos_fun, -sin_fun), (sin_fun, cos_fun)))\n\n # rotate data vector\n rotated_vector = data.dot(rotation_matrix)\n\n LOGGER.info(\"Plotting the Data Before Finding The Index\")\n plt.scatter(rotated_vector[:, 0], rotated_vector[:, 1])\n plt.show()\n\n # Setting knee to be the index where the Y- Axis values are minimum\n knee = np.argmin(rotated_vector[:, 1])\n\n # If the knee is set to the last or first index, then it should have been a convex curve\n if knee == 0 or knee == (data_shape[0] - 
1):\n # Hence we find the index where the values are max\n knee = np.argmax(rotated_vector[:, 1])\n\n return knee\n except AttributeError as err:\n LOGGER.error(err)", "def _get_last_index(self, degree):\n if degree < 1:\n raise ValueError('degree must be 1 or greater')\n length = len(self._degreesDesc)\n lo = 0\n hi = length\n\n while lo < hi:\n mid = (lo + hi) // 2\n if degree > self._degreesDesc[mid]:\n hi = mid\n else:\n lo = mid + 1\n\n if lo < length and degree == self._degreesDesc[lo]:\n return lo\n else:\n return lo - 1", "def rotated_array_search(input_list, number):\n return rotated_array_search_soln(input_list, number, 0, len(input_list) - 1)", "def rotated_array_search(input_list, number):\n if input_list is None:\n return -1\n if number is None:\n return -1\n pivot = find_pivot(input_list, 0, len(input_list))\n list1 = input_list[0:pivot+1]\n list2 = input_list[pivot+1:]\n first_result = search_arr(list1, number, 0, len(list1) - 1)\n second_result = search_arr(list2, number, 0, len(list2) - 1)\n if first_result is None and second_result is None:\n return -1\n elif first_result is None:\n return len(list1) + second_result\n else:\n return first_result\n pass", "def rotated_array_search(input_list , number ):\r\n\r\n return binarysearch(input_list, number, 0, len(input_list) - 1)", "def get_rotation_interface(self, i, t):\n\n if self.rotation_type == 1:\n ith = self._get_check_i(self.interface, i)\n rotations = self.pro.get_ordering(self.interface, ith)\n t -= 1\n return rotations[t]\n else:\n return self.get_interface(t)", "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def rotation_angle(self):\n return self.container['rotation_angle']", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1", "def offset(self, needle):\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n\n idx = self._seq.index(needle)\n if self._seq[idx+len(needle):].find(needle) != -1:\n raise ValueError(\"Multiple occurances found!\")\n\n return idx", "def f_index(self, substring, direction=[]):\n substr = self.value(substring)\n if \"back\" in direction:\n pos = self._val.rfind(substr)\n else:\n pos = self._val.find(substr)\n\n return pos + 1", "def rotated_array_search(input_list, number):\n result = -1\n if len(input_list) <= 0:\n print(\"Please input a non-empty array!\")\n return result\n\n pivotIdx = findPivot(input_list)\n if number == input_list[0]:\n return 0\n elif number > input_list[0]:\n result = binarySearch(input_list, 0, pivotIdx, number)\n else:\n result = binarySearch(input_list, pivotIdx + 1, len(input_list) - 1, number)\n return result", "def rotated_array_search(input_list, number):\n n = len(input_list)\n # 1. 
find out the pivot\n pivot = findPivot(input_list, 0, n - 1)\n # If we didn't find a pivot,\n # then array is not rotated at all\n if pivot == -1:\n return binarySearch(input_list, 0, n - 1, number)\n\n # search in two subarrays around pivot\n if input_list[pivot] == number:\n return pivot\n if input_list[0] <= number:\n return binarySearch(input_list, 0, pivot - 1, number)\n return binarySearch(input_list, pivot + 1, n - 1, number)" ]
[ "0.70094186", "0.6499037", "0.6390685", "0.63771015", "0.60722977", "0.5954184", "0.5886457", "0.57360756", "0.57325774", "0.5722924", "0.5688037", "0.568782", "0.5638405", "0.5631212", "0.56212085", "0.55536884", "0.5515408", "0.55049914", "0.55042875", "0.54950285", "0.5465315", "0.5430133", "0.5426199", "0.5422009", "0.5411266", "0.53968495", "0.539305", "0.53893757", "0.5383013", "0.5376973" ]
0.6721891
1
Test flipflop conditions. Signal propagation only at the rising edge
def test_dflipflop(self): circ = DFlipFlop(size=2) circ.clk.pulse() self.assertSigEq(circ.q, 0) circ.d = 3 self.assertSigEq(circ.q, 0) circ.clk.set() self.assertSigEq(circ.q, 3) circ.d = 2 self.assertSigEq(circ.q, 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_flipflop(self):\n circ = FlipFlop(size=2)\n #test basic flip flop functionality\n circ.d = 3\n self.assertSigEq(circ.q, 0)\n circ.clk.pulse()\n self.assertSigEq(circ.q, 3)\n #test reset circuit\n circ.r.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n circ.r.reset()\n #test load\n circ.l.set()\n circ.d = 3\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)\n #test enable\n circ.l.reset()\n circ.e.set()\n circ.clk.pulse()\n self.assertSigEq(circ.q, 0)", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_should_flip():\n\n # Function: should_flip(bag_state, has_red, has_blue, has_green)\n\n # Test state and serial number:\n test_state = {\n 'suspicion level': 0,\n 'indicators': {},\n }\n test_state['serial number'] = 'JQXX7e3652'\n test_state['indicators']['check engine'] = False\n test_state['indicators']['everything ok'] = True\n\n # Label D, All off, return False.\n assert(should_flip(test_state, False, False, False) is False)\n\n # Label C, Red and blue on, green off, False.\n assert(should_flip(test_state, True, True, False) is False)\n\n # Label E, green and red on, blue off, True.\n assert(should_flip(test_state, True, False, True) is True)\n\n # Label J, green on, red and blue off, True (J in serial)\n assert(should_flip(test_state, False, False, True) is True)\n\n # Label Q, all lights on, True (Q in serial)\n assert(should_flip(test_state, True, True, True) is True)\n\n # Label Y, only blue light on. 
False (No Y in serial).\n assert(should_flip(test_state, False, True, False) is False)", "def test_invert_down(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"invert\": \"Yes\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n self.opp.states.set(\"sensor.test_state\", \"2\")\n self.opp.block_till_done()\n self.opp.states.set(\"sensor.test_state\", \"1\")\n self.opp.block_till_done()\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"", "def test_apply_father_wavelet_dirac(self):\n pass", "def back_test(self, turnover_frequency):", "def test_equivalence(self):\n x = np.random.RandomState(0).randn(1000)\n for order in range(1, 6):\n zpk = signal.butter(order, 0.35, output='zpk')\n b, a = zpk2tf(*zpk)\n sos = zpk2sos(*zpk)\n y = filtfilt(b, a, x)\n y_sos = sosfiltfilt(sos, x)\n assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)", "def test_forward_backward(self):\n f = forward(self.obs, self.S, self.A, self.E)\n b = backward(self.obs, self.S, self.A, self.E)\n fp = logsumexp(f[:, -1])\n emission = precompute_emission(np.log(self.E))[tuple(self.obs[0])]\n bp = logsumexp(np.log(self.S) + emission + b[:, 0])\n assert_allclose(fp, bp)", "def on_flip(self):\r\n self.next_data_has_pump = not self.next_data_has_pump\r\n self.probe_only_data, self.pump_probe_data = (\r\n self.pump_probe_data, self.probe_only_data)", "def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])", "def on_floated(self, event):\n if not self._guard & FLOATED_GUARD:\n self._guard |= FLOATED_GUARD\n try:\n self.declaration.floating = True\n finally:\n self._guard &= ~FLOATED_GUARD", "def test_high_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.highvoltage_rule.is_broken(data))", "def test_low_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.lowvoltage_rule.is_broken(data))", "def nearest_test_pulse(self):", "def flowingFrom(self, fount):" ]
[ "0.7181312", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.5667709", "0.56069386", "0.55510265", "0.55361986", "0.5455058", "0.54459006", "0.5419483", "0.5391464", "0.53797406", "0.5358948", "0.53540426", "0.53362405", "0.5326817", "0.5312062" ]
0.65154636
1
Test ELR flip flop
def test_flipflop(self):
    circ = FlipFlop(size=2)
    #test basic flip flop functionality
    circ.d = 3
    self.assertSigEq(circ.q, 0)
    circ.clk.pulse()
    self.assertSigEq(circ.q, 3)
    #test reset circuit
    circ.r.set()
    circ.clk.pulse()
    self.assertSigEq(circ.q, 0)
    circ.r.reset()
    #test load
    circ.l.set()
    circ.d = 3
    circ.clk.pulse()
    self.assertSigEq(circ.q, 0)
    #test enable
    circ.l.reset()
    circ.e.set()
    circ.clk.pulse()
    self.assertSigEq(circ.q, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_should_flip():\n\n # Function: should_flip(bag_state, has_red, has_blue, has_green)\n\n # Test state and serial number:\n test_state = {\n 'suspicion level': 0,\n 'indicators': {},\n }\n test_state['serial number'] = 'JQXX7e3652'\n test_state['indicators']['check engine'] = False\n test_state['indicators']['everything ok'] = True\n\n # Label D, All off, return False.\n assert(should_flip(test_state, False, False, False) is False)\n\n # Label C, Red and blue on, green off, False.\n assert(should_flip(test_state, True, True, False) is False)\n\n # Label E, green and red on, blue off, True.\n assert(should_flip(test_state, True, False, True) is True)\n\n # Label J, green on, red and blue off, True (J in serial)\n assert(should_flip(test_state, False, False, True) is True)\n\n # Label Q, all lights on, True (Q in serial)\n assert(should_flip(test_state, True, True, True) is True)\n\n # Label Y, only blue light on. False (No Y in serial).\n assert(should_flip(test_state, False, True, False) is False)", "def test_01_lighting(self):", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def flip(self):", "def back_test(self, turnover_frequency):", "def test_invert_down(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"invert\": \"Yes\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n self.opp.states.set(\"sensor.test_state\", \"2\")\n self.opp.block_till_done()\n self.opp.states.set(\"sensor.test_state\", \"1\")\n self.opp.block_till_done()\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"", "def testBeliefs1sk(self):", "def test_flip_loop():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (4,0), (6,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (6,5), (4,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def test_flip_loop2():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (5,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (5,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def testBeliefs2sk(self):", "def test_theft_and_stealing(self):", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def test_02_visit_again(self):", "def test_roll_or_hold(self):\n INPUT.side_effect = ['R', 'H', 'h', 'z', '12345', 'r']\n pig = game.pig.Pig('PlayerA', 'PlayerB')\n self.assertEqual(pig.roll_or_hold(), 'roll')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'roll')", "def test_inverse_transform(self):", "def do_revive(self, arg):\n \treturn False", "def test_rr_testeffect(results):\n test_t0 = results.test_effect()\n test_t1 = results.test_effect(0.)\n test_t2 = results.test_effect(5.2)\n assert test_t0 == pytest.approx(1.1920928955078125e-07)\n assert test_t1 == pytest.approx(1.1920928955078125e-07)\n assert test_t2 == 1.0", "def test_change_brightness_back_to_10():", "def test_flip_piece():\n board = Board(640, 640, 8)\n 
board.start_game()\n board.gm.flip_pieces = [(3, 3)]\n current_color = board.game_pieces[3][3].color\n board.flip_pieces()\n assert board.game_pieces[3][3].color != current_color\n \n board.gm.flip_pieces = [(3, 4)]\n current_color = board.game_pieces[3][4].color\n board.flip_pieces()\n assert board.game_pieces[3][4].color != current_color", "def test_invert_up(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"invert\": \"Yes\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n self.opp.states.set(\"sensor.test_state\", \"1\")\n self.opp.block_till_done()\n self.opp.states.set(\"sensor.test_state\", \"2\")\n self.opp.block_till_done()\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"off\"", "def test_right_mode(self):\n self.dp.setRewindingMode('AUTO')\n self.assertEqual(self.dp.getRewindingMode(), 'AUTO')\n self.dp.setRewindingMode('MANUAL')", "def test_outE_traverals(self):\r\n results = self.blake.outE()\r\n assert len(results) == 1\r\n assert self.blake_in_theoretics in results", "def flip():\n #Roller turns to curl page\n pwm.ChangeDutyCycle(11)\n time.sleep(0.22)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\n\n #flipper flips\n pwm2.ChangeDutyCycle(7.8)\n time.sleep(4)\n pwm2.ChangeDutyCycle(0)\n time.sleep(3)\n\n \"\"\"#Flipper turns to flip page and flips back\n pwm2.ChangeDutyCycle(4)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(8)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(12)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(13)\n time.sleep(3)\n pwm2.ChangeDutyCycle(2.4)\n time.sleep(5)\"\"\"\n\n \"\"\"#Rollers turning back\n pwm.ChangeDutyCycle(1)\n time.sleep(0.2)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\"\"\"", "def step_forward(self):", "async def foggle_flip(self, ctx: Context, base: Bases = 10):\n ...", "def run_flip(self, maxstep, nprint=1000, penref=0.2, thresh=1.0e-3):\n self.thresh = thresh\n step = 0\n while (step < maxstep) and (self.totpen>thresh):\n dpen = self.flip_color()\n accept = False\n if dpen <= 0.0:\n accept = True\n else:\n prob = np.exp(-dpen/penref)\n #print(\"dpen %10.5f prob %10.5f\" %(dpen, prob))\n if random.random() < prob:\n accept=True\n if accept:\n self.accept_flip()\n else:\n self.unflip_colors()\n if (step%nprint) == 0:\n print(\"step: %7d ; penalty %10.5f\" % (step, self.totpen))\n step += 1\n if step<maxstep:\n print(\"Converged after %7d steps with penalty %10.5f\" % (step, self.totpen))\n print(\"last delta_pen was %10.5f\" % dpen)\n converged = True\n else:\n print(\"Not converged!!!\")\n converged = False\n return (converged, step, self.totpen)", "def flip():\n return random.choice((True, False))", "def test_OR_low(self):\n outcome = self.OR_Neuron.activate([0, 0])\n self.assertAlmostEqual(outcome, 0, 2)" ]
[ "0.6335177", "0.6155589", "0.5920715", "0.5845766", "0.5824236", "0.57557905", "0.56701374", "0.56628877", "0.5648033", "0.56269133", "0.5609399", "0.559891", "0.55862045", "0.5580663", "0.5524137", "0.550997", "0.55030376", "0.54818213", "0.5480717", "0.5453894", "0.54397684", "0.54316175", "0.5391556", "0.53891546", "0.5378138", "0.53647244", "0.5355162", "0.53342336", "0.5331757", "0.53225243" ]
0.6759984
0
Return a nicely-formatted, source-like function signature.
def signature(function: model.Function) -> str:
    return str(function.signature)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_method_signature(self, locals, code):\n\n res = \"\"\n is_args = code.co_flags & 4\n is_kwargs = code.co_flags & 8\n total_args = code.co_argcount\n if is_args:\n total_args += 1\n if is_kwargs:\n total_args += 1\n for i in xrange(total_args):\n varname = code.co_varnames[i]\n\n if is_args and is_kwargs and i == total_args - 2:\n varname = \"*\" + varname\n elif is_args and is_kwargs and i == total_args - 1:\n varname = \"**\" + varname\n elif is_args and i == total_args - 1:\n varname = \"*\" + varname\n elif is_kwargs and i == total_args - 1:\n varname = \"**\" + varname\n if res == \"\":\n res = varname\n else:\n res += \", \" + varname\n\n return \"(%s)\" % res", "def header_from_function_name_and_args(fname, fargs):\n header = \"void {fname}_({fargs_str});\".format(\n fname=fname, fargs_str=args_str_from_args(fargs)\n )\n return header", "def signature(function):\n pass", "def signature(function):\n\tdesc = inspect.getargspec(function)\n\tif desc[3]:\n\t\tldefault = len(desc[3])\n\t\tdefault = desc[3]\n\t\tsign = ','.join(desc[0][:-ldefault])\n\telse:\n\t\tldefault = 0\n\t\tdefault=[]\n\t\tsign = ','.join(desc[0])\t\n\tfor n,v in zip(desc[0][-ldefault:],default):\n\t\tsign += ','+n+\"=\"+str(v)\t\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign", "def fmt_rust_function(func: Callable) -> str:\n return f\"{func.__module__}:{func.__code__.co_firstlineno}:{func.__name__}\"", "def log_function_code(func_to_log: Callable) -> str:\n if not callable(func_to_log):\n TypeError(f\"Parameter 'func_to_log' is not function. Actual value: {func_to_log}.\")\n function_definition = inspect.getsource(func_to_log)\n if function_definition.startswith(\"return \"):\n function_definition = function_definition[7:]\n return repr(function_definition.strip())", "def clean_source(func: Callable[[Any], Any]) -> str:\n source = inspect.getsource(func).split(\":\", 1)[1].strip()\n if source.endswith(\",\"):\n # special case for lambdas\n return source[:-1]\n return source", "def format_stack_entry(self, frame_lineno, lprefix=': '):\n import linecache, reprlib\n frame, lineno = frame_lineno\n filename = self.canonic(frame.f_code.co_filename)\n s = '%s(%r)' % (filename, lineno)\n if frame.f_code.co_name:\n s += frame.f_code.co_name\n else:\n s += \"<lambda>\"\n s += '()'\n if '__return__' in frame.f_locals:\n rv = frame.f_locals['__return__']\n s += '->'\n s += reprlib.repr(rv)\n line = linecache.getline(filename, lineno, frame.f_globals)\n if line:\n s += lprefix + line.strip()\n return s", "def input_signature(self):\n return self._function_spec.input_signature", "def parse_function_signature(code):\n m = re.search(\"^\\s*\" + re_func_decl + \"\\s*{\", code, re.M)\n if m is None:\n print(code)\n raise Exception(\"Failed to parse function signature. 
\"\n \"Full code is printed above.\")\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n return name, args, rtype", "def callsignature(function):\n\tdesc = inspect.getargspec(function)\n\tsign = ','.join(desc[0])\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign", "def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args", "def fortran_function(self) -> str:\n if self.f_override is not None:\n return indent(\n self.f_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\n \"$F_PREFIX$\", self.f_prefix),\n 4*' ')\n\n result = ''\n\n # declaration\n func_name = '{}_{}_{}'.format(\n self.f_prefix, self.class_name, self.name)\n in_parameters = self._f_in_parameters()\n return_type, out_parameters = self._f_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer, optional', 'err_code'))\n out_parameters.append(('character(:), allocatable, optional',\n 'err_msg'))\n\n all_parameters = in_parameters + out_parameters\n arg_list = ', &\\n'.join([par_name for _, par_name in all_parameters])\n arg_ilist = indent(arg_list, 8*' ')\n if return_type != '':\n result += 'function {}( &\\n{})\\n'.format(func_name, arg_ilist)\n else:\n result += 'subroutine {}( &\\n{})\\n'.format(func_name, arg_ilist)\n\n # parameter declarations\n result += ' implicit none\\n'\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n if return_type != '':\n result += ' {} :: {}\\n'.format(return_type, func_name)\n result += '\\n'\n\n # variable declarations\n c_return_type, fi_out_parameters = self._fi_out_parameters()\n if c_return_type:\n result += ' {} :: ret_val\\n'.format(c_return_type)\n for par_type, par_name in fi_out_parameters:\n result += ' {} :: {}\\n'.format(par_type, par_name)\n for par_type, par_name in self.ret_type.f_aux_variables():\n result += ' {} :: {}\\n'.format(par_type, par_name)\n if self.may_throw:\n result += ' integer (c_int) :: err_code_v\\n'\n result += ' type (c_ptr) :: err_msg_v\\n'\n result += ' integer (c_size_t) :: err_msg_len_v\\n'\n result += ' character (c_char), dimension(:), pointer :: err_msg_f\\n'\n result += ' character(:), allocatable :: err_msg_p\\n'\n result += ' integer (c_size_t) :: err_msg_i\\n'\n if c_return_type or fi_out_parameters or self.may_throw:\n result += '\\n'\n\n # convert input\n args = [param.f_chain_arg() for param in self.params]\n args += [par_name for _, par_name in fi_out_parameters]\n if self.may_throw:\n args += ['err_code_v', 'err_msg_v', 'err_msg_len_v']\n arg_str = ', &\\n'.join([8*' ' + arg for arg in args])\n\n # call C function\n fc_func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n chain_call = self.fc_chain_call(\n ns_prefix=self.c_prefix, class_name=self.class_name,\n fc_func_name=fc_func_name, fc_args=arg_str)\n result_name = ''\n if return_type != '':\n result_name = 
func_name\n elif out_parameters:\n result_name = out_parameters[0][1]\n result += self.ret_type.f_call_c('ret_val', chain_call)\n\n # handle errors if necessary\n if self.may_throw:\n # Note: I tried to factor this out into a function, but Fortran\n # makes that near-impossible. Since we're generating anyway, it's\n # not really duplication, so leave it as is.\n result += indent(dedent(f\"\"\"\\\n if (err_code_v .ne. 0) then\n if (present(err_code)) then\n err_code = err_code_v\n if (present(err_msg)) then\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg)\n do err_msg_i = 1, err_msg_len_v\n err_msg(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n end if\n {dedent(\n self.ret_type.f_return_dummy_result(result_name))}\n return\n else\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg_p)\n do err_msg_i = 1, err_msg_len_v\n err_msg_p(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n print *, err_msg_p\n stop\n end if\n else\n if (present(err_code)) then\n err_code = 0\n end if\n end if\n\n \"\"\"), 4*' ')\n\n # convert and return result\n result += self.ret_type.f_return_result(result_name, 'ret_val')\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 4*' ')", "def fmtd_str(self,c=False,prefix_symbol=\"\"):\n psym = prefix_symbol\n ms1 = (\n f\"{self.filename_prefix_mono}{self.event_kind:9} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lms1 = len(ms1)+len(\"(\")\n join_mstr = f\",\\n{' '*lms1}\"\n mavs = (\n f\"{self.argvars}\"\n )\n ms = f\"{psym}{ms1}{mavs}\"\n if c:\n aac = argvars_argname_color = \"MAGENTA\"\n ps1 = (\n f\"{self.filename_prefix_poly}{self.color.fore(f'{self.event_kind:9}','KIND')} \"\n f\"{self.separator * len(self.stack)} \"\n )\n lps1 = lms1\n join_pstr = f\",\\n{' '*lps1}\"\n pavs = (\n f\"{self.argvars}\"\n )\n ps = f\"{psym}{ps1}{pavs}\"\n return ps\n return ms", "def add_function_signature_help(specification: dict) -> dict:\n for f in specification[\"functions\"][\"signatures\"]:\n for argset_idx, argset in enumerate(\n specification[\"functions\"][\"signatures\"][f][\"signatures\"]\n ):\n args_summary = \"\"\n args_list = []\n for arg in specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"arguments\"\n ]:\n if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n vals = [\n specification[\"functions\"][\"to_short\"].get(\n val, specification[\"functions\"][\"to_short\"].get(val)\n )\n for val in arg[\"values\"]\n ]\n args_summary += \"|\".join(vals) + \"()\"\n\n if arg.get(\"optional\", False) and arg.get(\"multiple\", False) is False:\n args_summary += \"?\"\n text = f'Zero or one of each function(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg.get(\"optional\", False):\n args_summary += \"*\"\n text = f'Zero or more of each function(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'One of following function(s): {\", \".join([val for val in arg[\"values\"]])}'\n\n elif arg[\"type\"] in [\"NSArg\", \"StrArg\", \"StrArgNSArg\"]:\n args_summary += f'{arg[\"type\"]}'\n if arg.get(\"optional\", False) and arg.get(\"multiple\", False) is False:\n args_summary += \"?\"\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Zero or one namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n 
text = f'Zero or one namespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'Zero or one string argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg.get(\"optional\", False):\n args_summary += \"*\"\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Zero or more namespace arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n text = f'Zero or more namespace arguments or default namespace arguments (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'Zero or more of string arguments of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n if arg[\"type\"] in [\"NSArg\"]:\n text = f'Namespace argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n elif arg[\"type\"] == \"StrArgNSArg\":\n text = f'Namespace argument or default namespace argument (without prefix) of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n else:\n text = f'String argument of following type(s): {\", \".join([val for val in arg[\"values\"]])}'\n\n args_summary += \", \"\n args_list.append(text)\n\n args_summary = re.sub(\", $\", \"\", args_summary)\n specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"argument_summary\"\n ] = f\"{f}({args_summary})\"\n specification[\"functions\"][\"signatures\"][f][\"signatures\"][argset_idx][\n \"argument_help_listing\"\n ] = args_list\n\n return specification", "def generate_header_from_declarations(function_declarations, verbose=True):\n header = \"\"\n for (f_name, (f_dims, f_dict)) in function_declarations.iteritems():\n s = header_from_function_name_and_args(f_name, f_dict[\"args\"])\n header += s + \"\\n\"\n\n return header", "def signature(self, p_int): # real signature unknown; restored from __doc__\n return \"\"", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def fix_function_signatures(code):\n pat = r\"\"\"^[ \\t]*function[ \\t.\\n]* # keyword (function)\n (\\[?[\\w, \\t.\\n]*\\]?) 
# outputs: group(1)\n [ \\t.\\n]*=[ \\t.\\n]* # punctuation (eq)\n (\\w+)[ \\t.\\n]* # name: group(2)\n \\(?([\\w, \\t.\\n]*)\\)?\"\"\" # args: group(3)\n pat = re.compile(pat, re.X | re.MULTILINE) # search start of every line\n\n # replacement function\n def repl(m):\n retv = m.group(0)\n # if no args and doesn't end with parentheses, append \"()\"\n if not (m.group(3) or m.group(0).endswith(\"()\")):\n retv = retv.replace(m.group(2), m.group(2) + \"()\")\n return retv\n\n code = pat.sub(repl, code) # search for functions and apply replacement\n\n return code", "def getsource(func: Callable) -> str:\n\n if hasattr(func, SOURCE_ATTRIBUTE):\n # An attribute created in ``Function.eval()``\n return getattr(func, SOURCE_ATTRIBUTE)\n else:\n return unindent(inspect.getsource(func))", "def python_code_markdown(func: Callable) -> str:\n return \"\"\"\n ```python\n \"\"\" + inspect.getsource(func) + \"\"\"\n ```\n \"\"\"", "def format_signature(sig: inspect.Signature, colon: bool) -> str:\n # First get a list with all params as strings.\n result = pdoc.doc._PrettySignature._params(sig) # type: ignore\n return_annot = pdoc.doc._PrettySignature._return_annotation_str(sig) # type: ignore\n\n multiline = (\n sum(len(x) + 2 for x in result) + len(return_annot)\n > pdoc.doc._PrettySignature.MULTILINE_CUTOFF\n )\n\n def _try_highlight(code: str) -> str:\n \"\"\"Try to highlight a piece of code using pygments, but return the input as-is if pygments detects errors.\"\"\"\n pretty = pygments.highlight(code, lexer, signature_formatter).strip()\n if '<span class=\"err\">' not in pretty:\n return pretty\n else:\n return html.escape(code)\n\n # Next, individually highlight each parameter using pygments and wrap it in a span.param.\n # This later allows us to properly control line breaks.\n pretty_result = []\n for i, param in enumerate(result):\n pretty = _try_highlight(param)\n if multiline:\n pretty = f\"\"\"<span class=\"param\">\\t{pretty},</span>\"\"\"\n else:\n pretty = f\"\"\"<span class=\"param\">{pretty}, </span>\"\"\"\n pretty_result.append(pretty)\n\n # remove last comma.\n if pretty_result:\n pretty_result[-1] = pretty_result[-1].rpartition(\",\")[0] + \"</span>\"\n\n # Add return annotation.\n anno = \")\"\n if return_annot:\n anno += f\" -> {_try_highlight(return_annot)}\"\n if colon:\n anno += \":\"\n if return_annot or colon:\n anno = f'<span class=\"return-annotation\">{anno}</span>'\n\n rendered = \"(\" + \"\".join(pretty_result) + anno\n\n if multiline:\n rendered = f'<span class=\"signature pdoc-code multiline\">{rendered}</span>'\n else:\n rendered = f'<span class=\"signature pdoc-code condensed\">{rendered}</span>'\n\n return Markup(rendered)", "def funcstring(funcname):\n s = str(funcname)[10:] #chop off '<function '\n spi = s.index(' ')\n return s[:spi]", "def get_filename_and_formatted_source():\n sal = gdb.selected_frame().find_sal() # gdb.Symtab_and_line\n\n # Check if source code is available\n if sal.symtab is None:\n return \"\", []\n\n # Get the full source code\n closest_line = sal.line\n filename = sal.symtab.fullname()\n\n try:\n source = get_highlight_source(filename)\n except OSError:\n return \"\", []\n\n if not source:\n return \"\", []\n\n n = int(source_code_lines)\n\n # Compute the line range\n start = max(closest_line - 1 - n // 2, 0)\n end = min(closest_line - 1 + n // 2 + 1, len(source))\n num_width = len(str(end))\n\n # split the code\n source = source[start:end]\n\n # Compute the prefix_sign length\n prefix_sign = C.prefix(str(pwndbg.gdblib.config.code_prefix))\n 
prefix_width = len(prefix_sign)\n\n # Format the output\n formatted_source = []\n for line_number, code in enumerate(source, start=start + 1):\n if pwndbg.gdblib.config.context_source_code_tabstop > 0:\n code = code.replace(\"\\t\", \" \" * pwndbg.gdblib.config.context_source_code_tabstop)\n fmt = \" {prefix_sign:{prefix_width}} {line_number:>{num_width}} {code}\"\n if pwndbg.gdblib.config.highlight_source and line_number == closest_line:\n fmt = C.highlight(fmt)\n\n line = fmt.format(\n prefix_sign=prefix_sign if line_number == closest_line else \"\",\n prefix_width=prefix_width,\n line_number=line_number,\n num_width=num_width,\n code=code,\n )\n formatted_source.append(line)\n\n return filename, formatted_source", "def text_for_funcs_in_script(filename, prefix):\n funcs = funcs_in_script(filename)\n\n ###################################################\n # FIND LENGTH OF LONGEST FUNCTION NAME #\n ###################################################\n maxlen = 0\n for func in funcs:\n name, header = func\n length = len(name)\n if length > maxlen:\n maxlen = length\n\n ###################################################\n # CREATE ONE LINE FOR EACH FUNCTION #\n ###################################################\n text = ''\n for func in funcs:\n name, header = func\n namep = name + '()'\n line = prefix + namep.ljust(maxlen + 3) + '> ' + header + '\\n'\n text += line\n\n return text", "def signature(self) -> global___SnippetSignature:", "def get_parsed_declaration(self) -> str:\n args = self._get_arguments()\n\n func = self.node\n tu = func.tu\n\n # For functions the extent encompasses the return value, and the\n # location is the beginning of the functions name. So we can consume\n # all tokens in between.\n end = cindex.SourceLocation.from_offset(\n tu, func.location.file, func.location.offset - 1\n )\n extent = cindex.SourceRange.from_locations(func.extent.start, end)\n\n return_type = \" \".join(\n t.spelling for t in cindex.TokenGroup.get_tokens(tu, extent=extent)\n )\n\n return f\"{return_type} {func.spelling}({args})\"", "def format_reconstruct_signature(cls, kwargs):\n prefix = \" \"\n kwargs_sig_parts = []\n for k, v in sorted(kwargs.items()):\n v_str = str(v)\n if isinstance(v, pm.vec2d.Vec2d):\n v_str = \"(%.5g, %.5g)\" % (v.x, v.y)\n elif isinstance(v, float):\n v_str = \"%.5g\" % v\n part = f\"{prefix}{k}={v_str}\"\n kwargs_sig_parts.append(part)\n kwargs_sig = \",\\n\".join(kwargs_sig_parts)\n result = f\"{cls.__name__}(\\n{kwargs_sig})\"\n return result", "def format_partial(func: Callable, verbose: bool = False) -> str:\n fname = func.__qualname__ if hasattr(func, '__qualname__') else str(func)\n arg_str = \", \".join([repr(a) for a in func.args])\n kwargs_str = \", \".join([str(k)+\":\"+repr(v)\n for k, v in func.keywords.items()])\n if verbose:\n repr_ = f\"<partial {fname}:{arg_str} {kwargs_str}>\"\n else:\n repr_ = f\"<partial {fname}>\"\n return repr_", "def _print_source(f):\n\n @_wraps(f)\n def wrapper(*args, **kwargs):\n source = _getsource(f)\n print(_clean_source(source))\n return f(*args, **kwargs)\n\n return wrapper" ]
[ "0.6755815", "0.6658284", "0.6619437", "0.63633657", "0.63358295", "0.61643356", "0.6092141", "0.60560864", "0.6055912", "0.60539556", "0.6053157", "0.600914", "0.60030514", "0.5999202", "0.59839904", "0.5970945", "0.5919851", "0.5903308", "0.5886662", "0.5876354", "0.5852387", "0.58214796", "0.5784674", "0.57743406", "0.57542104", "0.57451546", "0.57426053", "0.5714513", "0.56702644", "0.5652809" ]
0.73504645
0
Set up 3D cube with percentiles of height
def set_up_percentiles_cube():

    test_data = np.full((5, 4, 4), -1, dtype=float)
    for i in range(5):
        test_data[i].fill(100*i + 200)

    percentiles = DimCoord(np.linspace(0, 100, 5), long_name="percentiles",
                           units="%")
    grid_x = DimCoord(np.arange(4), standard_name="projection_x_coordinate",
                      units="km")
    grid_y = DimCoord(np.arange(4), standard_name="projection_y_coordinate",
                      units="km")
    test_cube = iris.cube.Cube(test_data, long_name="snow_level", units="m",
                               dim_coords_and_dims=[(percentiles, 0),
                                                    (grid_y, 1), (grid_x, 2)])
    return test_cube
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def setUp(self):\n self.cube = set_up_probability_cube(\n ECC_TEMPERATURE_PROBABILITIES,\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n )\n\n self.percentile_25 = np.array(\n [[24.0, 8.75, 11.0], [8.33333333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n dtype=np.float32,\n )\n self.percentile_50 = np.array(\n [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],\n dtype=np.float32,\n )\n self.percentile_75 = np.array(\n [\n [48.0, 11.66666667, 36.0],\n [11.66666667, 11.0, 10.5],\n [9.66666667, 1.25, -19.0],\n ],\n dtype=np.float32,\n )", "def surface_area_of_cube(side):\n return side", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False", "def setUp(self):\n\n self.thresholds = np.array([276, 277], dtype=np.float32)\n self.rain_name = \"probability_of_falling_rain_level_above_surface\"\n self.snow_name = \"probability_of_falling_snow_level_below_surface\"\n\n rain_prob = np.array(\n [\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n ],\n dtype=np.float32,\n )\n self.rain_prob_cube = set_up_probability_cube(\n rain_prob, self.thresholds, variable_name=self.rain_name\n )\n\n snow_prob = np.array(\n [\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n ],\n dtype=np.float32,\n )\n self.snow_prob_cube = set_up_probability_cube(\n snow_prob, self.thresholds, variable_name=self.snow_name\n )\n\n high_prob = np.array(\n [\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n ],\n dtype=np.float32,\n )\n self.high_prob_cube = set_up_probability_cube(\n high_prob, self.thresholds, variable_name=self.snow_name\n )", "def setUp(self):\n self.cube = set_up_probability_cube(\n ECC_TEMPERATURE_PROBABILITIES,\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n )\n self.percentiles = [10, 50, 90]", "def volume_of_a_cuboid(length, width, height):\n return length * width * height", "def calc_hypercube_volume(r: float, n: int) -> float:\n return (r * 2) ** n", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 
1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 
0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def test_preservation_of_dimensions(self):\n percentiles_cube = set_up_percentiles_cube()\n test_data = np.array([percentiles_cube.data, percentiles_cube.data])\n percentiles = percentiles_cube.coord('percentiles')\n grid_x = percentiles_cube.coord('projection_x_coordinate')\n grid_y = percentiles_cube.coord('projection_y_coordinate')\n\n new_model_coord = build_coordinate([0, 1],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n input_cube = iris.cube.Cube(\n test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(new_model_coord, 0),\n (percentiles, 1),\n (grid_y, 2), (grid_x, 3)])\n\n plugin_instance = ProbabilitiesFromPercentiles2D(\n input_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(input_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def setup_precipitation_cube():\n data = np.array([[1, 2, 1], [1, 1, 1], [0, 2, 0], [1, 2, 1]], dtype=np.float32)\n precipitation_cube = set_up_variable_cube(\n data, name=\"lwe_precipitation_rate\", units=\"mm/hr\", spatial_grid=\"equalarea\"\n )\n return precipitation_cube", "def setUp(self):\n cube = set_up_variable_cube(\n np.zeros((2, 2), dtype=np.float32),\n name=\"lwe_thickness_of_precipitation_amount\",\n units=\"m\",\n time=dt(2017, 1, 10, 5, 0),\n frt=dt(2017, 1, 10, 3, 0),\n )\n self.cube = add_coordinate(\n cube,\n [dt(2017, 1, 10, 5, 0), dt(2017, 1, 10, 6, 0)],\n \"time\",\n is_datetime=True,\n )\n self.coord_name = \"time\"", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def __init__(self, cube_file):\n\t\timport pyfits, pywcs\n\t\t# Put the cube in RA - DEC - RM order and save it\n\t\tCube.__init__(self, np.transpose(pyfits.getdata(cube_file), (2, 1, 0)))\n\t\tself.wcs = pywcs.WCS(pyfits.getheader(cube_file))\n\n\t\tsky0 = self.pix2sky([0,0,0])\n\t \tskyN = self.pix2sky([self.x_max,self.y_max,self.z_max])\n\t \tself.ra_min = min(sky0[0],skyN[0])\n\t\tself.ra_max = max(sky0[0],skyN[0])\n\t\tself.ra_step = (self.ra_max-self.ra_min)/self.x_max\n\t \tself.dec_min = min(sky0[1],skyN[1])\n\t self.dec_max = max(sky0[1],skyN[1])\n\t\tself.dec_step = (self.dec_max-self.dec_min)/self.y_max\n\t\tself.fd_min = min(sky0[2],skyN[2])\n\t\tself.fd_max = max(sky0[2],skyN[2])\n\t\tself.fd_step = (self.fd_max-self.fd_min)/self.z_max", "def setUp(self):\n self.orography_cube = set_up_threshold_cube()\n 
self.percentiles_cube = set_up_percentiles_cube()", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def surface_area_cube(side_length: float) -> float:\r\n if side_length < 0:\r\n raise ValueError(\"surface_area_cube() only accepts non-negative values\")\r\n return 6 * side_length**2", "def cube_area(side_length):\n area = side_length ** 3\n return area", "def test_cube_multi_level(self):\n temperature = self._make_multi_level(self.temperature, time_promote=True)\n relative_humidity = self._make_multi_level(\n self.relative_humidity, time_promote=True\n )\n pressure = self._make_multi_level(self.pressure, time_promote=True)\n result = WetBulbTemperature().process(\n CubeList([temperature, relative_humidity, pressure])\n )\n self.assertEqual(result.coord_dims(\"time\")[0], 0)\n self.assertEqual(result.coord_dims(\"height\")[0], 1)", "def setUp(self):\n data_down = np.full((3, 2), dtype=np.float32, fill_value=0.1)\n uv_down_name = \"surface_downwelling_ultraviolet_flux_in_air\"\n\n self.cube_uv_down = set_up_variable_cube(\n data_down, name=uv_down_name, units=\"W m-2\"\n )\n self.cube_down_badname = set_up_variable_cube(\n data_down, name=\"Wrong name\", units=\"W m-2\"\n )", "def setUp(self):\n percentiles_cube = set_up_percentiles_cube()\n self.plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.reference_cube = percentiles_cube[0]\n self.orography_cube = set_up_threshold_cube()", "def __init__(self, *, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, range=None, **kwargs):\n super().__init__(points=points, **kwargs)\n\n if size_x is None or size_y is None or size_z is None:\n print('WARNING: when computing a voxelgrid for running a Neural net, voxel sizes should be homogeneous among different point clouds or the neural network wont learn spatial relationships. 
To ensure this, use (size_x, size_y, size_z) instead of (n_x, n_y, n_z)')\n\n self.x_y_z = [n_x, n_y, n_z]\n self.sizes = np.array([size_x, size_y, size_z])\n\n if range is None:\n self.xyzmin = self.bounds[0]\n self.xyzmax = self.bounds[1]\n else: \n self.xyzmin = range[:3]\n self.xyzmax = range[3:]\n\n for n, size in enumerate(self.sizes):\n if size is None:\n continue\n\n # ensure that 'sizes' are respected by making the box bigger if necessary\n margin = size - ((self.xyzmax[n] - self.xyzmin[n]) % size)\n self.xyzmin[n] -= margin / 2\n self.xyzmax[n] += margin / 2\n self.x_y_z[n] = int(round((self.xyzmax[n] - self.xyzmin[n]) / size))", "def test_3d_freq_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback_3D(dic,data)", "def cube(\n network,\n pore_diameter='pore.diameter'\n):\n return network[pore_diameter]**3", "def test_3d_lowmem():\n dic, data = ng.bruker.read_lowmem(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n lowmem_write_readback(dic, data)", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? 
# np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth", "def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube", "def setUp(self):\n self.cube = _create_2d_cube()" ]
[ "0.6401066", "0.6047249", "0.5978683", "0.5975754", "0.5891019", "0.57748264", "0.5761081", "0.5691293", "0.56676495", "0.5665823", "0.5639302", "0.56036085", "0.55903935", "0.5572805", "0.5559912", "0.5519475", "0.5457179", "0.5456965", "0.54461634", "0.54381275", "0.5431166", "0.53897417", "0.5381692", "0.53785175", "0.5376454", "0.5373093", "0.5364421", "0.53516215", "0.53480756", "0.53431624" ]
0.7220225
0
Set up 2D cube with "orography" data on which to threshold percentiles
def set_up_threshold_cube():
    test_data = 50*np.arange(16).reshape(4, 4)
    grid_x = DimCoord(np.arange(4), standard_name="projection_x_coordinate",
                      units="km")
    grid_y = DimCoord(np.arange(4), standard_name="projection_y_coordinate",
                      units="km")
    test_cube = iris.cube.Cube(test_data, long_name="surface_altitude",
                               units="m",
                               dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])
    return test_cube
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube", "def setUp(self):\n self.orography_cube = set_up_threshold_cube()\n self.percentiles_cube = set_up_percentiles_cube()", "def setUp(self):\n self.cube = set_up_probability_cube(\n ECC_TEMPERATURE_PROBABILITIES,\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n )\n\n self.percentile_25 = np.array(\n [[24.0, 8.75, 11.0], [8.33333333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n dtype=np.float32,\n )\n self.percentile_50 = np.array(\n [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],\n dtype=np.float32,\n )\n self.percentile_75 = np.array(\n [\n [48.0, 11.66666667, 36.0],\n [11.66666667, 11.0, 10.5],\n [9.66666667, 1.25, -19.0],\n ],\n dtype=np.float32,\n )", "def setUp(self):\n\n self.thresholds = np.array([276, 277], dtype=np.float32)\n self.rain_name = \"probability_of_falling_rain_level_above_surface\"\n self.snow_name = \"probability_of_falling_snow_level_below_surface\"\n\n rain_prob = np.array(\n [\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n ],\n dtype=np.float32,\n )\n self.rain_prob_cube = set_up_probability_cube(\n rain_prob, self.thresholds, variable_name=self.rain_name\n )\n\n snow_prob = np.array(\n [\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n ],\n dtype=np.float32,\n )\n self.snow_prob_cube = set_up_probability_cube(\n snow_prob, self.thresholds, variable_name=self.snow_name\n )\n\n high_prob = np.array(\n [\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n ],\n dtype=np.float32,\n )\n self.high_prob_cube = set_up_probability_cube(\n high_prob, self.thresholds, variable_name=self.snow_name\n )", "def setUp(self):\n percentiles_cube = set_up_percentiles_cube()\n self.plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.reference_cube = percentiles_cube[0]\n self.orography_cube = set_up_threshold_cube()", "def setUp(self):\n self.cube = set_up_probability_cube(\n ECC_TEMPERATURE_PROBABILITIES,\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n )\n self.percentiles = [10, 50, 90]", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 
1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def setup_orographic_enhancement_cube():\n data = np.array([[1, 1, 1], [0, 1, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float32)\n orographic_enhancement_cube = set_up_variable_cube(\n data, name=\"orographic_enhancement\", units=\"mm/hr\", spatial_grid=\"equalarea\"\n )\n return orographic_enhancement_cube", "def test_preservation_of_dimensions(self):\n percentiles_cube = set_up_percentiles_cube()\n test_data = np.array([percentiles_cube.data, percentiles_cube.data])\n percentiles = percentiles_cube.coord('percentiles')\n grid_x = percentiles_cube.coord('projection_x_coordinate')\n grid_y = percentiles_cube.coord('projection_y_coordinate')\n\n new_model_coord = build_coordinate([0, 1],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n input_cube = iris.cube.Cube(\n test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(new_model_coord, 0),\n (percentiles, 1),\n (grid_y, 2), (grid_x, 3)])\n\n plugin_instance = ProbabilitiesFromPercentiles2D(\n input_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(input_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def setUp(self):\n self.percentiles_cube = set_up_percentiles_cube()\n self.percentile_coordinate = find_percentile_coordinate(\n self.percentiles_cube)\n self.new_name = \"probability\"\n self.plugin_instance = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, self.new_name)\n self.orography_cube = set_up_threshold_cube()", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 
0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = 
np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def setUp(self):\n self.cube = _create_2d_cube()", "def test_attributes(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n self.assertEqual(result.units, \"1\")\n self.assertEqual(result.name(), self.new_name)\n self.assertEqual(result.attributes['relative_to_threshold'], 'below')\n self.assertEqual(result.attributes['thresholded_using'],\n 'surface_altitude')", "def test_threshold_dimensions(self, warning_list=None):\n threshold_data_3d = np.broadcast_to(self.orography_cube.data,\n (2, 4, 4))\n grid_x = self.orography_cube.coord(\"projection_x_coordinate\")\n grid_y = self.orography_cube.coord(\"projection_y_coordinate\")\n realization = DimCoord(np.arange(2), standard_name=\"realization\",\n units=\"1\")\n threshold_cube = iris.cube.Cube(threshold_data_3d,\n long_name=\"topography\", units=\"m\",\n dim_coords_and_dims=[(realization, 0),\n (grid_y, 1),\n (grid_x, 2)])\n\n warning_msg = 'threshold cube has too many'\n probability_cube = self.plugin_instance.process(threshold_cube)\n self.assertTrue(any(item.category == UserWarning\n for item in warning_list))\n self.assertTrue(any(warning_msg in str(item)\n for item in warning_list))\n\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)\n self.assertArrayAlmostEqual(probability_cube.data,\n set_reference_probabilities())", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def __init__(self, reference_rng: RNG, number_of_points: int, runs: int = 10, dimension: 
int = 3,\n scale: float = 1.0, homology_dimension: int = 0, filtration_size: int = 20,\n max_filtration_value: float = None, recalculate_distribution=False, delayed_coordinates=False,\n store_data=False, gpu=False):\n self.dimension = dimension\n # self.filtration_range = np.array([0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.055, 0.06, 0.065, 0.070, 0.075, 0.08, 0.085, 0.09, 0.095])\n # self.filtration_size = len(self.filtration_range)\n self.scale = scale\n super().__init__(reference_rng, runs, number_of_points, homology_dimension, filtration_size,\n max_filtration_value, recalculate_distribution, store_data, gpu)\n # Delayed coordinates can only be used for 3D. Working on making it more general.\n assert not delayed_coordinates or (\n delayed_coordinates and dimension == 3), \"Delayed coordinates can only be used for 3D.\"\n self.delayed_coordinates = delayed_coordinates", "def test_1_1_2D_cube_init(self): # TODO: REMOVE FUNC AFTER SPLIT\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5)]\n\n nn_checks = {(0.5, 0.5): [(0, 1), (1, 0), (0, 0), (1, 1)],\n (0, 1): [(0, 0), (1, 1), (0.5, 0.5)]}\n\n init_triangulation(2, 0, check, nn_checks)", "def test_4_1_5D_cube_init(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0),\n (1, 1, 1, 0, 0), (1, 1, 1, 1, 0), (1, 1, 1, 0, 1),\n (1, 1, 0, 1, 0),\n (1, 1, 0, 1, 1), (1, 1, 0, 0, 1), (1, 0, 1, 0, 0),\n (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1),\n (1, 0, 0, 0, 1), (0, 1, 0, 0, 0), (0, 1, 1, 0, 0),\n (0, 1, 1, 1, 0),\n (0, 1, 1, 1, 1), (0, 1, 1, 0, 1), (0, 1, 0, 1, 0),\n (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1),\n (0, 0, 1, 0, 1), (0, 0, 0, 1, 0), (0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1),\n (0.5, 0.5, 0.5, 0.5, 0.5)]\n\n nn_checks = {(0, 1, 0, 1, 1): [(0, 0, 0, 0, 0), (\n 0.5, 0.5, 0.5, 0.5, 0.5), (0, 0, 0, 1, 1), (1, 1, 0, 1, 1),\n (0, 1, 0, 0, 0),\n (0, 1, 0, 0, 1),\n (0, 1, 0, 1, 0),\n (0, 0, 0, 0, 1),\n (1, 1, 1, 1, 1),\n (0, 1, 1, 1, 1),\n (0, 0, 0, 1, 0)]}\n\n init_triangulation(5, 0, check, nn_checks)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_WIMP_cut_region_on_true_data(bolo_name, mass, analysis):\n\t\n\n\t#Load 2D PDF\n\tfWIMP2D, f = PyRPl.open_ROOT_object(\"./ROOT_files/WIMP_PDF2D_\" + analysis + \".root\", \"WIMP_\" + mass + \"_GeV\")\n\n\t#Load cut value on PDF for 95% WIMP box\n\tcut_val_90, cut_val_99 = 0,0\n\twith open (\"./Text_files/WIMP_PDF_90_and_99_cut_value_\" + analysis + \".txt\", \"r\") as fcut:\n\t\tstuff = [elem.rstrip().split(\",\") for elem in fcut.readlines()]\n\t\tfor elem in stuff:\n\t\t\tmass_val = elem[0]\n\t\t\tif int(mass)==int(mass_val):\n\t\t\t\tcut_val_90 = float(elem[1])\n\t\t\t\tcut_val_99 = float(elem[2])\n\t\n\n\tdata_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/Fond_ERA_merged/\"\n\tfilou = TFile(data_path + bolo_name + \"_\" + analysis + \"_fond.root\", \"read\")\n\ttree = filou.Get(\"data\")\n\tnum_pass_cut =0\n\n\thpass = TH2F(\"hpass\", \"hpass\", 100, 0, 15, 100, 0, 15)\n\n\t# #T Check that the events are found where expected\n\t# arr1 = np.random.uniform(0,15,size=(200000,2))\n\t# for i in 
range(arr1.shape[0]):\n\t# \tPDF_val = fWIMP2D.Eval(arr1[i][0], arr1[i][1])\n\t# \tif (cut_val_99<PDF_val<cut_val_90):\n\t# \t# if (cut_val_99<PDF_val<cut_val_90):\n\t# \t\tnum_pass_cut+=1\n\t# \t\thpass.Fill(arr1[i][0], arr1[i][1])\t\t\n\n\t# hpass.Draw()\n\t# raw_input()\n\n\tfor k in range(tree.GetEntries()):\n\t\ttree.GetEntry(k)\n\t\tER=(1+8./3)*0.5*(tree.EC1+tree.EC2)-0.33*(1.5*tree.EIA+4*tree.EIB+1.5*tree.EIC+4*tree.EID)\n\t\tPDF_val = fWIMP2D.Eval(ER, 0.5*(tree.EIB+tree.EID))\n\t\tif (cut_val_99<PDF_val<cut_val_90 and 0.5*(tree.EIB+tree.EID)>0.7):\n\t\t# if (cut_val_99<PDF_val<cut_val_90):\n\t\t\tnum_pass_cut+=1\n\t\t\thpass.Fill(0.5*(tree.EC1+tree.EC2), 0.5*(tree.EIB+tree.EID))\n\n\tprint num_pass_cut\n\thpass.Draw()\n\traw_input()", "def __init__(self):\n\n # self.threshold = 3.\n self.gamma_min = 3\n self.gamma_max = 12\n self.n_samples = 40\n # self.do_plots = False\n # self.do_albedo = True\n # self.verbose = True\n\n self.nbands = 7\n self.bu = np.array([0.004, 0.015, 0.003, 0.004, 0.013, 0.010, 0.006])\n\n # Determine 250 or 500 meters product\n # self.resolution = 500\n\n # self.pixelWidth = 500\n # self.pixelHeight = 500", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_preservation_of_single_valued_dimension(self):\n percentiles_cube = set_up_percentiles_cube()\n new_model_coord = build_coordinate([0],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n percentiles_cube.add_aux_coord(new_model_coord)\n percentiles_cube = iris.util.new_axis(percentiles_cube,\n scalar_coord='leading_coord')\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(percentiles_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False", "def test_with_random(unitless=True):\n\n delta = 1.33333\n cube1 = algebra.make_vect(np.random.normal(0, 1,\n size=(257, 124, 68)))\n\n info = {'axes': [\"freq\", \"ra\", \"dec\"], 'type': 'vect',\n 'freq_delta': delta / 3.78, 'freq_centre': 
0.,\n 'ra_delta': delta / 1.63, 'ra_centre': 0.,\n 'dec_delta': delta, 'dec_centre': 0.}\n cube1.info = info\n cube2 = copy.deepcopy(cube1)\n\n weight1 = algebra.ones_like(cube1)\n weight2 = algebra.ones_like(cube2)\n\n bin_left, bin_center, bin_right, counts_histo, binavg = \\\n calculate_xspec(cube1, cube2, weight1, weight2,\n window=\"blackman\",\n truncate=False,\n nbins=40,\n unitless=unitless,\n logbins=True)\n\n if unitless:\n pwrspec_input = bin_center ** 3. / 2. / math.pi / math.pi\n else:\n pwrspec_input = np.ones_like(bin_center)\n\n volume = 1.\n for axis_name in cube1.axes:\n axis_vector = cube1.get_axis(axis_name)\n volume *= abs(axis_vector[1] - axis_vector[0])\n\n pwrspec_input *= volume\n\n for specdata in zip(bin_left, bin_center,\n bin_right, counts_histo, binavg,\n pwrspec_input):\n print((\"%10.15g \" * 6) % specdata)", "def test_attributes_inverse_ordering(self):\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube,\n self.new_name)\n result = plugin_instance.create_probability_cube(self.percentiles_cube,\n self.orography_cube)\n self.assertEqual(result.attributes['relative_to_threshold'], 'above')" ]
[ "0.6977779", "0.6504011", "0.64642787", "0.62994087", "0.6236152", "0.6200467", "0.6112209", "0.6097758", "0.6073408", "0.5821836", "0.57702947", "0.57655525", "0.5731981", "0.56486183", "0.56143403", "0.561078", "0.55911934", "0.55821806", "0.5579705", "0.55728793", "0.5564408", "0.5561576", "0.5555961", "0.55131567", "0.5503887", "0.54797155", "0.54692245", "0.54619604", "0.5453044", "0.54482657" ]
0.68136084
1
Test setting of inverse_order flag using percentiles_cube. In this case the flag should be false as the values associated with the percentiles increase in the same direction as the percentiles.
def test_inverse_order_false(self): plugin_instance = ProbabilitiesFromPercentiles2D( self.test_cube, 'new_name') self.assertFalse(plugin_instance.inverse_ordering)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inverse_order_true(self):\n percentiles_cube = self.test_cube.copy(\n data=np.flipud(self.test_cube.data))\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.assertTrue(plugin_instance.inverse_ordering)", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_values_inverse_ordering(self):\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected = 1.0 - expected\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_attributes_inverse_ordering(self):\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube,\n self.new_name)\n result = plugin_instance.create_probability_cube(self.percentiles_cube,\n self.orography_cube)\n self.assertEqual(result.attributes['relative_to_threshold'], 'above')", "def test_values_inverse_ordering(self):\n reference_cube = self.cube.copy()\n plugin = NonLinearWeights(cval=0.85)\n result = plugin.process(self.cube, self.coord_name, inverse_ordering=True)\n expected_result = np.array([0.45945946, 0.54054054])\n self.assertArrayAlmostEqual(result.data, expected_result)\n # check input cube blend coordinate order is unchanged\n self.assertArrayEqual(\n self.cube.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )\n # check weights cube and input cube blend coordinate orders match\n self.assertArrayEqual(\n result.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test__inverse_transform_continuous(self):", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = 
ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def test_inverse_b(self):\n for q in self.all:\n self.assertTrue(\n (q*q.inverse()).almost_equal(Quaternion(1, 0, 0, 0)))", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube", "def test_inverse_a(self):\n for q in self.all:\n self.assertTrue(\n (q.inverse()*q).almost_equal(Quaternion(1, 0, 0, 0)))", "def test_masked_data_below(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n 1 - self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def Q8_test():\n dispo = [False, True, True, False]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 
4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return 1 == indice(0, tab_dist, dispo)", "def _check_inverse_transform(self, Z):\n Z_round_trip = self.inverse_func(self.func(Z))\n if not np.allclose(Z_round_trip, Z, equal_nan=True):\n raise UserWarning(\n \"The provided functions are not strictly\"\n \" inverse of each other. If you are sure you\"\n \" want to proceed regardless, set\"\n \" 'check_inverse=False'.\"\n )", "def test_check_data_specifying_single_percentile_not_as_list(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=25)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_probabilities_not_monotonically_increasing(self):\n data = np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_inverse_distance(method, test_data, test_grid):\n xp, yp, z = test_data\n xg, yg = test_grid\n\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def quantile(self, hypercube):\n raise NotImplementedError()", "def test_masked_data_above(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"above\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def invertible(self):\n a = self._data\n 
return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]", "def invertierbar(a, N):\n return krypto1.ggT(a, N) == 1" ]
[ "0.79300445", "0.71616274", "0.7156937", "0.6335259", "0.57806724", "0.5717485", "0.55411136", "0.5492557", "0.5461046", "0.5437405", "0.53619474", "0.53439605", "0.53260297", "0.5306396", "0.52882934", "0.5254635", "0.5213377", "0.5212515", "0.5176069", "0.5159069", "0.5150638", "0.51319534", "0.50543195", "0.504499", "0.50395423", "0.5017885", "0.5017442", "0.5003561", "0.49751067", "0.49690893" ]
0.75066394
1
Test setting of inverse_order flag using percentiles_cube. In this case the flag should be true as the values associated with the percentiles increase in the opposite direction to the percentiles.
def test_inverse_order_true(self): percentiles_cube = self.test_cube.copy( data=np.flipud(self.test_cube.data)) plugin_instance = ProbabilitiesFromPercentiles2D( percentiles_cube, 'new_name') self.assertTrue(plugin_instance.inverse_ordering)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inverse_order_false(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(\n self.test_cube, 'new_name')\n self.assertFalse(plugin_instance.inverse_ordering)", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_values_inverse_ordering(self):\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected = 1.0 - expected\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_attributes_inverse_ordering(self):\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube,\n self.new_name)\n result = plugin_instance.create_probability_cube(self.percentiles_cube,\n self.orography_cube)\n self.assertEqual(result.attributes['relative_to_threshold'], 'above')", "def test_values_inverse_ordering(self):\n reference_cube = self.cube.copy()\n plugin = NonLinearWeights(cval=0.85)\n result = plugin.process(self.cube, self.coord_name, inverse_ordering=True)\n expected_result = np.array([0.45945946, 0.54054054])\n self.assertArrayAlmostEqual(result.data, expected_result)\n # check input cube blend coordinate order is unchanged\n self.assertArrayEqual(\n self.cube.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )\n # check weights cube and input cube blend coordinate orders match\n self.assertArrayEqual(\n result.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n 
self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test__inverse_transform_continuous(self):", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube", "def test_inverse_b(self):\n for q in self.all:\n self.assertTrue(\n (q*q.inverse()).almost_equal(Quaternion(1, 0, 0, 0)))", "def test_inverse_a(self):\n for q in self.all:\n self.assertTrue(\n (q.inverse()*q).almost_equal(Quaternion(1, 0, 0, 0)))", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_masked_data_below(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n 1 - self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n self.assertArrayEqual(result.data.mask, expected_mask)", "def quantile(self, hypercube):\n raise NotImplementedError()", "def _check_inverse_transform(self, Z):\n Z_round_trip = self.inverse_func(self.func(Z))\n if not np.allclose(Z_round_trip, Z, equal_nan=True):\n raise UserWarning(\n \"The 
provided functions are not strictly\"\n \" inverse of each other. If you are sure you\"\n \" want to proceed regardless, set\"\n \" 'check_inverse=False'.\"\n )", "def test_check_data_specifying_single_percentile_not_as_list(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=25)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def Q8_test():\n dispo = [False, True, True, False]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return 1 == indice(0, tab_dist, dispo)", "def test_probabilities_not_monotonically_increasing(self):\n data = np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_inverse_distance(method, test_data, test_grid):\n xp, yp, z = test_data\n xg, yg = test_grid\n\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)", "def test_multiple_inverse(self):\r\n # NOTE: multiple_inverse not very accurate close to 1\r\n self.assertFloatEqual(multiple_inverse(1 - 0.9990005, 10000), 1e-7)\r\n self.assertFloatEqual(multiple_inverse(0.4012631, 10), 0.05)\r\n self.assertFloatEqual(multiple_inverse(1e-20, 1), 1e-20)\r\n self.assertFloatEqual(multiple_inverse(1e-300, 1), 1e-300)\r\n self.assertFloatEqual(multiple_inverse(0.96875, 5), 0.5)\r\n self.assertFloatEqual(multiple_inverse(1e-19, 10), 1e-20)", "def test_masked_data_above(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"above\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n\n self.assertArrayEqual(result.data.mask, expected_mask)", "def invertierbar(a, N):\n return krypto1.ggT(a, N) == 1", "def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))", "def test(self, grid, 
flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1" ]
[ "0.7347653", "0.7248827", "0.7198209", "0.6305554", "0.57931304", "0.5644359", "0.56396455", "0.5557479", "0.5472661", "0.54367006", "0.54120636", "0.538846", "0.53692895", "0.534631", "0.52895766", "0.52163696", "0.5215714", "0.5201024", "0.5148742", "0.51197976", "0.51032746", "0.51025254", "0.50753045", "0.5052136", "0.50390655", "0.49787223", "0.49780646", "0.4970067", "0.4925025", "0.49149042" ]
0.7871805
0
Test relative_to_threshold attribute is suitable for the inverse_ordering case, when it should be 'above'.
def test_attributes_inverse_ordering(self): self.percentiles_cube.data = np.flipud(self.percentiles_cube.data) plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube, self.new_name) result = plugin_instance.create_probability_cube(self.percentiles_cube, self.orography_cube) self.assertEqual(result.attributes['relative_to_threshold'], 'above')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)", "def para_lower_than(threshold):\n\n return lambda step, curr_obj, curr_optimized_obj, extra_para: extra_para<threshold", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def check_threshold(data, threshold, above=True, flexibility=.02, cushion=3):\n if above:\n across = (data > threshold) * 1\n across_secondary = (data > (threshold * (1-flexibility))) * 1\n else:\n across = (data < threshold) * 1\n across_secondary = (data < (threshold * (1+flexibility))) * 1\n\n index_backdown = [i + 1 for i, x in enumerate(np.diff(across_secondary)) if x == -1]\n step_down = np.diff(np.concatenate(([0.], np.cumsum(across)[index_backdown])))\n across[index_backdown] = -step_down\n test_across = np.cumsum(across)\n times_across = sum(test_across == cushion)\n\n return across, test_across, times_across", "def test_change_lower(self):\n instance = self.traits_instance\n instance.low = -4.0\n instance.value = -2\n self.assertAlmostEqual(instance.value, -2)", "def LLR_above_thresh(self, threshold, groundtype):\n\t\tLLR = self.log_likelihood_ratios(groundtype=groundtype)\n\t\treturn (LLR >= threshold)", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def test_lessThan(self):\n self.assertTrue(Comparable(0) < Comparable(3))\n self.assertFalse(Comparable(2) < Comparable(0))", "def above(self,object):\n if( isinstance(object,Feature) ):\n return( self.maxY() < object.minY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.maxY() < object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.maxY() < object )\n else:\n logger.warning(\"SimpleCV did not recognize the input type to feature.above(). 
This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None", "def overlay_thresholding_function(threshold, positive=True):\n # from the interface class definition above, there will be 3 values\n # for the thresh type: inactive, less than, greater than\n t = threshold[0]\n if threshold[-1] == 'inactive':\n if positive:\n return lambda x: np.ones(x.shape, 'B')\n return lambda x: np.zeros(x.shape, 'B')\n elif threshold[-1] == 'less than':\n if positive:\n return lambda x: np.less(x,t)\n return lambda x: np.greater_equal(x,t)\n elif threshold[-1] == 'greater than':\n if positive:\n return lambda x: np.greater(x,t)\n return lambda x: np.less_equal(x,t)\n else:\n print 'unrecognized thresholding parameters:', threshold", "def test_lessThanOrEqual(self):\n self.assertTrue(Comparable(3) <= Comparable(3))\n self.assertTrue(Comparable(0) <= Comparable(3))\n self.assertFalse(Comparable(2) <= Comparable(0))", "def test_gt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def round_using_t(prediction, threshold):\n return (prediction >= threshold).astype('int')", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def test_output_is_above_threshold_counterfactuals(self):\n threshold = 0.1\n self._config['Regression threshold'] = str(threshold)\n self._example = {'x_1': 0.0, 'x_2': -5.0}\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n target_score = self._predict_and_return_score(self._example)\n self.assertLess(target_score, threshold)\n self.assertNotEmpty(output)\n for cf_example in output:\n cf_score = self._predict_and_return_score(cf_example)\n self.assertGreaterEqual(cf_score, threshold)", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def test_samples_close_to_inclusion_probability_priority(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n for i in range(n):\n s.process(i, 1.0)\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def test_le_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert a < b", "def define_threshold(self, threshold):\n def func(x, threshold):\n if x > threshold:\n return 'up'\n elif x < -threshold:\n return 'down'\n else:\n return 'stable'\n try:\n self.df['Direction'] = self.df.apply(lambda x: func(x['Return'], threshold), axis=1)\n except:\n print(\"issue\")\n return", "def test_lt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert a < b", "def test_greater_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def test_greater_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ge\"},\n )", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = 
data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_less_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::le\"},\n )", "def __lt__(self, other_node):\n return self.split_info.gain > other_node.split_info.gain", "def test_ada_boost_stump_classify_partitions_lt(self):\n i = 1\n range_min = self.data_matrix[:, i].min()\n threshold = (range_min * 2)\n inequal = 'lt'\n returned = ada_boost.stump_classify(self.data_matrix,\n i,\n threshold,\n inequal)\n expected = np.mat([1.0, -1.0, -1.0, -1.0])\n\n delta_between_elements = returned - expected.T\n self.assertFalse(delta_between_elements.any())", "def testHrtPrior(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"prior\")\n\n self.util.boolPropertyTest(self, attr, \"prior\")", "def _validate_threshold(self, proposal):\n threshold = proposal[\"value\"]\n if threshold <= 0:\n raise traitlets.TraitError(\"threshold must be greater than 0.\")\n return threshold" ]
[ "0.62599236", "0.62535155", "0.6050937", "0.6029088", "0.602097", "0.59990096", "0.59913033", "0.59730476", "0.5952634", "0.59435964", "0.5829159", "0.5757781", "0.5755329", "0.5735106", "0.57177514", "0.57121515", "0.5703613", "0.56988364", "0.5690524", "0.5688124", "0.5685663", "0.56835777", "0.5682426", "0.5673418", "0.5671962", "0.56500554", "0.5617335", "0.5608225", "0.5598445", "0.55852133" ]
0.6903596
0
Test that interpolated probabilities at given topography heights are sensible when we use the inverse_ordering set to True. This is for situations in which the values associated with the percentiles increase in the opposite direction, e.g. 0 % = 100m, 20% = 50m, etc. In this situation we expect the lowest points to have a probability of 1, and the highest points to have probabilities of 0. The probabilities between should be the inverse of what is found in the usual case.
def test_values_inverse_ordering(self): # Invert the values associated with the percentiles. self.percentiles_cube.data = np.flipud(self.percentiles_cube.data) expected = set_reference_probabilities() expected = 1.0 - expected probability_cube = ProbabilitiesFromPercentiles2D( self.percentiles_cube, 'new_name').percentile_interpolation( self.orography_cube, self.percentiles_cube) self.assertArrayAlmostEqual(probability_cube.data, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_inverse_order_false(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(\n self.test_cube, 'new_name')\n self.assertFalse(plugin_instance.inverse_ordering)", "def test_inverse_order_true(self):\n percentiles_cube = self.test_cube.copy(\n data=np.flipud(self.test_cube.data))\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.assertTrue(plugin_instance.inverse_ordering)", "def test_attributes_inverse_ordering(self):\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube,\n self.new_name)\n result = plugin_instance.create_probability_cube(self.percentiles_cube,\n self.orography_cube)\n self.assertEqual(result.attributes['relative_to_threshold'], 'above')", "def test_probabilities_not_monotonically_increasing(self):\n data = np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def test_samples_close_to_inclusion_probability_priority(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n for i in range(n):\n s.process(i, 1.0)\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def z_tailed_prob(z, tails):\r\n if tails == 'high':\r\n return z_high(z)\r\n elif tails == 'low':\r\n return z_low(z)\r\n else:\r\n return zprob(z)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n 
self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_values_inverse_ordering(self):\n reference_cube = self.cube.copy()\n plugin = NonLinearWeights(cval=0.85)\n result = plugin.process(self.cube, self.coord_name, inverse_ordering=True)\n expected_result = np.array([0.45945946, 0.54054054])\n self.assertArrayAlmostEqual(result.data, expected_result)\n # check input cube blend coordinate order is unchanged\n self.assertArrayEqual(\n self.cube.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )\n # check weights cube and input cube blend coordinate orders match\n self.assertArrayEqual(\n result.coord(self.coord_name).points,\n reference_cube.coord(self.coord_name).points,\n )", "def test_discrete_rounding_proposal(self):\n\n with pm.Model() as m:\n z = pm.Bernoulli(\"z\", p=0.7)\n like = pm.Potential(\"like\", z * 1.0)\n\n smc = IMH(model=m)\n smc.initialize_population()\n smc._initialize_kernel()\n\n assert smc.prior_logp_func(floatX(np.array([-0.51]))) == -np.inf\n assert np.isclose(smc.prior_logp_func(floatX(np.array([-0.49]))), np.log(0.3))\n assert np.isclose(smc.prior_logp_func(floatX(np.array([0.49]))), np.log(0.3))\n assert np.isclose(smc.prior_logp_func(floatX(np.array([0.51]))), np.log(0.7))\n assert smc.prior_logp_func(floatX(np.array([1.51]))) == -np.inf", "def fisher(probs):\r\n stat = -2 * log(array(probs)).sum()\r\n if isnan(stat):\r\n return nan\r\n else:\r\n try:\r\n return chi_high(stat, 2 * len(probs))\r\n except OverflowError as e:\r\n return nan", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def get_expected_probability(probabilities):\n\n expected = dict()\n for a, b in 
cwr(probabilities.keys(), 2):\n if a == b:\n expected[\"\".join(sorted([a, b]))] = probabilities[a] * probabilities[b]\n else:\n expected[\"\".join(sorted([a, b]))] = 2 * (probabilities[a] * probabilities[b])\n\n return expected", "def irizarry_enrichment_z(t_dict, pathway_list):\n\n\n t_hat = sum(t_dict[g][0] for g in pathway_list if g in t_dict) / len(pathway_list)\n es_score = numpy.sqrt(len(pathway_list)) * t_hat\n pvalue = 1 - stats.t.cdf(es_score, len(pathway_list) - 1)\n return pvalue", "def test_isentropic_pressure_p_increase_rh_out():\n lev = [85000., 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n rh = np.ones((4, 5, 5))\n rh[0, :] = 20.\n rh[1, :] = 40.\n rh[2, :] = 80.\n rh[3, :] = 100.\n relh = rh * units.percent\n isentlev = 296. * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. * units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def order_violations(s, im):\n return np.power(np.linalg.norm(np.maximum(0, s - im)),2)", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def test_valid_inclusion_probabilities(self):\n self.assertEqual(\n private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=1, eps=0.1, delta=0.5**30).compute_inclusion_prob(1),\n 0.5**30)\n self.assertEqual(\n private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=0.5,\n eps=0.1,\n delta=1.0,\n sampling_method=private_sampling.PrioritySamplingMethod)\n .compute_inclusion_prob(1), 0.5)\n s = private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=1, eps=0.1, delta=0.5**10)\n inclusion_prob = [s.compute_inclusion_prob(i) for i in range(0, 1000, 10)]\n for x in inclusion_prob:\n self.assertGreaterEqual(x, 0.0)\n self.assertLessEqual(x, 1.0)\n for i in range(len(inclusion_prob) - 1):\n self.assertGreaterEqual(inclusion_prob[i + 1], inclusion_prob[i])", "def test_isentropic_pressure_additional_args():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. 
* units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_prob_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.probs(wires=[0, 1])\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (5, 2)\r\n\r\n expected = (\r\n np.array(\r\n [\r\n [-2 * np.sin(x), 0],\r\n [\r\n -(np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n -(np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n ]\r\n )\r\n / 2\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def geo_mech_prob(low, high, epsilon):\n\n assert low <= high, \"low must be less than high\"\n int_high = numpy.floor(high)\n int_low = numpy.ceil(low) \n normalizing_factor = (1.0 - numpy.exp(-epsilon))/(1.0 + numpy.exp(-epsilon))\n prob = 1.0\n if int_high >= 0:\n prob = prob - numpy.exp(-epsilon * (int_high+1))/(1.0-numpy.exp(-epsilon))*normalizing_factor\n else:\n prob = prob - (1.0 - numpy.exp(-epsilon * abs(int_high))/(1.0-numpy.exp(-epsilon))*normalizing_factor)\n if int_low >= 0:\n prob = prob - (1.0 - numpy.exp(-epsilon * int_low)/(1.0-numpy.exp(-epsilon))*normalizing_factor)\n else:\n prob = prob - numpy.exp(-epsilon * (abs(int_low)+1))/(1.0-numpy.exp(-epsilon))*normalizing_factor\n return pro", "def test_estimate_statistics_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)\n s.process(\"a\", element_weight)\n sampling_probability = (FAILURE_PROBABILITY_INVERSE -\n 1) / FAILURE_PROBABILITY_INVERSE\n self.assertEqual(s.estimate_statistics(),\n element_weight / sampling_probability)", "def find_prob(self, z, tails=1):\n normal_table = self.normal_table\n\n if z > 4:\n prob = 0.5\n\n else:\n z0 = round(z, 1)\n z1 = str(round(z, 2) - z0)\n\n prob = round(normal_table[z1][z0], 6)\n prob *= tails\n\n return prob", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def pagerank(dict_prefs, nitems, eps_search=20):\n prefs_mat=np.zeros((nitems,nitems))\n for k,v in dict_prefs.items():\n if v==0:\n continue\n elif v>0:\n prefs_mat[k[1],k[0]]+=v\n else:\n prefs_mat[k[0],k[1]]-=v\n prefs_mat_orig=prefs_mat.copy()\n eps_grid=list(.5**np.logspace(0,1,eps_search))\n best=-10^5\n best_order=None\n \n for eps in eps_grid:\n prefs_mat=prefs_mat_orig.copy()\n for i in range(nitems):\n prefs_mat[:,i]+=eps\n tot=np.sum(prefs_mat[:,i])\n prefs_mat[:,i]=prefs_mat[:,i]/tot\n\n \n pr=np.ones((nitems,1))/nitems\n for i in range(30):\n pr=prefs_mat.dot(pr)\n lst_pagerank=list(np.argsort(pr.reshape(-1)))\n score_this_order=eval_ordering(lst_pagerank,dict_prefs)\n if score_this_order>best:\n best=score_this_order\n best_order=deepcopy(lst_pagerank)\n return best_order", "def findUncertainPairs(field_distances, 
data_model, bias=0.5):\n\n probability = core.scorePairs(field_distances, data_model)\n\n p_max = (1.0 - bias)\n logger.info(p_max)\n\n informativity = numpy.copy(probability)\n informativity[probability < p_max] /= p_max\n informativity[probability >= p_max] = (1 - probability[probability >= p_max])/(1-p_max)\n\n\n return numpy.argsort(-informativity)", "def edit_probs(result):\n for i in range(TOP_E):\n p = result.data[i][1]\n p = round(p, 4)\n # p_str = str(p)[1:]\n result.data[i][1] = p\n\n return result" ]
[ "0.6936694", "0.6612405", "0.6557043", "0.6536174", "0.5836456", "0.55769354", "0.535456", "0.5231798", "0.52008826", "0.5179406", "0.5144554", "0.508962", "0.50874543", "0.5033925", "0.49582127", "0.49430206", "0.4935415", "0.49341342", "0.49052998", "0.48981342", "0.4893713", "0.48931322", "0.4891794", "0.4884744", "0.48687923", "0.48522568", "0.48407868", "0.48339394", "0.48183018", "0.4813894" ]
0.6670819
1
Test for sensible behaviour when some percentile levels are equal.
def test_equal_percentiles(self):
        self.percentiles_cube.data[0, :, :].fill(300.)
        expected = set_reference_probabilities()
        expected[np.where(expected < 0.25)] = 0.
        probability_cube = ProbabilitiesFromPercentiles2D(
            self.percentiles_cube, 'new_name').percentile_interpolation(
                self.orography_cube, self.percentiles_cube)

        self.assertArrayAlmostEqual(probability_cube.data, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def _quantile_check(df: DataFrame) -> None:\n expected_percentiles = choose_set_of_percentiles(df[\"percentile\"].nunique())\n\n if not np.allclose(expected_percentiles, df[\"percentile\"].unique()):\n msg = (\n \"The forecast percentiles can not be considered as quantiles. 
\"\n f\"The forecast percentiles are {df['percentile'].unique()}.\"\n \"Based on the number of percentiles provided, the expected \"\n f\"percentiles would be {expected_percentiles}.\"\n )\n raise ValueError(msg)", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def test_check_data_specifying_single_percentile_not_as_list(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=25)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_likelihoods_equal_priors(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 
1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n for obs, exp in zip(likelihoods(equal, equal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n for obs, exp in zip(likelihoods(unequal, equal), unequal_answer):\r\n self.assertFloatEqual(obs, exp)", "def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def test_single_percentile(self):\n percentiles_cube = set_up_percentiles_cube()\n percentiles_cube = percentiles_cube[0]\n msg = \"Percentile coordinate has only one value. Interpolation\"\n with self.assertRaisesRegex(ValueError, msg):\n ProbabilitiesFromPercentiles2D(percentiles_cube, 'new_name')", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_multiple_comparisons(self):\r\n self.assertFloatEqual(multiple_comparisons(1e-7, 10000), 1 - 0.9990005)\r\n self.assertFloatEqual(multiple_comparisons(0.05, 10), 0.4012631)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 1), 1e-20)\r\n self.assertFloatEqual(multiple_comparisons(1e-300, 1), 1e-300)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.95,\r\n 3),\r\n 0.99987499999999996)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.75,\r\n 100),\r\n 0.999999999999679)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 1000), 1)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.01,\r\n 1000),\r\n 0.99995682875259)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 5), 0.96875)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 10), 1e-19)", "def test_round_number_larger(self):\r\n given_n = 85\r\n total_n = 200\r\n expected_given_percent = 42.5\r\n expected_other_percent = 57.5\r\n result = n_percent(given_n, total_n)\r\n\r\n self.assertEqual(expected_given_percent, result['given_percent'])\r\n self.assertEqual(expected_other_percent, result['other_percent'])", "def test_probabilities_not_monotonically_increasing(self):\n data = np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_check_data(self):\n data = np.array(\n [\n [[16.8, 8.0, 10.4], [-46, 8.0, -78.4], 
[-78.4, -86.5, -89.2]],\n [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],\n [[55.2, 36.0, 50.4], [36.0, 11.6, 12.0], [11.0, 9.0, -2.8]],\n ],\n dtype=np.float32,\n )\n\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def test_run_simulations_and_get_percentile_allele_length_2():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, allele_length=2, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def test_check_data_over_specifying_percentiles(self):\n msg = \"Cannot specify both no_of_percentiles and percentiles\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin().process(self.cube, no_of_percentiles=3, percentiles=[25, 50, 75])", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))", "def test_percentile_kurtosis():\n f = np.asarray([\n [0.99, 1.0, 0.5, 0.52],\n [0.69, 0.6, 0.61, 1.0]])\n R = common_metrics.percentile_kurtosis(f, maximise=True)\n expected = np.asarray(\n [1.06382979, 5.0])\n assert np.allclose(R, expected)", "def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)" ]
[ "0.73339146", "0.72588104", "0.7193721", "0.7059122", "0.70239633", "0.70094275", "0.69339293", "0.6898088", "0.6817949", "0.6797381", "0.66857415", "0.66115236", "0.65848815", "0.6558533", "0.655689", "0.6524207", "0.651964", "0.64494", "0.6404538", "0.6392922", "0.6383709", "0.63339484", "0.63125706", "0.62750775", "0.62621856", "0.6145882", "0.6133841", "0.61115026", "0.60763764", "0.60746664" ]
0.74234265
0
Test for sensible behaviour when all percentile levels are equal at some points.
def test_all_equal_percentiles(self):
        self.percentiles_cube.data[:, :, 0:2].fill(300.)
        expected = set_reference_probabilities()
        expected[0:2, 0:2] = 0
        expected[2:, 0:2] = 1
        probability_cube = ProbabilitiesFromPercentiles2D(
            self.percentiles_cube, 'new_name').percentile_interpolation(
                self.orography_cube, self.percentiles_cube)

        self.assertArrayAlmostEqual(probability_cube.data, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def _quantile_check(df: DataFrame) -> None:\n expected_percentiles = choose_set_of_percentiles(df[\"percentile\"].nunique())\n\n if not np.allclose(expected_percentiles, df[\"percentile\"].unique()):\n msg = (\n \"The forecast percentiles can not be considered as quantiles. 
\"\n f\"The forecast percentiles are {df['percentile'].unique()}.\"\n \"Based on the number of percentiles provided, the expected \"\n f\"percentiles would be {expected_percentiles}.\"\n )\n raise ValueError(msg)", "def test_check_data_specifying_single_percentile_not_as_list(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=25)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with 
self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def test_check_data(self):\n data = np.array(\n [\n [[16.8, 8.0, 10.4], [-46, 8.0, -78.4], [-78.4, -86.5, -89.2]],\n [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],\n [[55.2, 36.0, 50.4], [36.0, 11.6, 12.0], [11.0, 9.0, -2.8]],\n ],\n dtype=np.float32,\n )\n\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def test_likelihoods_equal_priors(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n for obs, exp in zip(likelihoods(equal, equal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n for obs, exp in zip(likelihoods(unequal, equal), unequal_answer):\r\n self.assertFloatEqual(obs, exp)", "def test_single_percentile(self):\n percentiles_cube = set_up_percentiles_cube()\n percentiles_cube = percentiles_cube[0]\n msg = \"Percentile coordinate has only one value. 
Interpolation\"\n with self.assertRaisesRegex(ValueError, msg):\n ProbabilitiesFromPercentiles2D(percentiles_cube, 'new_name')", "def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_multiple_comparisons(self):\r\n self.assertFloatEqual(multiple_comparisons(1e-7, 10000), 1 - 0.9990005)\r\n self.assertFloatEqual(multiple_comparisons(0.05, 10), 0.4012631)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 1), 1e-20)\r\n self.assertFloatEqual(multiple_comparisons(1e-300, 1), 1e-300)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.95,\r\n 3),\r\n 0.99987499999999996)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.75,\r\n 100),\r\n 0.999999999999679)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 1000), 1)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.01,\r\n 1000),\r\n 0.99995682875259)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 5), 0.96875)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 10), 1e-19)", "def test_check_single_threshold(self):\n data = np.array(\n [\n [[13.2, 8.0, 13.2], [-46.0, 8.0, -78.4], [-78.4, -86.5, -89.2]],\n [[34, 31.1111, 34.0], [27.5, 31.1111, 8.0], [8.0, -32.5, -46.0]],\n [[54.8, 54.2222, 54.8], [53.5, 54.2222, 49.6], [49.6, 34, -2.8]],\n ],\n dtype=np.float32,\n )\n\n threshold_coord = find_threshold_coordinate(self.cube)\n cube = next(self.cube.slices_over(threshold_coord))\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=4)", "def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))", "def test_probabilities_not_monotonically_increasing(self):\n data = 
np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_run_simulations_and_get_percentile_allele_length_2():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, allele_length=2, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def test_correct_p_values_large_correction(self):\r\n exp = [1, None, 0.03, 0.03]\r\n obs = self.mc._correct_p_values([0.5, None, 0.01, 0.01])\r\n self.compare_multiple_level_array(obs, exp)", "def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]", "def test_likelihoods_equal_evidence(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n not_unity = [0.7, 0.7, 0.7, 0.7]\r\n\r\n for obs, exp in zip(likelihoods(equal, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n # should be the same if evidences don't sum to 1\r\n for obs, exp in zip(likelihoods(not_unity, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)", "def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))" ]
[ "0.7465344", "0.73089546", "0.72304213", "0.7225448", "0.70770556", "0.7074356", "0.7005688", "0.6945462", "0.67612404", "0.67586714", "0.6628192", "0.6621422", "0.6559432", "0.65530586", "0.6547619", "0.65411127", "0.65288323", "0.64875853", "0.644483", "0.6342356", "0.63316226", "0.6285686", "0.62349576", "0.62114435", "0.61836123", "0.6163666", "0.61612785", "0.6147989", "0.61323035", "0.60852754" ]
0.75860626
0
Test for sensible behaviour when some percentile levels are equal in the case of inverse ordering (as described above).
def test_equal_percentiles_inverse_ordering(self):
        self.percentiles_cube.data[0, :, :].fill(300.)
        # Invert the values associated with the percentiles.
        self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)
        expected = set_reference_probabilities()
        expected[np.where(expected <= 0.25)] = 0.
        expected = 1.0 - expected
        probability_cube = ProbabilitiesFromPercentiles2D(
            self.percentiles_cube, 'new_name').percentile_interpolation(
                self.orography_cube, self.percentiles_cube)
        self.assertArrayAlmostEqual(probability_cube.data, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_values_inverse_ordering(self):\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected = 1.0 - expected\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_inverse_order_true(self):\n percentiles_cube = self.test_cube.copy(\n data=np.flipud(self.test_cube.data))\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.assertTrue(plugin_instance.inverse_ordering)", "def test_inverse_order_false(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(\n self.test_cube, 'new_name')\n self.assertFalse(plugin_instance.inverse_ordering)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_likelihoods_equal_priors(self):\r\n 
equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n for obs, exp in zip(likelihoods(equal, equal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n for obs, exp in zip(likelihoods(unequal, equal), unequal_answer):\r\n self.assertFloatEqual(obs, exp)", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def test_probabilities_not_monotonically_increasing(self):\n data = np.array([0.05, 0.7, 0.95])\n data = data[:, np.newaxis, np.newaxis]\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n warning_msg = \"The probability values used to construct the\"\n with pytest.warns(UserWarning, match=warning_msg):\n Plugin()._probabilities_to_percentiles(cube, self.percentiles)", "def test_check_data_specifying_single_percentile_not_as_list(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=25)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_attributes_inverse_ordering(self):\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n plugin_instance = ProbabilitiesFromPercentiles2D(self.percentiles_cube,\n self.new_name)\n result = plugin_instance.create_probability_cube(self.percentiles_cube,\n self.orography_cube)\n self.assertEqual(result.attributes['relative_to_threshold'], 'above')", "def _quantile_check(df: DataFrame) -> None:\n expected_percentiles = choose_set_of_percentiles(df[\"percentile\"].nunique())\n\n if not np.allclose(expected_percentiles, df[\"percentile\"].unique()):\n msg = (\n \"The forecast percentiles can not be considered as quantiles. 
\"\n f\"The forecast percentiles are {df['percentile'].unique()}.\"\n \"Based on the number of percentiles provided, the expected \"\n f\"percentiles would be {expected_percentiles}.\"\n )\n raise ValueError(msg)", "def test_likelihoods_equal_evidence(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n not_unity = [0.7, 0.7, 0.7, 0.7]\r\n\r\n for obs, exp in zip(likelihoods(equal, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n # should be the same if evidences don't sum to 1\r\n for obs, exp in zip(likelihoods(not_unity, unequal), equal_answer):\r\n self.assertFloatEqual(obs, exp)", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def percentile(histogram, percentile=0.75):\n covered = 0\n normalization = sum(histogram.values())\n for key, frequency in sorted(histogram.items(), reverse=True):\n covered += frequency\n assert covered <= normalization\n if covered > ((1.0 - percentile) * normalization):\n return key\n raise RuntimeError('Percentile computation should have terminated '\n 'mid-loop.')", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def 
test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test_multiple_comparisons(self):\r\n self.assertFloatEqual(multiple_comparisons(1e-7, 10000), 1 - 0.9990005)\r\n self.assertFloatEqual(multiple_comparisons(0.05, 10), 0.4012631)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 1), 1e-20)\r\n self.assertFloatEqual(multiple_comparisons(1e-300, 1), 1e-300)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.95,\r\n 3),\r\n 0.99987499999999996)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.75,\r\n 100),\r\n 0.999999999999679)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 1000), 1)\r\n self.assertFloatEqual(\r\n multiple_comparisons(\r\n 0.01,\r\n 1000),\r\n 0.99995682875259)\r\n self.assertFloatEqual(multiple_comparisons(0.5, 5), 0.96875)\r\n self.assertFloatEqual(multiple_comparisons(1e-20, 10), 1e-19)", "def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def percentile(N, percent):\n N.sort()\n if not N:\n return None\n k = (len(N) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return N[int(k)]\n d0 = N[int(f)] * (c - k)\n d1 = N[int(c)] * (k - f)\n return d0 + d1", "def test_simple_check_data_below(self):\n expected = np.array([8.4, 10.61538462, 11.84615385])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])[::-1]\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32),\n ECC_TEMPERATURE_THRESHOLDS,\n threshold_units=\"degC\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def test_percentile_kurtosis():\n f = np.asarray([\n [0.99, 1.0, 0.5, 0.52],\n [0.69, 0.6, 0.61, 1.0]])\n R = common_metrics.percentile_kurtosis(f, maximise=True)\n expected = np.asarray(\n [1.06382979, 5.0])\n assert np.allclose(R, expected)", "def test_likelihoods_unequal_evidence(self):\r\n not_unity = [1, 0.5, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n products = [1.4545455, 0.7272727, 0.3636364, 0.3636364]\r\n\r\n # if priors and evidence both unequal, likelihoods should change\r\n #(calculated using StarCalc)\r\n for obs, exp in zip(likelihoods(not_unity, unequal), products):\r\n self.assertFloatEqual(obs, exp)", "def test_simple_check_data_above(self):\n expected = np.array([8.15384615, 9.38461538, 11.6])\n expected = expected[:, np.newaxis, np.newaxis]\n\n data = np.array([0.95, 0.3, 0.05])\n data = data[:, np.newaxis, np.newaxis]\n\n cube = set_up_probability_cube(\n data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units=\"degC\"\n )\n\n result = 
Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n self.assertArrayAlmostEqual(result.data, expected)", "def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]" ]
[ "0.6926413", "0.687153", "0.6708092", "0.66441315", "0.6613673", "0.6609807", "0.65475774", "0.654371", "0.64967823", "0.64276195", "0.639919", "0.6376965", "0.6329518", "0.6276094", "0.6139106", "0.61351883", "0.6096647", "0.60896903", "0.60634524", "0.60582614", "0.60257256", "0.6005926", "0.5997282", "0.5993479", "0.59740686", "0.59667313", "0.5913765", "0.5885069", "0.58680195", "0.58450896" ]
0.77178353
0
Test the "process" function returns a single cube whose shape matches that of the input threshold (orography) field.
def test_basic(self):
        probability_cube = self.plugin_instance.process(self.orography_cube)
        self.assertIsInstance(probability_cube, iris.cube.Cube)
        self.assertSequenceEqual(probability_cube.shape,
                                 self.reference_cube.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def test_unit_conversion_compatible(self):\n self.orography_cube.convert_units('ft')\n probability_cube = self.plugin_instance.process(self.orography_cube)\n self.assertIsInstance(probability_cube, iris.cube.Cube)\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), 
(0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def test_basic(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertIsInstance(result, iris.cube.Cube)", "def process(self, cubes: Union[CubeList, List[Cube]]) -> Cube:\n self._extract_input_cubes(cubes)\n processed_falling_level = iris.util.squeeze(\n self.get_discriminating_percentile(self.falling_level_cube)\n )\n\n result_data = np.where(\n self.comparator(self.orography_cube.data, processed_falling_level.data),\n 1,\n 0,\n ).astype(np.int8)\n mandatory_attributes = generate_mandatory_attributes([self.falling_level_cube])\n\n cube = create_new_diagnostic_cube(\n f\"probability_of_{self.param}_at_surface\",\n Unit(\"1\"),\n self.falling_level_cube,\n mandatory_attributes,\n data=result_data,\n )\n return cube", "def test_threshold_dimensions(self, warning_list=None):\n threshold_data_3d = np.broadcast_to(self.orography_cube.data,\n (2, 4, 4))\n grid_x = self.orography_cube.coord(\"projection_x_coordinate\")\n grid_y = self.orography_cube.coord(\"projection_y_coordinate\")\n realization = DimCoord(np.arange(2), standard_name=\"realization\",\n units=\"1\")\n threshold_cube = iris.cube.Cube(threshold_data_3d,\n long_name=\"topography\", units=\"m\",\n dim_coords_and_dims=[(realization, 0),\n (grid_y, 1),\n (grid_x, 2)])\n\n warning_msg = 'threshold cube has too many'\n probability_cube = self.plugin_instance.process(threshold_cube)\n self.assertTrue(any(item.category == UserWarning\n for item in warning_list))\n self.assertTrue(any(warning_msg in str(item)\n for item in warning_list))\n\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)\n 
self.assertArrayAlmostEqual(probability_cube.data,\n set_reference_probabilities())", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def getCube(unique_name):", "def 
UnitCubeTest(P):\n above = 0\n below = 0\n for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]:\n s = P.test(a, b, c)\n if s > 0:\n above = 1\n elif s < 0:\n below = 1\n return above - below", "def test_smallcube_in_bigcube(self):\n w = mt.createCube(marker=1)\n c = mt.createCube(size=[0.5, 1.0, 1.0], marker=2)\n\n w = mt.mergePLC3D([w, c])\n self.assertEqual(w.nodeCount(), 8+8)\n self.assertEqual(w.boundaryCount(), 8)\n\n # will not work until edge intersection is working\n # d = mt.createCube(size=[0.8, 1.0, 1.0],\n # pos=[0.1, 0.0, 1.0],\n # marker=3)\n # w = mt.mergePLC3D([w, d])\n # self.assertEqual(w.nodeCount(), 8+8)\n # self.assertEqual(w.boundaryCount(), 8)\n\n # print(w)\n pg.show(w)\n pg.show(mt.createMesh(w))", "def basicProcessing(volume, sigma, order, output, mode, truncate):\n\n\n #### Filters ###\n\n result = gaussian_filter(input=volume, sigma=sigma, order=order, output=output, mode=mode, truncate=truncate)\n\n val = threshold_otsu(result)\n print(\"val : {}\".format(val))\n\n mask = np.zeros(volume.shape, dtype=np.int8)\n mask[volume > val] = 1\n #mask = mask.astype(int)\n\n print(\"mask shape: {}\".format(mask.shape))\n print(mask)\n\n\n #### Morphological Operation ###\n\n # Opening removes small objects\n r1 = binary_opening(mask, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n # Closing removes small holes\n r2 = binary_closing(r1, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n # 3x3x3 structuring element with connectivity 4 or 8\n struct1 = generate_binary_structure(3, 1) # no diagonal elements\n #struct1 = generate_binary_structure(3, 2) # with diagonal elements\n ############struct1 = struct1.astype(int)\n print (struct1)\n\n\n #r3 = binary_dilation(r2).astype(int)\n r3 = binary_dilation(r2, structure=struct1).astype(int) # using a structure element\n\n # Erosion removes objects smaller than the structure\n r4 = binary_erosion(r3, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n #### Measurements ###\n\n struct2 = np.ones((3, 3, 3), dtype=np.int8)\n labeled_array, num_features = label(r4, structure=struct2)\n\n #print(labeled_array)\n print(num_features)\n\n return labeled_array, num_features", "def surface_area_of_cube(side):\n return side", "def test_fails_input_not_a_cube(self):\n plugin = NonLinearWeights(0.85)\n notacube = 0.0\n msg = \"The first argument must be an instance of \" \"iris.cube.Cube\"\n with self.assertRaisesRegex(TypeError, msg):\n plugin.process(notacube, self.coord_name)", "def test_local(self):\n from trimesh.voxel import creation\n\n mesh = g.trimesh.creation.box()\n\n # it should have some stuff\n voxel = creation.local_voxelize(\n mesh=mesh,\n point=[.5, .5, .5],\n pitch=.1,\n radius=5,\n fill=True)\n\n assert len(voxel.shape) == 3\n\n # try it when it definitely doesn't hit anything\n empty = creation.local_voxelize(\n mesh=mesh,\n point=[10, 10, 10],\n pitch=.1,\n radius=5,\n fill=True)\n # shouldn't have hit anything\n assert empty is None\n\n # try it when it is in the center of a volume\n creation.local_voxelize(\n mesh=mesh,\n point=[0, 0, 0],\n pitch=.1,\n radius=2,\n fill=True)", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def cube_shape_check_without_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"realization\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == 
(3, 2)", "def _mask_cube(cube):\n cube = cube.copy()\n val_range = np.ma.max(cube.data) - np.ma.min(cube.data)\n threshold = val_range * 5e-2\n cube.data = np.ma.masked_inside(cube.data, -threshold, threshold)\n return cube", "def test_c0q1(self):\n self.check_c0q1(test_hexMesh_3x3=False,use_petsc=True, name=\"_proteusMesh_\")", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 
0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 
0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def match_det2cube(self, input_model,\n x, y, file_slice_no,\n this_par1, this_par2,\n spaxel,\n c1_offset, c2_offset):\n\n#________________________________________________________________________________\n if self.instrument == 'MIRI':\n\n 
det2ab_transform = input_model.meta.wcs.get_transform('detector', 'alpha_beta')\n detector2v23 = input_model.meta.wcs.get_transform('detector', 'v2v3')\n v23toworld = input_model.meta.wcs.get_transform(\"v2v3\", \"world\")\n worldtov23 = input_model.meta.wcs.get_transform(\"world\", \"v2v3\")\n v2ab_transform = input_model.meta.wcs.get_transform('v2v3',\n 'alpha_beta')\n\n alpha, beta, wave = det2ab_transform(x, y)\n v2, v3, lam23 = detector2v23(x, y)\n ra, dec, lam = v23toworld(v2, v3, lam23)\n\n valid1 = np.isfinite(v2)\n valid2 = np.isfinite(v3)\n\n if self.weighting == 'miripsf':\n wave_resol = self.instrument_info.Get_RP_ave_Wave(this_par1, this_par2)\n alpha_resol = self.instrument_info.Get_psf_alpha_parameters()\n beta_resol = self.instrument_info.Get_psf_beta_parameters()\n\n # transform Cube Spaxel centers to alpha,beta of exposure\n # for MIRI weighting parameters are based on distance in\n # alpha-beta coord system\n # transform the cube coordinate values to alpha and beta values\n # xi,eta -> ra,dec\n # world -> v2,v3\n # v2,v3 -> local alpha,beta\n\n elif self.instrument == 'NIRSPEC':\n islice = file_slice_no\n slice_wcs = nirspec.nrs_wcs_set_input(input_model, islice)\n\n x, y = wcstools.grid_from_bounding_box(slice_wcs.bounding_box, \n step=(1, 1), center=True)\n ra, dec, lam = slice_wcs(x, y) # return v2,v3 are in degrees\n valid1 = np.isfinite(ra)\n valid2 = np.isfinite(dec)\n#________________________________________________________________________________\n#________________________________________________________________________________\n# Slices are curved on detector. A slice region is grabbed by corner regions so\n# the region returned may include pixels not value for slice. There are gaps\n# between the slices. Pixels not belonging to a slice are assigned NaN values.\n\n x = x.astype(np.int)\n y = y.astype(np.int)\n\n flux_all = input_model.data[y, x]\n# error_all = input_model.err[y, x]\n dq_all = input_model.dq[y, x]\n\n valid3 = np.isfinite(lam)\n valid4 = np.isfinite(flux_all)\n valid = valid1 & valid2 & valid3 & valid4\n#________________________________________________________________________________\n# using the DQFlags from the input_image find pixels that should be excluded\n# from the cube mapping\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(dq_all, all_flags) == 0) & (valid == True))\n\n # good data holds the location of pixels we want to map to cube\n flux = flux_all[good_data]\n# error = error_all[good_data]\n wave = lam[good_data]\n\n# xpix = x[good_data] # only used for testing\n# ypix = y[good_data] # only used for testing\n\n ra = ra - c1_offset / 3600.0\n dec = dec - c2_offset / 3600.0\n ra_use = ra[good_data]\n dec_use = dec[good_data]\n if self.instrument == 'MIRI':\n # need alpha,beta if weigthing is miripsf or cubes in alpha-beta space\n alpha_det = alpha[good_data]\n beta_det = beta[good_data]\n# MIRI can make cubes in alpha-beta:\n if self.coord_system == 'alpha-beta':\n coord1 = alpha[good_data]\n coord2 = beta[good_data]\n\n else:\n# xi,eta in arc seconds\n xi, eta = coord.radec2std(self.Crval1, self.Crval2, ra_use, dec_use)\n coord1 = xi\n coord2 = eta\n\n nplane = self.naxis1 * self.naxis2\n lower_limit = 0.01\n\n# iprint = 0\n# now loop over the pixel values for this region 
and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n#________________________________________________________________________________\n # Cube.Xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n# if(ipt > 2): sys.exit('STOP')\n# print('For point ',coord1[ipt],coord2[ipt],wave[ipt],ipt)\n\n# if(ipt == 0):\n# print('size of Xcenters',self.Xcenters.size)\n xdistance = (self.Xcenters - coord1[ipt])\n ydistance = (self.Ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n indexr = np.where(radius <= self.rois)\n indexz = np.where(abs(self.zcoord - wave[ipt]) <= self.roiw)\n\n# print('indexz',indexz)\n# print('indexr',indexr)\n zlam = self.zcoord[indexz] # z Cube values falling in wavelength roi\n xi_cube = self.Xcenters[indexr] # x Cube values within radius\n eta_cube = self.Ycenters[indexr] # y cube values with the radius\n\n# print('found xi_cube',xi_cube)\n# print('found eta_cube',eta_cube)\n\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for ir, rr in enumerate(indexr[0]):\n# yy_cube = int(rr / self.naxis1)\n# xx_cube = rr - yy_cube * self.naxis1\n# print('xx yy cube',rr,self.naxis1,xx_cube,yy_cube)\n#________________________________________________________________________________\n if self.weighting == 'msm':\n d1 = (xi_cube[ir] - coord1[ipt]) / self.Cdelt1\n d2 = (eta_cube[ir] - coord2[ipt]) / self.Cdelt2\n d3 = (zlam[iz] - wave[ipt]) / self.Cdelt3\n\n weight_distance = math.sqrt(d1 * d1 + d2 * d2 + d3 * d3)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# if weight is miripsf -distances determined in alpha-beta coordinate system\n elif self.weighting == 'miripsf':\n weights = FindNormalizationWeights(wave[ipt],\n wave_resol,\n alpha_resol,\n beta_resol)\n\n\n ra_spaxel, dec_spaxel = coord.std2radec(self.Crval1,\n self.Crval2,\n xi_cube[ir],\n eta_cube[ir])\n\n v2_spaxel, v3_spaxel, zl = worldtov23(ra_spaxel,\n dec_spaxel,\n zlam[iz])\n\n alpha_spaxel, beta_spaxel, wave_spaxel = v2ab_transform(v2_spaxel,\n v3_spaxel,\n zlam[iz])\n alpha_distance = alpha_det[ipt] - alpha_spaxel\n beta_distance = beta_det[ipt] - beta_spaxel\n wave_distance = abs(wave[ipt] - wave_spaxel)\n\n xn = alpha_distance / weights[0]\n yn = beta_distance / weights[1]\n wn = wave_distance / weights[2]\n\n # only included the spatial dimensions\n weight_distance = math.sqrt(xn * xn + yn * yn + wn * wn)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if weight_distance < lower_limit: weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n cube_index = istart + rr\n spaxel[cube_index].flux = spaxel[cube_index].flux + weight_distance * flux[ipt]\n spaxel[cube_index].flux_weight = spaxel[cube_index].flux_weight + weight_distance\n spaxel[cube_index].iflux = spaxel[cube_index].iflux + 1", "def goal_test(c):\n return c == GOAL_CUBE", "def 
cube_shape_check_with_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"realization\",\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == (2, 3, 2)", "def match_det2cube(x, y, sliceno, start_slice, input_model, transform,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n xcoord, zcoord,\n crval1, crval3, cdelt1, cdelt3, naxis1, naxis2):\n nxc = len(xcoord)\n nzc = len(zcoord)\n\n\n sliceno_use = sliceno - start_slice + 1\n# 1-1 mapping in beta\n yy = sliceno_use - 1\n\n pixel_dq = input_model.dq[y, x]\n\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(pixel_dq, all_flags) == 0))\n\n # good data holds the location of pixels we want to map to cube\n x = x[good_data]\n y = y[good_data]\n\n #center of first pixel, x,y = 1 for Adrian's equations\n # but we want the pixel corners, x,y values passed into this routine start at 0\n pixel_flux = input_model.data[y, x]\n\n yy_bot = y\n yy_top = y + 1\n xx_left = x\n xx_right = x + 1\n\n alpha, beta, lam = transform(x, y)\n alpha1, beta1, lam1 = transform(xx_left, yy_bot)\n alpha2, beta2, lam2 = transform(xx_right, yy_bot)\n alpha3, beta3, lam3 = transform(xx_right, yy_top)\n alpha4, beta4, lam4 = transform(xx_left, yy_top)\n\n nn = len(x)\n # Loop over all pixels in slice\n for ipixel in range(0, nn - 1):\n\n # detector pixel -> 4 corners\n # In alpha,wave space\n # in beta space: beta center + width\n\n alpha_corner = []\n wave_corner = []\n\n alpha_corner.append(alpha1[ipixel])\n alpha_corner.append(alpha2[ipixel])\n alpha_corner.append(alpha3[ipixel])\n alpha_corner.append(alpha4[ipixel])\n\n wave_corner.append(lam1[ipixel])\n wave_corner.append(lam2[ipixel])\n wave_corner.append(lam3[ipixel])\n wave_corner.append(lam4[ipixel])\n\n#________________________________________________________________________________\n# Now it does not matter the WCS method used\n alpha_min = min(alpha_corner)\n alpha_max = max(alpha_corner)\n wave_min = min(wave_corner)\n wave_max = max(wave_corner)\n #_______________________________________________________________________\n\n Area = FindAreaQuad(alpha_min, wave_min, alpha_corner, wave_corner)\n\n # estimate the where the pixel overlaps in the cube\n # find the min and max values in the cube xcoord,ycoord and zcoord\n\n MinA = (alpha_min - crval1) / cdelt1\n MaxA = (alpha_max - crval1) / cdelt1\n ix1 = max(0, int(math.trunc(MinA)))\n ix2 = int(math.ceil(MaxA))\n if ix2 >= nxc:\n ix2 = nxc - 1\n\n MinW = (wave_min - crval3) / cdelt3\n MaxW = (wave_max - crval3) / cdelt3\n iz1 = int(math.trunc(MinW))\n iz2 = int(math.ceil(MaxW))\n if iz2 >= nzc:\n iz2 = nzc - 1\n #_______________________________________________________________________\n # loop over possible overlapping cube pixels\n# noverlap = 0\n nplane = naxis1 * naxis2\n\n for zz in range(iz1, iz2 + 1):\n zcenter = zcoord[zz]\n istart = zz * nplane\n\n for xx in range(ix1, ix2 + 1):\n cube_index = istart + yy * naxis1 + xx #yy = slice # -1\n xcenter = xcoord[xx]\n AreaOverlap = SH_FindOverlap(xcenter, zcenter,\n cdelt1, cdelt3,\n alpha_corner, wave_corner)\n\n if AreaOverlap > 0.0:\n AreaRatio = AreaOverlap / Area\n 
spaxel_flux[cube_index] = spaxel_flux[cube_index] + (AreaRatio * pixel_flux[ipixel])\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + AreaRatio\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1", "def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_marching(self):\n try:\n from skimage import measure # NOQA\n except ImportError:\n g.log.warning('no skimage, skipping marching cubes test')\n return\n\n # make sure offset is correct\n matrix = g.np.ones((3, 3, 3), dtype=bool)\n mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix)\n assert mesh.is_watertight\n\n mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(\n matrix=matrix).apply_scale(3.0)\n assert mesh.is_watertight", "def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def supported_cuba(self):", "def supported_cuba(self):" ]
[ "0.6223342", "0.581257", "0.5645637", "0.5620068", "0.55189896", "0.5439803", "0.54124796", "0.54022574", "0.5321581", "0.525051", "0.5183953", "0.5169333", "0.5150942", "0.5149574", "0.5130358", "0.5119568", "0.50816226", "0.5041569", "0.5040525", "0.5014353", "0.50084394", "0.5000927", "0.49925333", "0.49841753", "0.49699968", "0.49699968", "0.4953141", "0.4939755", "0.49320287", "0.49320287" ]
0.6590361
0
Test the "process" function converts units appropriately if possible when the input cubes are in different units.
def test_unit_conversion_compatible(self): self.orography_cube.convert_units('ft') probability_cube = self.plugin_instance.process(self.orography_cube) self.assertIsInstance(probability_cube, iris.cube.Cube) self.assertSequenceEqual(probability_cube.shape, self.reference_cube.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unit_conversion_incompatible(self):\n self.orography_cube.units = 'K'\n msg = \"Unable to convert from\"\n with self.assertRaisesRegex(ValueError, msg):\n self.plugin_instance.process(self.orography_cube)", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def preprocess_sub_units(self):\n if self.unit == \"char\":\n self.preprocess_char()\n elif self.unit == \"char-ngram\":\n self.preprocess_char_ngram()\n elif self.unit == \"morpheme\":\n self.preprocess_morpheme()\n elif self.unit == \"oracle\":\n self.preprocess_oracle()\n else:\n sys.exit(\"Unknown unit\")", "def test_transform(self):\n result = transform((1, 2) ,2, 2)\n self.assertEqual(result, (4 * PIXEL, 3 * PIXEL))", "def _standardise_dtypes_and_units(cube: Cube) -> None:\n\n def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n \"\"\"\n Returns an object updated if necessary to the required dtype\n\n Args:\n obj:\n The object to be updated\n required_dtype:\n The dtype required\n\n Returns:\n The updated object\n \"\"\"\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj\n\n cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))\n for coord in cube.coords():\n if coord.name() in TIME_COORDS and not check_units(coord):\n coord.convert_units(get_required_units(coord))\n req_dtype = get_required_dtype(coord)\n # ensure points and bounds have the same dtype\n if np.issubdtype(req_dtype, np.integer):\n coord.points = round_close(coord.points)\n coord.points = as_correct_dtype(coord.points, req_dtype)\n if coord.has_bounds():\n if np.issubdtype(req_dtype, np.integer):\n coord.bounds = round_close(coord.bounds)\n coord.bounds = as_correct_dtype(coord.bounds, req_dtype)", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def test__transform_continuous(self):", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def test_transform(self):\n t = Quantize()\n assert t.transform(8.6) == 9\n assert t.transform(8.4) == 8\n assert t.transform(5.3) == 5\n assert numpy.all(t.transform([8.6, 5.3]) == numpy.array([9, 5], dtype=int))", "def convert_units(cube, units):\n try:\n 
cube.convert_units(units)\n except ValueError:\n if not _try_special_conversions(cube, units):\n raise\n\n return cube", "def useUnits():", "def process_unit_input(in_,\n default_units: UnitLike = None,\n convert: bool = False,\n check_dims: bool = False,\n copy: bool = True,\n registry: unyt.UnitRegistry = None) -> unyt_array:\n parser = UnitInputParser(default_units=default_units,\n convert=convert,\n check_dims=check_dims,\n copy=copy,\n registry=registry)\n return parser.parse(in_)", "def test_too_many_cubes(self):\n temp = self.temperature\n humid = self.relative_humidity\n pressure = self.pressure\n msg = \"Expected 3\"\n with self.assertRaisesRegex(ValueError, msg):\n WetBulbTemperature().process(CubeList([temp, humid, pressure, temp]))", "def test_fails_input_not_a_cube(self):\n plugin = NonLinearWeights(0.85)\n notacube = 0.0\n msg = \"The first argument must be an instance of \" \"iris.cube.Cube\"\n with self.assertRaisesRegex(TypeError, msg):\n plugin.process(notacube, self.coord_name)", "def test_convert():", "def test_different_units(self):\n self.temperature.convert_units(\"celsius\")\n self.relative_humidity.convert_units(\"1\")\n self.pressure.convert_units(\"kPa\")\n result = WetBulbTemperature().create_wet_bulb_temperature_cube(\n self.temperature, self.relative_humidity, self.pressure\n )\n self.assertArrayAlmostEqual(result.data, self.expected_wbt_data, decimal=3)\n self.assertEqual(result.units, Unit(\"K\"))", "def test_transform_default(self):\n result = transform((1, 2))\n self.assertEqual(result, (2 * PIXEL, 1 * PIXEL))", "def convert_same_units(self):\n # Convert pumping rate data\n pump_units = \"%s3/%s\" % (self.len_units, self.time_units)\n flag = _units.validate_units(pump_units)\n if flag == 2:\n self.pumprate.convert_units(self.time_units, pump_units)\n self.pump_units = pump_units", "def test_basic(self):\n probability_cube = self.plugin_instance.process(self.orography_cube)\n self.assertIsInstance(probability_cube, iris.cube.Cube)\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)", "def supported_cuba(self):", "def supported_cuba(self):", "def supported_cuba(self):", "def supported_cuba(self):", "def supported_cuba(self):", "def test_cpu_process_statistics(self):\n from supvisors.statistics import cpu_process_statistics\n stats = cpu_process_statistics(50, 20, 100)\n self.assertIs(float, type(stats))\n self.assertEqual(30, stats)", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def test_conversions(self):\n\n coord_cart_single = np.array([1, 2])\n coord_homo_single = np.array([1, 2, 1])\n coord_cart_multiple = np.array([[1.5, 2.], [2.3, 10.], [11., 18.]])\n coord_homo_multiple = np.array([[1.5, 2., 1.], [2.3, 10., 1.], [11., 18., 1.]])\n\n # Test the single coordinate case\n self.assertTrue(np.allclose(po.cart2homo(coord_cart_single), coord_homo_single))\n self.assertTrue(np.allclose(po.homo2cart(coord_homo_single), coord_cart_single))\n # Test the multiple coordinate case\n self.assertTrue(np.allclose(po.cart2homo(coord_cart_multiple), coord_homo_multiple))\n self.assertTrue(np.allclose(po.homo2cart(coord_homo_multiple), coord_cart_multiple))" ]
[ "0.6625997", "0.6437879", "0.6160189", "0.58628625", "0.5836225", "0.5800775", "0.57642335", "0.5732402", "0.56941307", "0.56460065", "0.5623775", "0.5611578", "0.5577296", "0.555354", "0.55409086", "0.55349946", "0.55279815", "0.55069077", "0.5484457", "0.5435416", "0.53823364", "0.53683823", "0.5346758", "0.5346758", "0.5346758", "0.5346758", "0.5346758", "0.53251535", "0.53216845", "0.53206223" ]
0.74237454
0
Test the "process" function raises an error when trying to convert the units of cubes that have incompatible units.
def test_unit_conversion_incompatible(self): self.orography_cube.units = 'K' msg = "Unable to convert from" with self.assertRaisesRegex(ValueError, msg): self.plugin_instance.process(self.orography_cube)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_too_many_cubes(self):\n temp = self.temperature\n humid = self.relative_humidity\n pressure = self.pressure\n msg = \"Expected 3\"\n with self.assertRaisesRegex(ValueError, msg):\n WetBulbTemperature().process(CubeList([temp, humid, pressure, temp]))", "def test_convert_invalid_unit():\n with pytest.raises(ValueError):\n pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)\n\n with pytest.raises(ValueError):\n pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)", "def test_convert_incompatible_units(self):\n self.assertRaises(ValueError, convert_units, self.arr, 'm')", "def test_invalid_process(self):\n with self.assertRaises(TypeError):\n self.encoder.process([1, 2, 3, 4])", "def test_fails_input_not_a_cube(self):\n plugin = NonLinearWeights(0.85)\n notacube = 0.0\n msg = \"The first argument must be an instance of \" \"iris.cube.Cube\"\n with self.assertRaisesRegex(TypeError, msg):\n plugin.process(notacube, self.coord_name)", "def test_unit_conversion_compatible(self):\n self.orography_cube.convert_units('ft')\n probability_cube = self.plugin_instance.process(self.orography_cube)\n self.assertIsInstance(probability_cube, iris.cube.Cube)\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)", "def test_not_units(self):\n with self.assertRaises(AssertionError):\n _unit_map(\"WiB\")", "def test_measurement_failures(self):\n\n # single qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=1,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))\n\n # multi qubit\n projQ_backend = ProjectqQuantumSimulator(\n register_size=2,\n seed=234,\n backend=Simulator\n )\n\n circuit = [\n [\"START_SESSION\", 0, 0],\n [\"STATE_PREPARATION_ALL\", 0, 0],\n ['X', 0, 0],\n ['QUBIT_MEASURE', 0, 0]\n ]\n\n for commands in circuit:\n\n hal_cmd = command_creator(*commands)\n projQ_backend.accept_command(hal_cmd)\n\n # try double measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n # try manipulation after measurement\n with self.assertRaises(ValueError):\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n\n # re-prepare state of qubit, then try bit-flip and measure\n projQ_backend.accept_command(\n command_creator(*['STATE_PREPARATION', 0, 0])\n )\n projQ_backend.accept_command(\n command_creator(*['X', 0, 0])\n )\n res = projQ_backend.accept_command(\n command_creator(*['QUBIT_MEASURE', 0, 0])\n )\n\n self.assertEqual(res, 1)\n\n projQ_backend.accept_command(command_creator(*['END_SESSION', 0, 0]))", "def test_empty_cube_list(self):\n msg = \"Expected 3\"\n with self.assertRaisesRegex(ValueError, msg):\n WetBulbTemperature().process(CubeList([]))", "def test_invalid_format(self):\n input_file = self.copy_and_mark_for_cleanup(\"Medline/pubmed_result1.txt\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def 
test_unexpected_error_in_processor(self):\n\n one_process_workflow = \"\"\"file://B <- file://A ! buggy_processor\n echo A does not produce B\n \"\"\"\n process = run_first_process(one_process_workflow, BuggyProcessor())\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle processor '\n 'buggy_processor :') >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in processor\")') >= 0, process.error_message\n assert process.error_message.find('will not complete.') >= 0, process.error_message", "def test_invalid_units(self):\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, INVALID_UNIT, LENGTH_METERS, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, INVALID_UNIT, VOLUME_LITERS,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, INVALID_UNIT,\n MASS_GRAMS)\n\n with self.assertRaises(ValueError):\n UnitSystem(SYSTEM_NAME, TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS,\n INVALID_UNIT)", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def test_process_args_should_reject_missing_units(self, arg_dict):\n with pytest.raises(KeyError):\n change_resolution.process_args(arg_dict)", "def test_with_process_crash(self):\n if self.num_replicas < 2:\n self.assertTrue(False, msg=\"Required: num_replicas > 1\")\n\n # Override num_of_nodes affected to 1 (Positive case)\n self.num_nodes_affected = 1\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Remove active vbuckets from doc_loading to avoid errors\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n load_spec[\"target_vbuckets\"] = list(set(range(0, 1024))\n ^ set(active_vbs_in_target_nodes))\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n 
self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n self.sleep(5, \"Wait for doc loaders to start loading data\")\n\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud()\n\n # Wait for document_loader tasks to complete\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with process crash\")\n\n if self.simulate_error \\\n not in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # In case of error with Ephemeral bucket, need to rebalance\n # to make sure data is redistributed properly\n if self.bucket_type == Bucket.Type.EPHEMERAL:\n retry_num = 0\n result = None\n while retry_num != 2:\n result = self.task.rebalance(\n self.servers[0:self.nodes_init],\n [], [])\n if result:\n break\n retry_num += 1\n self.sleep(10, \"Wait before retrying rebalance\")\n\n self.assertTrue(result, \"Rebalance failed\")\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover stat validation\n if self.simulate_error == CouchbaseError.KILL_MEMCACHED:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n if self.simulate_error != CouchbaseError.STOP_MEMCACHED \\\n and self.bucket_type == Bucket.Type.EPHEMERAL:\n val = failover_info[\"init\"][node.ip] \\\n != failover_info[\"afterCrud\"][node.ip]\n else:\n val = failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats mismatch after error condition:\" \\\n \" %s != %s\" \\\n % (failover_info[\"init\"][node.ip],\n failover_info[\"afterCrud\"][node.ip])\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Doc count validation\n self.validate_test_failure()\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)", "def test_prevent_wrong_memory(self):\n self.assertRaises(cinv.host.Error, self.wrong_memory)", "def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))", "def test_wrongunit(self, a, b, rtol, atol):\n with pytest.raises(u.UnitsError):\n self.func(a, b, rtol, atol)", "def test_convert_unit_bad_args(test_pd_df):\n idf = IamDataFrame(test_pd_df).rename(unit={\"EJ/yr\": \"Mt CH4\"})\n\n # Conversion fails with both 
*factor* and *registry*\n with pytest.raises(ValueError, match=\"Use either `factor` or `registry`!\"):\n idf.convert_unit(\"Mt CH4\", \"CO2e\", factor=1.0, registry=object())\n\n # Conversion fails with an invalid registry\n with pytest.raises(TypeError, match=\"must be `pint.UnitRegistry`\"):\n idf.convert_unit(\"Mt CH4\", \"CO2e\", registry=object())\n\n # Conversion fails without context; exception provides a usage hint\n match = r\"GWP conversion with IamDataFrame.convert_unit\\(\\) requires...\"\n with pytest.raises(pint.UndefinedUnitError, match=match):\n idf.convert_unit(\"Mt CH4\", \"CO2e\")", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return", "def validateProcess(process):\n \n schedule=process.schedule_()\n paths=process.paths_()\n endpaths=process.endpaths_()\n \n # check output mods are in paths and have appropriate settings\n for outputModName in process.outputModules_().keys():\n outputMod = getattr(process, outputModName)\n if not hasattr(outputMod, 'dataset'):\n msg = \"Process contains output module without dataset PSET: %s \\n\" % outputModName\n msg += \" You need to add this PSET to this module to set dataTier and filterName\\n\"\n raise RuntimeError(msg)\n ds=getattr(outputMod,'dataset')\n if not hasattr(ds, \"dataTier\"):\n msg = \"Process contains output module without dataTier parameter: %s \\n\" % outputModName\n msg += \" You need to add an untracked parameter to the dataset PSET of this module to set dataTier\\n\"\n raise RuntimeError(msg)\n\n # check module in path or whatever (not sure of exact syntax for endpath)\n omRun=False\n\n if schedule==None:\n for path in paths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n for path in endpaths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n else:\n for path in schedule:\n if outputModName in path.moduleNames():\n omRun=True\n if omRun==False:\n msg = \"Output Module %s not in endPath\" % outputModName\n raise RuntimeError(msg)", "def test_givens_decomposition_exceptions(unitary_matrix, msg_match):\n\n with pytest.raises(ValueError, match=msg_match):\n givens_decomposition(unitary_matrix)", "def test_mixer_layer_errors(self):\n\n hamiltonian = [[1, 1], [1, 1]]\n\n with pytest.raises(ValueError, match=r\"hamiltonian must be of type pennylane.Hamiltonian\"):\n qaoa.mixer_layer(0.1, hamiltonian)", "def test_errors_for_unequal_image_size() -> None:\n cam = Camera(imgsz=(100, 200), f=(10, 10))\n xcam = Matlab(imgsz=(100, 100), fc=(10, 10))\n with pytest.raises(ValueError):\n Converter(xcam, cam)", "def test_operator_with_invalid_wire(self, monkeypatch, test_batch_result):\n dev = QeQiskitDevice(\n wires=[\"a\", \"b\", \"c\"], shots=1000, backend=\"qasm_simulator\", analytic=False\n )\n\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result,\n )\n\n @qml.qnode(dev)\n def circuit():\n return 
qml.expval(qml.PauliZ(0))\n\n with pytest.raises(\n qml.qnodes.base.QuantumFunctionError,\n match=\"Operation PauliZ applied to invalid wire\",\n ):\n circuit()", "def test_ns_fail2():\n env = NsSimPyEnvironment()\n env.process(some_process(env, 4e-29))\n env.run()", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def test_wrongunit(self, a, b, rtol, atol, expected):\n with pytest.raises(u.UnitsError):\n assert self.func(a, b, rtol, atol)", "def test_invalid_process_number():\n\n with pytest.raises(ValueError):\n get_reusable_executor(max_workers=0)\n\n with pytest.raises(ValueError):\n get_reusable_executor(max_workers=-1)" ]
[ "0.6727947", "0.6689959", "0.6644148", "0.66011065", "0.6515232", "0.6496085", "0.6222151", "0.61650944", "0.6138874", "0.6074079", "0.60311204", "0.5983569", "0.59056264", "0.5855763", "0.5833101", "0.58096343", "0.57705975", "0.5756942", "0.57507277", "0.56909555", "0.56720626", "0.5649504", "0.56488675", "0.5645353", "0.563174", "0.56205153", "0.5615665", "0.55981296", "0.5562951", "0.5550444" ]
0.7596227
0
Test that if the percentiles_cube has other dimension coordinates over which slicing is performed, that these dimensions are properly restored in the resulting probability cube.
def test_preservation_of_dimensions(self): percentiles_cube = set_up_percentiles_cube() test_data = np.array([percentiles_cube.data, percentiles_cube.data]) percentiles = percentiles_cube.coord('percentiles') grid_x = percentiles_cube.coord('projection_x_coordinate') grid_y = percentiles_cube.coord('projection_y_coordinate') new_model_coord = build_coordinate([0, 1], long_name='leading_coord', coord_type=DimCoord, data_type=int) input_cube = iris.cube.Cube( test_data, long_name="snow_level", units="m", dim_coords_and_dims=[(new_model_coord, 0), (percentiles, 1), (grid_y, 2), (grid_x, 3)]) plugin_instance = ProbabilitiesFromPercentiles2D( input_cube, 'new_name') probability_cube = plugin_instance.process(self.orography_cube) self.assertEqual(input_cube.coords(dim_coords=True)[0], probability_cube.coords(dim_coords=True)[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_preservation_of_single_valued_dimension(self):\n percentiles_cube = set_up_percentiles_cube()\n new_model_coord = build_coordinate([0],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n percentiles_cube.add_aux_coord(new_model_coord)\n percentiles_cube = iris.util.new_axis(percentiles_cube,\n scalar_coord='leading_coord')\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(percentiles_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def test_coordinate_collapse(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n with self.assertRaises(CoordinateNotFoundError):\n result.coord_dims(self.percentile_coordinate)", "def cube_shape_check_without_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"realization\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == (3, 2)", "def cube_shape_check_with_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"realization\",\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == (2, 3, 2)", "def _collapse_scalar_dimensions(cube: Cube) -> Cube:\n coords_to_collapse = []\n for coord in cube.coords(dim_coords=True):\n if len(coord.points) == 1 and \"realization\" not in coord.name():\n coords_to_collapse.append(coord)\n for coord in coords_to_collapse:\n cube = next(cube.slices_over(coord))\n return cube", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n 
(0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 
1), (grid_x, 2)])\n return test_cube", "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def test_threshold_dimensions(self, warning_list=None):\n threshold_data_3d = np.broadcast_to(self.orography_cube.data,\n (2, 4, 4))\n grid_x = self.orography_cube.coord(\"projection_x_coordinate\")\n grid_y = self.orography_cube.coord(\"projection_y_coordinate\")\n realization = DimCoord(np.arange(2), standard_name=\"realization\",\n units=\"1\")\n threshold_cube = iris.cube.Cube(threshold_data_3d,\n long_name=\"topography\", units=\"m\",\n dim_coords_and_dims=[(realization, 0),\n (grid_y, 1),\n (grid_x, 2)])\n\n warning_msg = 'threshold cube has too many'\n probability_cube = self.plugin_instance.process(threshold_cube)\n self.assertTrue(any(item.category == UserWarning\n for item in warning_list))\n self.assertTrue(any(warning_msg in str(item)\n for item in warning_list))\n\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)\n self.assertArrayAlmostEqual(probability_cube.data,\n set_reference_probabilities())", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_transpose_cube_dimensions(self):\n # Calculate result for nontransposed cube.\n nontransposed_result = Plugin()._probabilities_to_percentiles(\n self.cube, self.percentiles\n )\n\n # Calculate result for transposed cube.\n # Original cube dimensions are [P, Y, X].\n # Transposed cube dimensions are [X, Y, P].\n self.cube.transpose([2, 1, 0])\n transposed_result = Plugin()._probabilities_to_percentiles(\n self.cube, self.percentiles\n )\n\n # Result cube will be [P, X, Y]\n # Transpose cube to be [P, Y, X]\n transposed_result.transpose([0, 2, 1])\n self.assertArrayAlmostEqual(nontransposed_result.data, transposed_result.data)", "def match_det2cube(x, y, sliceno, start_slice, input_model, transform,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n xcoord, zcoord,\n crval1, crval3, 
cdelt1, cdelt3, naxis1, naxis2):\n nxc = len(xcoord)\n nzc = len(zcoord)\n\n\n sliceno_use = sliceno - start_slice + 1\n# 1-1 mapping in beta\n yy = sliceno_use - 1\n\n pixel_dq = input_model.dq[y, x]\n\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(pixel_dq, all_flags) == 0))\n\n # good data holds the location of pixels we want to map to cube\n x = x[good_data]\n y = y[good_data]\n\n #center of first pixel, x,y = 1 for Adrian's equations\n # but we want the pixel corners, x,y values passed into this routine start at 0\n pixel_flux = input_model.data[y, x]\n\n yy_bot = y\n yy_top = y + 1\n xx_left = x\n xx_right = x + 1\n\n alpha, beta, lam = transform(x, y)\n alpha1, beta1, lam1 = transform(xx_left, yy_bot)\n alpha2, beta2, lam2 = transform(xx_right, yy_bot)\n alpha3, beta3, lam3 = transform(xx_right, yy_top)\n alpha4, beta4, lam4 = transform(xx_left, yy_top)\n\n nn = len(x)\n # Loop over all pixels in slice\n for ipixel in range(0, nn - 1):\n\n # detector pixel -> 4 corners\n # In alpha,wave space\n # in beta space: beta center + width\n\n alpha_corner = []\n wave_corner = []\n\n alpha_corner.append(alpha1[ipixel])\n alpha_corner.append(alpha2[ipixel])\n alpha_corner.append(alpha3[ipixel])\n alpha_corner.append(alpha4[ipixel])\n\n wave_corner.append(lam1[ipixel])\n wave_corner.append(lam2[ipixel])\n wave_corner.append(lam3[ipixel])\n wave_corner.append(lam4[ipixel])\n\n#________________________________________________________________________________\n# Now it does not matter the WCS method used\n alpha_min = min(alpha_corner)\n alpha_max = max(alpha_corner)\n wave_min = min(wave_corner)\n wave_max = max(wave_corner)\n #_______________________________________________________________________\n\n Area = FindAreaQuad(alpha_min, wave_min, alpha_corner, wave_corner)\n\n # estimate the where the pixel overlaps in the cube\n # find the min and max values in the cube xcoord,ycoord and zcoord\n\n MinA = (alpha_min - crval1) / cdelt1\n MaxA = (alpha_max - crval1) / cdelt1\n ix1 = max(0, int(math.trunc(MinA)))\n ix2 = int(math.ceil(MaxA))\n if ix2 >= nxc:\n ix2 = nxc - 1\n\n MinW = (wave_min - crval3) / cdelt3\n MaxW = (wave_max - crval3) / cdelt3\n iz1 = int(math.trunc(MinW))\n iz2 = int(math.ceil(MaxW))\n if iz2 >= nzc:\n iz2 = nzc - 1\n #_______________________________________________________________________\n # loop over possible overlapping cube pixels\n# noverlap = 0\n nplane = naxis1 * naxis2\n\n for zz in range(iz1, iz2 + 1):\n zcenter = zcoord[zz]\n istart = zz * nplane\n\n for xx in range(ix1, ix2 + 1):\n cube_index = istart + yy * naxis1 + xx #yy = slice # -1\n xcenter = xcoord[xx]\n AreaOverlap = SH_FindOverlap(xcenter, zcenter,\n cdelt1, cdelt3,\n alpha_corner, wave_corner)\n\n if AreaOverlap > 0.0:\n AreaRatio = AreaOverlap / Area\n spaxel_flux[cube_index] = spaxel_flux[cube_index] + (AreaRatio * pixel_flux[ipixel])\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + AreaRatio\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 
0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def test_basic(self):\n probability_cube = self.plugin_instance.process(self.orography_cube)\n self.assertIsInstance(probability_cube, iris.cube.Cube)\n self.assertSequenceEqual(probability_cube.shape,\n self.reference_cube.shape)", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 
1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), 
(1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 
0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def test_masked_data_above(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n self.cube.data,\n [200, 1000, 15000],\n 
variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"above\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_masked_data_below(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n 1 - self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test_get_second_cube(self):\n self.assertIs(self.cube_2,\n self.fix.get_cube_from_list(self.cubes, \"cube2\"))", "def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))", "def test_cropping(self, scaffold_res=9):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n func = self._get_simple_implicit_function(scaffold_res=scaffold_res).to(device)\n\n assert scaffold_res >= 8\n div = (scaffold_res - 1) / 2\n true_min_point = torch.tensor(\n [-3 / div, 0 / div, -3 / div],\n device=device,\n )\n true_max_point = torch.tensor(\n [1 / div, 2 / div, 3 / div],\n device=device,\n )\n\n def new_scaffold(points):\n # 1 if between true_min and true_max point else 0\n # return points.new_ones((*points.shape[:-1], 1))\n return (\n torch.logical_and(true_min_point <= points, points <= true_max_point)\n .all(dim=-1)\n .float()[..., None]\n )\n\n called_crop = []\n\n def assert_min_max_points(min_point, max_point):\n called_crop.append(1)\n self.assertClose(min_point, true_min_point)\n self.assertClose(max_point, true_max_point)\n\n func.voxel_grid_density.crop_self = assert_min_max_points\n func.voxel_grid_color.crop_self = assert_min_max_points\n func.voxel_grid_scaffold.forward = new_scaffold\n func._scaffold_ready = True\n func._crop(epoch=0)\n assert len(called_crop) == 2", "def test_get_second_slice(self):\n self.init()\n assert np.all(get_second_slice(self.i64_3) == self.i64_3[:,:,1])\n assert np.all(get_second_slice(self.fi64_3) == self.fi64_3[:,:,1])\n assert np.all(get_second_slice(self.f64_3) == self.f64_3[:,:,1])\n assert np.all(get_second_slice(self.ff64_3) == self.ff64_3[:,:,1])\n assert get_second_slice(self.i64_3).shape == (3,3)\n assert get_second_slice(self.fi64_3).shape == (3,3)\n assert get_second_slice(self.f64_3).shape == (3,3)\n assert get_second_slice(self.ff64_3).shape == (3,3)\n assert 
get_second_slice(self.i64_3).dtype == 'float64'\n assert get_second_slice(self.fi64_3).dtype == 'float64'\n assert get_second_slice(self.f64_3).dtype == 'float64'\n assert get_second_slice(self.ff64_3).dtype == 'float64'\n assert get_second_slice(self.i64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.fi64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.f64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.ff64_3).flags['F_CONTIGUOUS'] == True", "def crop( self, remove_bad=True, check_coverage=True ):\n \n if not self._cropped:\n cropper = image_cube_cropper( check_coverage=check_coverage ).fit( self )\n\n # remove corrupt images if desired \n if remove_bad:\n self._remove_steps( cropper.get_null_images() )\n self._remove_steps( cropper.get_corrupt_images() )\n\n # set new bounds and cropped cube indicator\n self._set_bounds( cropper.get_bounds() )\n self._cropped = True\n \n else:\n if ir.config.verbosity_level >= 1:\n print(\"This data cube has already been cropped\")", "def test_cube_attribute_no_seapoints(self):\n result = _make_mask_cube(\n self.mask, self.coords, [self.lower, self.upper], self.units\n )\n self.assertEqual(\n result.attributes[\"topographic_zones_include_seapoints\"], \"False\"\n )", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def test_extract_sensitivity_cube(tmpdir, datadir, datevshot):\n \n h5fn = datadir.join(\"test_hdf.h5\").strpath\n outfn = tmpdir.join(\"test.fits\").strpath \n \n if datevshot:\n args = [h5fn, \"--datevshot\", datevshot, \"063\", outfn]\n else:\n args = [h5fn, \"063\", outfn] \n\n extract_sensitivity_cube(args=args)\n \n assert isfile(outfn)", "def test_ordered_pvarray_cuts_for_pvrow_view(ordered_pvarray):\n\n ordered_pvarray.cast_shadows()\n n_surfaces_0 = ordered_pvarray.ground.n_surfaces\n len_0 = ordered_pvarray.ground.length\n ordered_pvarray.cuts_for_pvrow_view()\n n_surfaces_1 = ordered_pvarray.ground.n_surfaces\n len_1 = ordered_pvarray.ground.length\n\n assert n_surfaces_1 == n_surfaces_0 + 3\n assert len_1 == len_0" ]
[ "0.6573338", "0.65155584", "0.6331512", "0.6296247", "0.6127816", "0.61163586", "0.602921", "0.59533024", "0.59429663", "0.58905756", "0.58462465", "0.5840607", "0.5825418", "0.58206326", "0.5819811", "0.5686703", "0.5670416", "0.5668486", "0.5650249", "0.5611526", "0.5593675", "0.54704434", "0.54668397", "0.5450546", "0.54466623", "0.5396939", "0.5377872", "0.53698605", "0.5354533", "0.5327201" ]
0.7481589
0
Test that if the percentiles_cube has a single-valued dimension coordinate over which slicing is performed, this coordinate is restored as a dimension coordinate in the resulting probability cube.
def test_preservation_of_single_valued_dimension(self):
    percentiles_cube = set_up_percentiles_cube()
    new_model_coord = build_coordinate([0],
                                       long_name='leading_coord',
                                       coord_type=DimCoord,
                                       data_type=int)
    percentiles_cube.add_aux_coord(new_model_coord)
    percentiles_cube = iris.util.new_axis(percentiles_cube,
                                          scalar_coord='leading_coord')
    plugin_instance = ProbabilitiesFromPercentiles2D(
        percentiles_cube, 'new_name')
    probability_cube = plugin_instance.process(self.orography_cube)
    self.assertEqual(percentiles_cube.coords(dim_coords=True)[0],
                     probability_cube.coords(dim_coords=True)[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_preservation_of_dimensions(self):\n percentiles_cube = set_up_percentiles_cube()\n test_data = np.array([percentiles_cube.data, percentiles_cube.data])\n percentiles = percentiles_cube.coord('percentiles')\n grid_x = percentiles_cube.coord('projection_x_coordinate')\n grid_y = percentiles_cube.coord('projection_y_coordinate')\n\n new_model_coord = build_coordinate([0, 1],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n input_cube = iris.cube.Cube(\n test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(new_model_coord, 0),\n (percentiles, 1),\n (grid_y, 2), (grid_x, 3)])\n\n plugin_instance = ProbabilitiesFromPercentiles2D(\n input_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(input_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def test_coordinate_collapse(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n with self.assertRaises(CoordinateNotFoundError):\n result.coord_dims(self.percentile_coordinate)", "def _collapse_scalar_dimensions(cube: Cube) -> Cube:\n coords_to_collapse = []\n for coord in cube.coords(dim_coords=True):\n if len(coord.points) == 1 and \"realization\" not in coord.name():\n coords_to_collapse.append(coord)\n for coord in coords_to_collapse:\n cube = next(cube.slices_over(coord))\n return cube", "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube", "def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def cube_shape_check_without_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"realization\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == (3, 2)", "def match_det2cube(x, y, sliceno, start_slice, input_model, transform,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n xcoord, zcoord,\n crval1, crval3, cdelt1, cdelt3, naxis1, naxis2):\n nxc = len(xcoord)\n nzc = len(zcoord)\n\n\n sliceno_use = sliceno - start_slice + 1\n# 1-1 mapping in beta\n yy = sliceno_use - 1\n\n pixel_dq = input_model.dq[y, x]\n\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + 
dqflags.pixel['NONLINEAR'])\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(pixel_dq, all_flags) == 0))\n\n # good data holds the location of pixels we want to map to cube\n x = x[good_data]\n y = y[good_data]\n\n #center of first pixel, x,y = 1 for Adrian's equations\n # but we want the pixel corners, x,y values passed into this routine start at 0\n pixel_flux = input_model.data[y, x]\n\n yy_bot = y\n yy_top = y + 1\n xx_left = x\n xx_right = x + 1\n\n alpha, beta, lam = transform(x, y)\n alpha1, beta1, lam1 = transform(xx_left, yy_bot)\n alpha2, beta2, lam2 = transform(xx_right, yy_bot)\n alpha3, beta3, lam3 = transform(xx_right, yy_top)\n alpha4, beta4, lam4 = transform(xx_left, yy_top)\n\n nn = len(x)\n # Loop over all pixels in slice\n for ipixel in range(0, nn - 1):\n\n # detector pixel -> 4 corners\n # In alpha,wave space\n # in beta space: beta center + width\n\n alpha_corner = []\n wave_corner = []\n\n alpha_corner.append(alpha1[ipixel])\n alpha_corner.append(alpha2[ipixel])\n alpha_corner.append(alpha3[ipixel])\n alpha_corner.append(alpha4[ipixel])\n\n wave_corner.append(lam1[ipixel])\n wave_corner.append(lam2[ipixel])\n wave_corner.append(lam3[ipixel])\n wave_corner.append(lam4[ipixel])\n\n#________________________________________________________________________________\n# Now it does not matter the WCS method used\n alpha_min = min(alpha_corner)\n alpha_max = max(alpha_corner)\n wave_min = min(wave_corner)\n wave_max = max(wave_corner)\n #_______________________________________________________________________\n\n Area = FindAreaQuad(alpha_min, wave_min, alpha_corner, wave_corner)\n\n # estimate the where the pixel overlaps in the cube\n # find the min and max values in the cube xcoord,ycoord and zcoord\n\n MinA = (alpha_min - crval1) / cdelt1\n MaxA = (alpha_max - crval1) / cdelt1\n ix1 = max(0, int(math.trunc(MinA)))\n ix2 = int(math.ceil(MaxA))\n if ix2 >= nxc:\n ix2 = nxc - 1\n\n MinW = (wave_min - crval3) / cdelt3\n MaxW = (wave_max - crval3) / cdelt3\n iz1 = int(math.trunc(MinW))\n iz2 = int(math.ceil(MaxW))\n if iz2 >= nzc:\n iz2 = nzc - 1\n #_______________________________________________________________________\n # loop over possible overlapping cube pixels\n# noverlap = 0\n nplane = naxis1 * naxis2\n\n for zz in range(iz1, iz2 + 1):\n zcenter = zcoord[zz]\n istart = zz * nplane\n\n for xx in range(ix1, ix2 + 1):\n cube_index = istart + yy * naxis1 + xx #yy = slice # -1\n xcenter = xcoord[xx]\n AreaOverlap = SH_FindOverlap(xcenter, zcenter,\n cdelt1, cdelt3,\n alpha_corner, wave_corner)\n\n if AreaOverlap > 0.0:\n AreaRatio = AreaOverlap / Area\n spaxel_flux[cube_index] = spaxel_flux[cube_index] + (AreaRatio * pixel_flux[ipixel])\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + AreaRatio\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1", "def cube_shape_check_with_realizations(pressure_slice_cube):\n coord_names = [coord.name() for coord in pressure_slice_cube.coords()]\n assert coord_names == [\n \"realization\",\n \"latitude\",\n \"longitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"time\",\n ]\n assert pressure_slice_cube.shape == (2, 3, 2)", "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n 
units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def test_transpose_cube_dimensions(self):\n # Calculate result for nontransposed cube.\n nontransposed_result = Plugin()._probabilities_to_percentiles(\n self.cube, self.percentiles\n )\n\n # Calculate result for transposed cube.\n # Original cube dimensions are [P, Y, X].\n # Transposed cube dimensions are [X, Y, P].\n self.cube.transpose([2, 1, 0])\n transposed_result = Plugin()._probabilities_to_percentiles(\n self.cube, self.percentiles\n )\n\n # Result cube will be [P, X, Y]\n # Transpose cube to be [P, Y, X]\n transposed_result.transpose([0, 2, 1])\n self.assertArrayAlmostEqual(nontransposed_result.data, transposed_result.data)", "def test_masked_data_below(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n 1 - self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"below\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test_masked_data_above(self):\n\n expected_mask = np.full_like(self.cube.data, False, dtype=bool)\n expected_mask[:, 0, 0] = True\n expected_mask[1, 0, 2] = True\n expected_mask[2, 0] = True\n expected_mask[2, 1, 2] = True\n expected_mask[2, 1, 0] = True\n\n cube = set_up_probability_cube(\n self.cube.data,\n [200, 1000, 15000],\n variable_name=(\n \"cloud_base_height_assuming_only_consider_cloud_\"\n \"area_fraction_greater_than_4p5_oktas\"\n ),\n threshold_units=\"m\",\n spp__relative_to_threshold=\"above\",\n )\n\n result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(\n cube, self.percentiles\n )\n\n self.assertArrayEqual(result.data.mask, expected_mask)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_single_percentile(self):\n percentiles_cube = set_up_percentiles_cube()\n percentiles_cube = percentiles_cube[0]\n msg = \"Percentile coordinate has only one value. 
Interpolation\"\n with self.assertRaisesRegex(ValueError, msg):\n ProbabilitiesFromPercentiles2D(percentiles_cube, 'new_name')", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 
1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def test_get_first_cube(self):\n self.assertIs(self.cube_1,\n self.fix.get_cube_from_list(self.cubes, \"cube1\"))", "def _mask_cube(cube):\n cube = cube.copy()\n val_range = np.ma.max(cube.data) - np.ma.min(cube.data)\n threshold = val_range * 5e-2\n cube.data = np.ma.masked_inside(cube.data, -threshold, threshold)\n return cube", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def quantile(self, hypercube):\n raise NotImplementedError()", "def test_cube_attribute_include_seapoints(self):\n result = _make_mask_cube(\n self.mask,\n self.coords,\n [self.lower, self.upper],\n self.units,\n sea_points_included=\"True\",\n )\n self.assertEqual(\n result.attributes[\"topographic_zones_include_seapoints\"], \"True\"\n )", "def test_cube_attribute_no_seapoints(self):\n result = _make_mask_cube(\n self.mask, self.coords, [self.lower, self.upper], self.units\n )\n self.assertEqual(\n result.attributes[\"topographic_zones_include_seapoints\"], \"False\"\n )", "def test_cropping(self, scaffold_res=9):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n func = self._get_simple_implicit_function(scaffold_res=scaffold_res).to(device)\n\n assert scaffold_res >= 8\n div = (scaffold_res - 1) / 2\n true_min_point = torch.tensor(\n [-3 / div, 0 / div, -3 / div],\n device=device,\n )\n true_max_point = torch.tensor(\n [1 / div, 2 / div, 3 / div],\n device=device,\n )\n\n def new_scaffold(points):\n # 1 if between true_min and true_max point else 0\n # return points.new_ones((*points.shape[:-1], 1))\n return (\n torch.logical_and(true_min_point <= points, points <= true_max_point)\n .all(dim=-1)\n .float()[..., None]\n )\n\n called_crop = []\n\n def assert_min_max_points(min_point, max_point):\n called_crop.append(1)\n self.assertClose(min_point, true_min_point)\n 
self.assertClose(max_point, true_max_point)\n\n func.voxel_grid_density.crop_self = assert_min_max_points\n func.voxel_grid_color.crop_self = assert_min_max_points\n func.voxel_grid_scaffold.forward = new_scaffold\n func._scaffold_ready = True\n func._crop(epoch=0)\n assert len(called_crop) == 2", "def test_get_second_cube(self):\n self.assertIs(self.cube_2,\n self.fix.get_cube_from_list(self.cubes, \"cube2\"))", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def slice_to_cube(self, axis, chunk, **kwargs):\n if self.data.ndim == 3:\n raise cu.CubeError(4, 'Can only slice a hypercube into a cube')\n\n item = [slice(None, None, None) for _ in range(4)]\n if isinstance(chunk, tuple):\n if cu.iter_isinstance(chunk, (u.Quantity, u.Quantity)):\n pixel0 = cu.convert_point(chunk[0].value, chunk[0].unit,\n self.axes_wcs, axis)\n pixel1 = cu.convert_point(chunk[1].value, chunk[1].unit,\n self.axes_wcs, axis)\n item[axis] = slice(pixel0, pixel1, None)\n elif cu.iter_isinstance((chunk, int, int)):\n item[axis] = slice(chunk[0], chunk[1], None)\n else:\n raise cu.CubeError(5, \"Parameters must be of the same type\")\n newdata = self.data[item].sum(axis)\n else:\n unit = chunk.unit if isinstance(chunk, u.Quantity) else None\n pixel = cu.convert_point(chunk, unit, self.axes_wcs, axis)\n item[axis] = pixel\n newdata = self.data[item]\n wcs_indices = [0, 1, 2, 3]\n wcs_indices.remove(3 - axis)\n newwcs = wu.reindex_wcs(self.axes_wcs, np.array(wcs_indices))\n if axis == 2 or axis == 3:\n newwcs = wu.add_celestial_axis(newwcs)\n newwcs.was_augmented = True\n cube = Cube(newdata, newwcs, meta=self.meta, **kwargs)\n return cube", "def test_fix_data(self):\n cube = self.fix.fix_data(self.cube)\n np.testing.assert_allclose(cube.data[0], 1.0)\n np.testing.assert_allclose(cube.data[2], 2.0)\n assert not np.ma.is_masked(cube.data[0])\n assert np.ma.is_masked(cube.data[1])\n assert not np.ma.is_masked(cube.data[2])", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_values_inverse_ordering(self):\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = 
np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected = 1.0 - expected\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)" ]
[ "0.7051559", "0.68129355", "0.6192699", "0.59999096", "0.59750366", "0.5854034", "0.5833827", "0.5687848", "0.5681342", "0.56353337", "0.5614267", "0.5514567", "0.5513307", "0.5488463", "0.5487292", "0.54499996", "0.5447126", "0.5413848", "0.53965026", "0.53933626", "0.5348202", "0.53439325", "0.5327404", "0.53008085", "0.5269695", "0.52614117", "0.523042", "0.52194166", "0.52187943", "0.5216616" ]
0.71103495
0
Creates a Gateway Load Balancer and returns the response and ARN
def create_gwlb(gwlb_name, subnet_id_list):
    logging.info(f"Creating gateway load balancer: {gwlb_name}")
    waiter = elbv2.get_waiter('load_balancer_available')
    try:
        response = elbv2.create_load_balancer(
            Name=gwlb_name,
            Subnets=subnet_id_list,
            Tags=[{'Key': 'Name', 'Value': gwlb_name}],
            Type='gateway'
        )
        gwlb_arn = response['LoadBalancers'][0]['LoadBalancerArn']
        logging.info("Waiting for GWLB's state to change to available")
        waiter.wait(
            LoadBalancerArns=[gwlb_arn],
            WaiterConfig={
                'Delay': 15,
                'MaxAttempts': 40
            }
        )
        return response, gwlb_arn
    except ClientError as e:
        logging.error(e)
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],\n Scheme='internet-facing',\n Tags=[\n {\n 'Key': 'chops-aws-project',\n 'Value': self.get_aws_project_name(),\n },\n {\n 'Key': 'environment',\n 'Value': app_env,\n },\n ],\n Type='application',\n IpAddressType='ipv4',\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['LoadBalancers'][0]", "def post(self, request):\n return create_loadbalancer(request)", "def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')", "def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def create_loadbalancer(call=None, kwargs=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The create_address function must be called with -f or --function.\"\n )\n\n if kwargs is None:\n kwargs = {}\n\n conn = get_conn()\n datacenter_id = get_datacenter_id()\n loadbalancer = LoadBalancer(\n name=kwargs.get(\"name\"), ip=kwargs.get(\"ip\"), dhcp=kwargs.get(\"dhcp\")\n )\n\n response = conn.create_loadbalancer(datacenter_id, loadbalancer)\n _wait_for_completion(conn, response, 60, \"loadbalancer\")\n\n return response", "def create_loadbalancer(self, context, loadbalancer, driver_name):\n LOG.info(\"Received request 'Create Loadbalancer' for LB:%(lb)s \"\n \"with driver:%(driver_name)s\",\n {'lb': loadbalancer['id'],\n 'driver_name': driver_name})\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n 'driver_name': driver_name\n }\n self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def create(\n self,\n name, # type: str\n load_balancer_type, # type: LoadBalancerType\n algorithm=None, # type: Optional[LoadBalancerAlgorithm]\n services=None, # type: Optional[List[LoadBalancerService]]\n targets=None, # type: Optional[List[LoadBalancerTarget]]\n labels=None, # type: Optional[Dict[str, str]]\n location=None, # type: Optional[Location]\n network_zone=None, # type: Optional[str]\n public_interface=None, # type: Optional[bool]\n network=None # type: Optional[Union[Network,BoundNetwork]]\n ):\n # type: (...) 
-> CreateLoadBalancerResponse:\n data = {\"name\": name, \"load_balancer_type\": load_balancer_type.id_or_name}\n if network is not None:\n data[\"network\"] = network.id\n if public_interface is not None:\n data[\"public_interface\"] = public_interface\n if labels is not None:\n data[\"labels\"] = labels\n if algorithm is not None:\n data[\"algorithm\"] = {\"type\": algorithm.type}\n if services is not None:\n service_list = []\n for service in services:\n service_list.append(self.get_service_parameters(service))\n data[\"services\"] = service_list\n\n if targets is not None:\n target_list = []\n for target in targets:\n target_data = {\n \"type\": target.type,\n \"use_private_ip\": target.use_private_ip\n }\n if target.type == \"server\":\n target_data['server'] = {\"id\": target.server.id}\n elif target.type == \"label_selector\":\n target_data['label_selector'] = {\"selector\": target.label_selector.selector}\n elif target.type == \"ip\":\n target_data['ip'] = {\"ip\": target.ip.ip}\n target_list.append(target_data)\n\n data[\"targets\"] = target_list\n\n if network_zone is not None:\n data[\"network_zone\"] = network_zone\n if location is not None:\n data[\"location\"] = location.id_or_name\n\n response = self._client.request(url=\"/load_balancers\", method=\"POST\", json=data)\n\n return CreateLoadBalancerResponse(load_balancer=BoundLoadBalancer(self, response[\"load_balancer\"]),\n action=BoundAction(self._client.actions, response['action']))", "def ensure_load_balancer_created(vpc, security_group, subnet1, subnet2, target_group_arn, ssl_certificate_arn, environment):\n name = environment + '-load-balancer'\n\n # If it already exists, create returns the existing data\n response = ELB.create_load_balancer(\n Name=name,\n Subnets=[ subnet1.id, subnet2.id ],\n SecurityGroups=[ security_group.id ],\n IpAddressType='dualstack',\n Tags=[\n { 'Key': 'Name', 'Value': name },\n { 'Key': 'Environment', 'Value': environment }\n ]\n )\n\n load_balancer = response['LoadBalancers'][0]\n arn = load_balancer['LoadBalancerArn']\n\n # There seems to be no harm in creating listeners if they already exist\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{ 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',\n Certificates=[ { 'CertificateArn': ssl_certificate_arn } ],\n DefaultActions=[ { 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n return load_balancer", "def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)", "def create_load_balancer(self,\n instance_id: str,\n dnszone_id: str,\n *,\n name: str = None,\n fallback_pool: str = None,\n default_pools: List[str] = None,\n description: str = None,\n enabled: bool = None,\n ttl: int = None,\n az_pools: List['LoadBalancerAzPoolsItem'] = None,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n if dnszone_id is None:\n raise ValueError('dnszone_id must be provided')\n if az_pools is not None:\n az_pools = [convert_model(x) for x in az_pools]\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = 
get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_load_balancer')\n headers.update(sdk_headers)\n\n data = {\n 'name': name,\n 'fallback_pool': fallback_pool,\n 'default_pools': default_pools,\n 'description': description,\n 'enabled': enabled,\n 'ttl': ttl,\n 'az_pools': az_pools\n }\n data = {k: v for (k, v) in data.items() if v is not None}\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/instances/{0}/dnszones/{1}/load_balancers'.format(\n *self.encode_path_vars(instance_id, dnszone_id))\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n data=data)\n\n response = self.send(request)\n return response", "def create(self, region, **kwargs):\n params = {\n \"region\": region.id if isinstance(region, Base) else region,\n }\n params.update(kwargs)\n\n result = self.client.post(\"/nodebalancers\", data=params)\n\n if not \"id\" in result:\n raise UnexpectedResponseError(\n \"Unexpected response when creating Nodebalaner!\", json=result\n )\n\n n = NodeBalancer(self.client, result[\"id\"], result)\n return n", "def process_load_balancer ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=True, base_name=base_name)\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 
'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ] ) )\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n get_vpc_subnets( vpc_conn, vpc, subnet_type ),\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n dns_alias = create_internal_elb_dns_name( base_name, app_name )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating proxy load balancer.\"\n proxy_type = app_name + '-PX'\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n if not proxy_secgrp :\n proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' servers.' )\n \n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n \n if not lb_proxy_secgrp :\n lb_proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' load balancer.' 
)\n\n remove_all_rules( ec2_conn, [ lb_proxy_secgrp, proxy_secgrp ], deep=True, base_name=base_name) \n ## reload the security group after removing the rules\n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n\n \n ##\n ## FIX: In reality, we need to set the group rules between lb_proxy and proxy to match\n ## the listener ports that were passed in/configured.\n ##\n grant_ssh_access( ec2_conn, [ proxy_secgrp ], find_group( ec2_conn, base_name, 'NAT' ) )\n \n \n ## proxy server port is always 80\n ## updated by yliu, 2014/6/13\n ##if use_ssl :\n ## proxy_port = 443\n ##else :\n ## proxy_port = 80\n proxy_port = 80\n\n ## backend elb port that the proxy server passes request to \n if use_ssl :\n proxy_to_elb_port = 443\n else :\n proxy_to_elb_port = 80\n\n grant_grp_access( ec2_conn, [ lb_proxy_secgrp ], proxy_secgrp, proxy_port )\n grant_grp_access( ec2_conn, [ proxy_secgrp ], lb_secgrp, proxy_to_elb_port )\n for port in public_tcp_ports :\n lb_proxy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr ) \n\n proxy_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n proxy_listeners = [ ( 443, proxy_port, 'https', ssl_cert_arn ) ]\n\n proxy_elb = create_elb( elb_conn,\n get_elb_name( base_name, proxy_type ),\n get_vpc_subnets( vpc_conn, vpc, 'PUBLIC' ),\n proxy_listeners,\n lb_proxy_secgrp,\n proxy_port,\n '/robots.txt',\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, proxy_elb.dns_name )\n else :\n public_dns_cname = ''\n\n print \"Creating proxy instances.\"\n proxy_ami = get_ami_by_name( ec2_conn, proxy_ami_name )\n subnets = get_vpc_subnets( vpc_conn, vpc, 'PRIVATE' )\n\n ## direct proxy server to access backend elb over given protocol\n ## added by yliu, 2014/6/13\n if use_ssl :\n app_elb_protocol = 'https'\n else :\n app_elb_protocol = 'http'\n \n proxy_userdata = get_proxy_userdata( public_dns_cname, elb.dns_name, app_elb_protocol, app_name )\n proxy_instances = []\n \n proxy_keypair = get_keypair_name( aws_account_type, vpc.region.name, \"APACHE\" )\n \n for subnet in subnets : \n instance = launch_instance_vpc( ec2_conn,\n proxy_ami,\n base_name = base_name,\n instance_type = proxy_type,\n keypair = proxy_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = proxy_secgrp.id ,\n subnet_id = subnet.id,\n user_data = proxy_userdata,\n public_ip = False )\n proxy_instances.append( instance )\n\n print \"Setting alarms on the proxy\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, 'PROXY', base_topicarn, proxy_monitor_rules )\n \n proxy_instance_ids = [ i.id for i in proxy_instances ]\n\n print \"Waiting for proxy instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, proxy_instance_ids )\n\n print \"Adding the new proxy instances into the load balancer.\"\n \n status = swap_elb_instances( elb_conn = elb_conn,\n elb = proxy_elb,\n new_instance_ids = proxy_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = False )\n\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 
'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n\n return ( elb, lb_secgrp, dns_alias )", "def create_elb(tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n elb_name=None, s3_logs_bucket=None,\n tls_priv_key=None, tls_fullchain_cert=None,\n region_name=None, dry_run=False):\n if not elb_name:\n elb_name = '%selb' % _clean_tag_prefix(tag_prefix)\n\n elb_client = boto3.client('elbv2', region_name=region_name)\n resp = elb_client.create_load_balancer(\n Name=elb_name,\n Subnets=[subnet['SubnetId'] for subnet in web_subnet_by_cidrs.values()\n if subnet],\n SecurityGroups=[\n moat_sg_id,\n ],\n Scheme='internet-facing',\n Type='application',\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix}])\n load_balancer = resp['LoadBalancers'][0]\n load_balancer_arn = load_balancer['LoadBalancerArn']\n load_balancer_dns = load_balancer['DNSName']\n LOGGER.info(\"%s found/created application load balancer %s available at %s\",\n tag_prefix, load_balancer_arn, load_balancer_dns)\n\n attributes = [{\n 'Key': 'deletion_protection.enabled',\n 'Value': 'true'\n }, {\n #pylint:disable=line-too-long\n #https://stackoverflow.com/questions/58848623/what-does-alb-consider-a-valid-header-field\n 'Key': 'routing.http.drop_invalid_header_fields.enabled',\n 'Value': 'true'\n }]\n if s3_logs_bucket:\n attributes += [{\n 'Key': 'access_logs.s3.enabled',\n 'Value': 'true'\n }, {\n 'Key': 'access_logs.s3.bucket',\n 'Value': s3_logs_bucket\n }, {\n 'Key': 'access_logs.s3.prefix',\n 'Value': 'var/log/elb'\n }]\n\n update_load_balancer_attributes = False\n resp = elb_client.describe_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn)\n for attr in attributes:\n for curr_attr in resp['Attributes']:\n if attr['Key'] == curr_attr['Key']:\n if attr['Value'] != curr_attr['Value']:\n update_load_balancer_attributes = True\n break\n if update_load_balancer_attributes:\n resp = elb_client.modify_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn,\n Attributes=attributes)\n LOGGER.info(\"%s updated attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n else:\n LOGGER.info(\"%s found expected attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{\n \"Type\": \"redirect\",\n \"RedirectConfig\": {\n \"Protocol\": \"HTTPS\",\n \"Port\": \"443\",\n \"Host\": \"#{host}\",\n \"Path\": \"/#{path}\",\n \"Query\": \"#{query}\",\n \"StatusCode\": \"HTTP_301\"\n }\n }])\n LOGGER.info(\"%s created HTTP application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTP application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n # We will need a default TLS certificate for creating an HTTPS listener.\n default_cert_location = None\n resp = elb_client.describe_listeners(\n LoadBalancerArn=load_balancer_arn)\n for listener in resp['Listeners']:\n if listener['Protocol'] == 'HTTPS':\n for certificate in listener['Certificates']:\n if 'IsDefault' not in certificate or certificate['IsDefault']:\n default_cert_location = certificate['CertificateArn']\n LOGGER.info(\"%s found default TLS certificate 
%s\",\n tag_prefix, default_cert_location)\n break\n if not default_cert_location:\n if tls_priv_key and tls_fullchain_cert:\n resp = _store_certificate(\n tls_fullchain_cert, tls_priv_key,\n tag_prefix=tag_prefix, region_name=region_name,\n dry_run=dry_run)\n default_cert_location = resp['CertificateArn']\n else:\n LOGGER.warning(\"default_cert_location is not set and there are no\"\\\n \" tls_priv_key and tls_fullchain_cert either.\")\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTPS',\n Port=443,\n Certificates=[{'CertificateArn': default_cert_location}],\n DefaultActions=[{\n 'Type': 'fixed-response',\n 'FixedResponseConfig': {\n 'MessageBody': '%s ELB' % tag_prefix,\n 'StatusCode': '200',\n 'ContentType': 'text/plain'\n }\n }])\n LOGGER.info(\n \"%s created HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n return load_balancer_arn", "def _create_body(self, name, port=None, protocol=None, nodes=None,\n virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,\n connectionLogging=None, connectionThrottle=None, healthMonitor=None,\n metadata=None, timeout=None, sessionPersistence=None,\n httpsRedirect=None):\n required = (virtual_ips, port, protocol)\n if not all(required):\n raise exc.MissingLoadBalancerParameters(\"Load Balancer creation \"\n \"requires at least one virtual IP, a protocol, and a port.\")\n nodes = utils.coerce_to_list(nodes)\n virtual_ips = utils.coerce_to_list(virtual_ips)\n bad_conditions = [node.condition for node in nodes\n if node.condition.upper() not in (\"ENABLED\", \"DISABLED\")]\n if bad_conditions:\n raise exc.InvalidNodeCondition(\"Nodes for new load balancer must be \"\n \"created in either 'ENABLED' or 'DISABLED' condition; \"\n \"received the following invalid conditions: %s\" %\n \", \".join(set(bad_conditions)))\n node_dicts = [nd.to_dict() for nd in nodes]\n vip_dicts = [vip.to_dict() for vip in virtual_ips]\n body = {\"loadBalancer\": {\n \"name\": name,\n \"port\": port,\n \"protocol\": protocol,\n \"nodes\": node_dicts,\n \"virtualIps\": vip_dicts,\n \"algorithm\": algorithm or \"RANDOM\",\n \"halfClosed\": halfClosed,\n \"accessList\": accessList,\n \"connectionLogging\": connectionLogging,\n \"connectionThrottle\": connectionThrottle,\n \"healthMonitor\": healthMonitor,\n \"metadata\": metadata,\n \"timeout\": timeout,\n \"sessionPersistence\": sessionPersistence,\n \"httpsRedirect\": httpsRedirect,\n }}\n return body", "def post_loadbalancer_member_create(self, resource_dict):\n pass", "def get(self, load_balancer_id):\n response.status = 201\n return None", "def create_load_balancer_rule(self, name, description, algorithm, \n privateport, publicport, \n publicipid, protocol='tcp'): \n params = {'command':'createLoadBalancerRule',\n 'networkid':self.id,\n 'algorithm':algorithm,\n 'name':name,\n 'description':description,\n 'privateport':privateport,\n 'publicport':publicport,\n 'protocol':protocol,\n 'publicipid':publicipid} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createloadbalancerruleresponse']['jobid']\n self.logger.debug('Start job - createLoadBalancerRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise 
ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)", "def create_api_gateway(args):\n return create_pool(_create_api_gateway_from_meta, args, 1)", "def process_load_balancer_in_dev ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not use_ssl : \n print( \"Do not create load balancer since use_ssl is False\" );\n return (None, None, None, None);\n \n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n # in dev vpc, initialize local variables\n if use_ssl:\n app_visibility = 'PUBLIC'\n\n if not public_dns_cname :\n public_dns_cname = ssl_hostname\n\n if len( public_tcp_ports ) == 0 :\n public_tcp_ports = [443]\n \n if len( app_tcp_ports ) == 0 : \n app_tcp_ports = [8080]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=False, base_name=base_name)\n\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 
'outgoing-port' ],\n params[ 'protocol' ] ) )\n\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n ## find subnet in dev vpc.\n ## TODO: should we define subnet-cidr prarameter to get subnet?\n subnets = vpc_conn.get_all_subnets( filters = [ ( \"vpcId\", [ vpc.id ] ) ] ) \n \n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n subnets,\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n # create dna alias for internal elb in dev vpc.\n dns_alias = create_dns_name( base_name, app_name + '.internal' )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating public load balancer.\"\n lb_public_name = app_name + '-PB'\n\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n \n if not lb_public_secgrp :\n lb_public_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, lb_public_name ),\n 'Controls access to the ' + lb_public_name + ' load balancer.' 
)\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_public_secgrp ], deep=False, base_name=base_name) \n \n ## reload the security group after removing the rules\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n \n for port in public_tcp_ports :\n lb_public_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n lb_public_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n lb_public_listeners = [ ( 443, 8080, 'https', ssl_cert_arn ) ]\n\n public_elb = create_elb( elb_conn,\n get_elb_name( base_name, lb_public_name ),\n subnets,\n lb_public_listeners,\n lb_public_secgrp,\n health_check_port,\n health_check_url,\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, public_elb.dns_name )\n else :\n public_dns_cname = ''\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n \n lb_public_name = app_name + '-PB' \n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n return ( elb, lb_secgrp, dns_alias, lb_public_secgrp )", "def post_loadbalancer_pool_create(self, resource_dict):\n pass", "def get_create_load_balancer_flow(self, load_balancer_id, topology, project_id,\n listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(vthunder_tasks.VthunderInstanceBusy(\n requires=a10constants.COMPUTE_BUSY))\n\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n lb_create_flow.add(a10_database_tasks.CheckExistingVthunderTopology(\n requires=constants.LOADBALANCER,\n inject={\"topology\": topology}))\n\n # Attaching vThunder to LB in database\n if topology == constants.TOPOLOGY_ACTIVE_STANDBY:\n lb_create_flow.add(*self._create_active_standby_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n elif topology == constants.TOPOLOGY_SINGLE:\n lb_create_flow.add(*self._create_single_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n else:\n LOG.error(\"Unknown topology: %s. 
Unable to build load balancer.\",\n topology)\n raise exceptions.InvalidTopology(topology=topology)\n\n # IMP: Now creating vThunder config here\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n vthunder = self._vthunder_repo.get_vthunder_by_project_id(db_apis.get_session(),\n project_id)\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(\n self.get_post_lb_vthunder_association_flow(\n post_amp_prefix, load_balancer_id, topology, vthunder,\n mark_active=(not listeners)))\n lb_create_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n lb_create_flow.add(vthunder_tasks.AllowL2DSR(\n requires=(constants.SUBNET, constants.AMPHORA,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA)))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_flow=True))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_vthunder_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n\n return lb_create_flow", "def load_balancer_example(lb_info, lb_id, status,\n current_time):\n lb_example = {\"name\": lb_info[\"name\"],\n \"id\": lb_id,\n \"protocol\": lb_info[\"protocol\"],\n \"port\": lb_info.get(\"port\", 80),\n \"algorithm\": lb_info.get(\"algorithm\") or \"RANDOM\",\n \"status\": status,\n \"cluster\": {\"name\": \"test-cluster\"},\n \"timeout\": lb_info.get(\"timeout\", 30),\n \"created\": {\"time\": current_time},\n \"virtualIps\": [{\"address\": \"127.0.0.1\",\n \"id\": 1111, \"type\": \"PUBLIC\", \"ipVersion\": \"IPV4\"},\n {\"address\": \"0000:0000:0000:0000:1111:111b:0000:0000\",\n \"id\": 1111,\n \"type\": \"PUBLIC\",\n \"ipVersion\": \"IPV6\"}],\n \"sourceAddresses\": {\"ipv6Public\": \"0000:0001:0002::00/00\",\n \"ipv4Servicenet\": \"127.0.0.1\",\n \"ipv4Public\": \"127.0.0.1\"},\n \"httpsRedirect\": lb_info.get(\"httpsRedirect\", False),\n \"updated\": {\"time\": current_time},\n \"halfClosed\": lb_info.get(\"halfClosed\", False),\n \"connectionLogging\": lb_info.get(\"connectionLogging\", {\"enabled\": False}),\n \"contentCaching\": {\"enabled\": False}}\n if lb_info.get(\"metadata\"):\n lb_example.update({\"metadata\": _format_meta(lb_info[\"metadata\"])})\n return lb_example", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n 
loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def pre_loadbalancer_member_create(self, resource_dict):\n pass", "def create(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.create_internet_gateway()", "def from_boto_dict(cls, load_balancer) -> \"ELB\":\n\n name = load_balancer[\"LoadBalancerName\"]\n dns_name = load_balancer[\"DNSName\"]\n hosted_zone_name = load_balancer.get(\"CanonicalHostedZoneName\")\n hosted_zone_id = load_balancer[\"CanonicalHostedZoneNameID\"]\n listener_descriptions = load_balancer[\"ListenerDescriptions\"]\n policies = load_balancer[\"Policies\"]\n backend_server_descriptions = load_balancer[\"BackendServerDescriptions\"]\n availability_zones = load_balancer[\"AvailabilityZones\"]\n subnets = load_balancer[\"Subnets\"]\n vpc_id = load_balancer[\"VPCId\"]\n instance_ids = load_balancer[\"Instances\"]\n health_check = ELBHealthCheck.from_boto_dict(load_balancer[\"HealthCheck\"])\n source_security_group = load_balancer[\"SourceSecurityGroup\"]\n security_groups = load_balancer[\"SecurityGroups\"]\n created_time = load_balancer[\"CreatedTime\"]\n scheme = ELBScheme.from_str(load_balancer[\"Scheme\"])\n\n return cls(\n name,\n dns_name,\n hosted_zone_name,\n hosted_zone_id,\n listener_descriptions,\n policies,\n backend_server_descriptions,\n availability_zones,\n subnets,\n vpc_id,\n instance_ids,\n health_check,\n source_security_group,\n security_groups,\n created_time,\n scheme,\n )", "def update_load_balancer(self,\n instance_id: str,\n dnszone_id: str,\n lb_id: str,\n *,\n name: str = None,\n description: str = None,\n enabled: bool = None,\n ttl: int = None,\n fallback_pool: str = None,\n default_pools: List[str] = None,\n az_pools: List['LoadBalancerAzPoolsItem'] = None,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n if dnszone_id is None:\n raise ValueError('dnszone_id must be provided')\n if lb_id is None:\n raise ValueError('lb_id must be provided')\n if az_pools is not None:\n az_pools = [convert_model(x) for x in az_pools]\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='update_load_balancer')\n headers.update(sdk_headers)\n\n data = {\n 'name': name,\n 'description': description,\n 'enabled': enabled,\n 'ttl': ttl,\n 'fallback_pool': fallback_pool,\n 'default_pools': default_pools,\n 'az_pools': az_pools\n }\n data = {k: v for (k, v) in data.items() if v is not None}\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url 
= '/instances/{0}/dnszones/{1}/load_balancers/{2}'.format(\n *self.encode_path_vars(instance_id, dnszone_id, lb_id))\n request = self.prepare_request(method='PUT',\n url=url,\n headers=headers,\n data=data)\n\n response = self.send(request)\n return response", "def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id')}\n return create_listener(request, **kwargs)" ]
[ "0.7186875", "0.7094904", "0.70398855", "0.6917474", "0.68913764", "0.6481388", "0.6477398", "0.636849", "0.61646646", "0.6153201", "0.6110362", "0.6108698", "0.6096575", "0.59728664", "0.5910449", "0.59034276", "0.585251", "0.5678679", "0.56646043", "0.56287825", "0.56194615", "0.56033564", "0.55942744", "0.5564207", "0.5503333", "0.548975", "0.5484651", "0.5434709", "0.54216003", "0.5387411" ]
0.7172102
1
Creates a listener for the specified GWLB.
def create_fwd_listener(gwlb_arn, tg_arn):
    try:
        response = elbv2.create_listener(
            LoadBalancerArn=gwlb_arn,
            DefaultActions=[
                {
                    'Type': 'forward',
                    'TargetGroupArn': tg_arn,
                }
            ]
        )
        listener_arn = response['Listeners'][0]['ListenerArn']
        return response, listener_arn
    except ClientError as e:
        logging.error(e)
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_listener(self, context, listener):\n LOG.info(\"Received request 'Create Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_CREATE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])", "def AddListener(self, listener):\n pass", "def create_listener(self, handler_function, options=None):\n return self.Listener(handler_function)", "def create_gwlb(gwlb_name, subnet_id_list):\n logging.info(f\"Creating gateway load balancer: {gwlb_name}\")\n waiter = elbv2.get_waiter('load_balancer_available')\n try:\n response = elbv2.create_load_balancer(\n Name=gwlb_name,\n Subnets=subnet_id_list,\n Tags=[{'Key': 'Name', 'Value': gwlb_name}],\n Type='gateway'\n )\n gwlb_arn = response['LoadBalancers'][0]['LoadBalancerArn']\n logging.info(\"Waiting for GWLB's state to change to available\")\n waiter.wait(\n LoadBalancerArns=[gwlb_arn],\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 40\n }\n )\n return response, gwlb_arn\n except ClientError as e:\n logging.error(e)\n return None", "def listener(self, aaidee=0):\n return Listener(self._ptr, aaidee)", "def create_listener(request, **kwargs):\n data = request.DATA\n\n try:\n default_tls_ref = data['certificates'][0]\n except (KeyError, IndexError):\n default_tls_ref = None\n\n conn = get_sdk_connection(request)\n # TODO(johnsom) Add SNI support\n # https://bugs.launchpad.net/octavia/+bug/1714294\n listener = conn.load_balancer.create_listener(\n protocol=data['listener']['protocol'],\n protocol_port=data['listener']['protocol_port'],\n load_balancer_id=kwargs['loadbalancer_id'],\n name=data['listener'].get('name'),\n description=data['listener'].get('description'),\n connection_limit=data['listener'].get('connection_limit'),\n default_tls_container_ref=default_tls_ref,\n sni_container_refs=None,\n admin_state_up=data['listener'].get('admin_state_up'),\n insert_headers=data['listener'].get('insert_headers'),\n timeout_client_data=data['listener'].get('timeout_client_data'),\n timeout_member_connect=data['listener'].get('timeout_member_connect'),\n timeout_member_data=data['listener'].get('timeout_member_data'),\n timeout_tcp_inspect=data['listener'].get('timeout_tcp_inspect'),\n allowed_cidrs=data['listener'].get('allowed_cidrs'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['listener'].get('tls_ciphers') or None,\n )\n\n if data.get('pool'):\n args = (request, kwargs['loadbalancer_id'], create_pool)\n kwargs = {'callback_kwargs': {'listener_id': listener.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(listener)", "def build_listener(self):\n # background = LiveSpeech(**sc.background_config)\n\n \"\"\"Creating an object for an activation word\"\"\"\n activation = LiveSpeech(activation_config={\n 'lm': False,\n 'keyphrase': 'eva',\n 'kws_threshold': self.settings.twsVol,\n })\n\n status = threading.Event()\n\n signal.signal(signal.SIGUSR1, self.handler)\n\n pid = os.getpid()\n\n activation_thread = threading.Thread(name='wait_activ_phrase', target=self.processing_activation_phrase,\n args=(activation, status, pid))\n\n activation_thread.start()", "def prep_listener(self):\n # Search for listener.\n listener = search.object_search(self.key+\"-listener\",\n typeclass=Listener)\n\n if listener:\n # Use an existing listener.\n listener = listener[0]\n 
listener.move_to(self.db.ev_location, quiet=True)\n self.db.listener = listener\n listener.db.bot = self\n else:\n # Create a new listener.\n listener = create.create_object(Listener, key=self.key+\"-listener\",\n location=self.db.ev_location)\n self.db.listener = listener\n listener.db.bot = self", "def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)", "def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def add_listener(self, listener):\r\n self.listeners.append(listener)", "def addServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def addListener(self, listener):\n ret = TaurusAttribute.addListener(self, listener)\n\n if not ret:\n return ret\n\n if self.__subscription_state == SubscriptionState.Unsubscribed:\n self.__subscription_state = SubscriptionState.Subscribed\n\n assert len(self._listeners) >= 1\n\n if self.isPollingActive():\n Manager().addJob(self.__fireRegisterEvent, None, (listener,))\n return ret", "def from_boto_dict(cls, listener) -> \"ELBListener\":\n\n protocol = listener[\"Protocol\"]\n load_balancer_port = listener[\"LoadBalancerPort\"]\n instance_protocol = listener[\"InstanceProtocol\"]\n instance_port = listener[\"InstancePort\"]\n ssl_certificate_id = listener.get(\"SSLCertificateId\")\n\n return cls(\n protocol,\n load_balancer_port,\n instance_protocol,\n instance_port,\n ssl_certificate_id,\n )", "def add_listener ( cls, listener, class_name = '' ):\n MetaHasTraits._listeners.setdefault( class_name, [] ).append( listener )", "def add_listener(self, listener: CBListenerType) -> None:\n with self._lock:\n self._listeners.append(listener) # type: ignore[arg-type]", "def add_listener(self, listener):\n self.listeners.append(listener)", "def add_listener(self, callback, state_changes=[\"stopped\"]):\n self.listeners.append({\"callback\": callback, \"state_changes\": state_changes})", "def _add_listener_from_class(self, lstnr):\n if is_string(lstnr):\n # Get listener class from string\n try:\n lclass = utils.get_module_element_from_path(lstnr)\n except Exception as e:\n raise CpoException(\"Unable to retrieve solver listener class '{}': {}\".format(lstnr, e))\n if not inspect.isclass(lclass):\n raise CpoException(\"Solver listener '{}' is not a class.\".format(lstnr))\n if not issubclass(lclass, CpoSolverListener):\n raise CpoException(\"Solver listener class '{}' should extend CpoSolverListener.\".format(lstnr))\n else:\n # Listener is assumed to directly be a class\n lclass = lstnr\n if not inspect.isclass(lclass):\n raise CpoException(\"Solver listener '{}' is not a class.\".format(lclass))\n if not issubclass(lclass, CpoSolverListener):\n raise CpoException(\"Solver listener class '{}' should extend 
CpoSolverListener.\".format(lclass))\n # Add listener\n self.add_listener(lclass())", "def add_listener(cls, listener: ConfigUnitListener) -> None:\n cls.listener.append(listener)", "def listen(eventType):\n def _decoration(fcn):\n fcn.listen = True\n fcn.eventType = eventType\n return fcn\n return _decoration", "def set_listener(self, listener):\n\t\tth = current_thread()\n\n\t\t#print '>> SET listener on', th.name, listener\n\n\t\tth.listener = listener", "def events_init(sc, drivers, rpcmgr):\n ev_ids = [lb_const.EVENT_CREATE_LOADBALANCER_V2,\n lb_const.EVENT_UPDATE_LOADBALANCER_V2,\n lb_const.EVENT_DELETE_LOADBALANCER_V2,\n\n lb_const.EVENT_CREATE_LISTENER_V2,\n lb_const.EVENT_UPDATE_LISTENER_V2,\n lb_const.EVENT_DELETE_LISTENER_V2,\n\n lb_const.EVENT_CREATE_POOL_V2, lb_const.EVENT_UPDATE_POOL_V2,\n lb_const.EVENT_DELETE_POOL_V2,\n\n lb_const.EVENT_CREATE_MEMBER_V2,\n lb_const.EVENT_UPDATE_MEMBER_V2,\n lb_const.EVENT_DELETE_MEMBER_V2,\n\n lb_const.EVENT_CREATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,\n lb_const.EVENT_DELETE_HEALTH_MONITOR_V2,\n\n lb_const.EVENT_AGENT_UPDATED_V2,\n lb_const.EVENT_COLLECT_STATS_V2\n ]\n\n evs = []\n for ev_id in ev_ids:\n ev = nfp_event.Event(id=ev_id, handler=LBaaSV2EventHandler(\n sc, drivers, rpcmgr))\n evs.append(ev)\n sc.register_events(evs)", "def edit(env, identifier, listener, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'listenerUuid': listener\n }\n\n arg_to_option = {\n 'frontprotocol': 'frontendProtocol',\n 'backprotocol': 'backendProtocol',\n 'frontport': 'frontendPort',\n 'backport': 'backendPort',\n 'method': 'loadBalancingMethod',\n 'connections': 'maxConn',\n 'sticky': 'sessionType',\n 'clienttimeout': 'clientTimeout',\n 'sslcert': 'tlsCertificateId'\n }\n\n for key, value in args.items():\n if value:\n new_listener[arg_to_option[key]] = value\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def listen(self, callback: Callable, *,\n event_filter: Optional[List[str]] = None,\n active_monitor_filter: Optional[List[Union[str, int]]] = None) -> EventListener:\n stats.inc('num_listeners', 'EVENT')\n listener = EventListener(self, callback, event_filter=event_filter, active_monitor_filter=active_monitor_filter)\n self.listeners.add(listener)\n return listener", "def add(self, evtype, fileno, cb):\n listener = self.lclass(evtype, fileno, cb)\n bucket = self.listeners[evtype]\n if fileno in bucket:\n if g_prevent_multiple_readers:\n raise RuntimeError(\"Second simultaneous %s on fileno %s \"\\\n \"detected. Unless you really know what you're doing, \"\\\n \"make sure that only one greenthread can %s any \"\\\n \"particular socket. Consider using a pools.Pool. 
\"\\\n \"If you do know what you're doing and want to disable \"\\\n \"this error, call \"\\\n \"eventlet.debug.hub_multiple_reader_prevention(False)\" % (\n evtype, fileno, evtype))\n # store off the second listener in another structure\n self.secondaries[evtype].setdefault(fileno, []).append(listener)\n else:\n bucket[fileno] = listener\n return listener", "def listener(cls):\n func = cls.__init__\n\n # Wraps the class constructor to automate the subscription of methods to\n # event handlers\n @wraps(cls.__init__)\n def new_init(self, *args, **kwargs):\n _subscribe_marked_events(self)\n func(self, *args, **kwargs)\n\n # Patching the constructor\n cls.__init__ = new_init\n return cls", "def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id')}\n return create_listener(request, **kwargs)", "def listen(obj, name, func):\n _signals(obj, name).append(func)", "def add_listener(self, listener):\n self._listeners.append(listener)" ]
[ "0.680093", "0.6062638", "0.5872074", "0.568026", "0.562014", "0.55364203", "0.5465429", "0.54544973", "0.5450853", "0.5429485", "0.53733176", "0.5358816", "0.5357443", "0.53148276", "0.53111404", "0.5308438", "0.52377367", "0.5230723", "0.5210393", "0.51991904", "0.5175915", "0.5172461", "0.50238955", "0.50153726", "0.50148976", "0.49987003", "0.49754584", "0.49591768", "0.4918652", "0.49113208" ]
0.65823334
1
Creates Appliance Gateway (GWLB) and associated Target Group (TG) and Listener, and registers target(s)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tg_name', required=True, help='specify target group name', type=str)
    parser.add_argument('--gwlb_name', required=True, help='specify gateway load balancer name', type=str)
    parser.add_argument('--vpc_id', required=True, help='specify vpc id', type=str)
    parser.add_argument('--subnet_ids', nargs='+', required=True, help='specify subnet ids')
    parser.add_argument('--target_ids', nargs='+', required=True, help='specify target ids')
    args = parser.parse_args()

    ############################
    # Define script variables:
    ############################
    tg_name = args.tg_name
    gwlb_name = args.gwlb_name
    vpc_id = args.vpc_id
    subnet_ids = args.subnet_ids
    target_ids = args.target_ids

    tg1_args = {
        'name': tg_name,
        'protocol': 'GENEVE',
        'port': 6081,
        'healthchkproto': 'HTTP',
        'healthchkport': '80',
        'healthchkpath': '/',
        'vpc_id': vpc_id,
        'type': 'instance'
    }

    #############################
    # Target Group:
    tg1 = create_tg(**tg1_args)
    print(f"TG ARN: {tg1[1]}")
    # GWLB:
    gwlb1 = create_gwlb(gwlb_name, subnet_ids)
    print(f"GWLB ARN: {gwlb1[1]}")
    # Listener:
    listener1 = create_fwd_listener(gwlb1[1], tg1[1])
    print(f"LISTENER ARN: {listener1[1]}")
    # Register Targets:
    register_targets(tg1[1], target_ids[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gwlb(gwlb_name, subnet_id_list):\n logging.info(f\"Creating gateway load balancer: {gwlb_name}\")\n waiter = elbv2.get_waiter('load_balancer_available')\n try:\n response = elbv2.create_load_balancer(\n Name=gwlb_name,\n Subnets=subnet_id_list,\n Tags=[{'Key': 'Name', 'Value': gwlb_name}],\n Type='gateway'\n )\n gwlb_arn = response['LoadBalancers'][0]['LoadBalancerArn']\n logging.info(\"Waiting for GWLB's state to change to available\")\n waiter.wait(\n LoadBalancerArns=[gwlb_arn],\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 40\n }\n )\n return response, gwlb_arn\n except ClientError as e:\n logging.error(e)\n return None", "def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')", "def run(self, api, media_type, no_of_bridges):\n # header for bridge API\n header = {\"Content-Type\": media_type, \"X-Auth-Token\": \"%s\" %\n AUTH_TOKEN}\n for _ in range(no_of_bridges):\n # generate payload for creating bridge\n # creating bridge requires bridge name\n # bridge name is generated randomly\n bridge_name = ''.join(random.choice(string.ascii_lowercase) for x\n in range(10))\n data = {\"name\": bridge_name, \"tenantId\": \"\"}\n # create bridge\n self._create_bridge(\"POST\", api, header, data)", "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def CreateRequests(self, args):\n\n target_vpn_gateway_ref = self.TARGET_VPN_GATEWAY_ARG.ResolveAsResource(\n args,\n self.resources,\n scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client,\n self.project))\n network_ref = self.NETWORK_ARG.ResolveAsResource(args, self.resources)\n\n request = self.messages.ComputeTargetVpnGatewaysInsertRequest(\n project=self.project,\n region=target_vpn_gateway_ref.region,\n targetVpnGateway=self.messages.TargetVpnGateway(\n description=args.description,\n name=target_vpn_gateway_ref.Name(),\n network=network_ref.SelfLink()\n ))\n return [request]", "def remote_createTunnel(self, name, targetIP):\r\n if name not 
in self._bridges:\r\n raise InternalError('Bridge does not exist.')\r\n\r\n key = (name, targetIP)\r\n\r\n if key in self._uid:\r\n raise InternalError('Tunnel already exists.')\r\n\r\n while 1:\r\n uid = randomString(self._UID_LEN)\r\n\r\n if uid not in self._uid.itervalues():\r\n break\r\n\r\n self._uid[key] = uid\r\n port = 'gre-{0}'.format(uid)\r\n\r\n return execute(('/usr/bin/ovs-vsctl', 'add-port', 'br-{0}'.format(name),\r\n port, '--', 'set', 'interface', port, 'type=gre',\r\n 'options:remote_ip={0}'.format(targetIP)),\r\n reactor=self._reactor)", "def register_feg_gw(c):\n subprocess.check_call(\n 'fab register-feg-gw', shell=True, cwd=FEG_FAB_PATH,\n )", "def create_fwd_listener(gwlb_arn, tg_arn):\n try:\n response = elbv2.create_listener(\n LoadBalancerArn=gwlb_arn,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': tg_arn,\n }\n ]\n )\n listener_arn = response['Listeners'][0]['ListenerArn']\n return response, listener_arn\n except ClientError as e:\n logging.error(e)\n return None", "def create_api_gateway(args):\n return create_pool(_create_api_gateway_from_meta, args, 1)", "def _add(self, arn, targets):\n\n # TODO: In the future, add support for the optional Port and\n # AvailabilityZone parameters. For now, keeping this dead simple.\n targets = [{'Id': t} for t in targets]\n\n try:\n yield self.api_call(\n self.elbv2_conn.register_targets,\n TargetGroupArn=arn,\n Targets=targets)\n except botocore.exceptions.ClientError as e:\n raise exceptions.UnrecoverableActorFailure(str(e))", "def _create_tunnels(self):\n if not self.is_active:\n try:\n self._connect_to_gateway()\n except socket.gaierror: # raised by paramiko.Transport\n msg = 'Could not resolve IP address for {0}, aborting!' \\\n .format(self.ssh_host)\n self.logger.error(msg)\n return\n except (paramiko.SSHException, socket.error) as e:\n template = 'Could not connect to gateway {0}:{1} : {2}'\n msg = template.format(self.ssh_host, self.ssh_port, e.args[0])\n self.logger.error(msg)\n return\n for (rem, loc) in zip(self._remote_binds, self._local_binds):\n try:\n self._make_ssh_forward_server(rem, loc)\n except BaseSSHTunnelForwarderError as e:\n msg = 'Problem setting SSH Forwarder up: {0}'.format(e.value)\n self.logger.error(msg)", "def create_gateway():\n # Start the gateway to connect with py4j\n if 'gateway' not in os.popen('jps').read():\n os.system('java gateway &')\n delay(0.5)\n LOGGER.debug('Started Java gateway')\n # Connect to gateway getting jvm object\n LOGGER.debug('Connecting to gateway from py4j')\n gate = JavaGateway()\n return gate", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def test_mcp_pike_ovs_l2gw_bgpvpn_install(self, underlay,\n openstack_deployed,\n openstack_actions):\n openstack_actions._salt.local(\n tgt='*', fun='cmd.run',\n args='service ntp stop; ntpd -gq; service ntp start')\n\n registry = 'docker-dev-local.docker.mirantis.net/mirantis/networking'\n name = 'rally-tempest-net-features:latest'\n\n if settings.RUN_TEMPEST:\n openstack_actions.run_tempest(\n pattern=settings.PATTERN,\n conf_name='net_features.conf',\n registry='{0}/{1}'.format(registry, name)\n )\n openstack_actions.download_tempest_report()\n LOG.info(\"*************** DONE **************\")", "def test_mcp_pike_ovs_l2gw_bgpvpn_install(self, underlay,\n openstack_deployed,\n openstack_actions):\n openstack_actions._salt.local(\n tgt='*', fun='cmd.run',\n args='service ntp stop; ntpd -gq; service ntp start')\n\n registry = 
'docker-dev-local.docker.mirantis.net/mirantis/networking'\n name = 'rally-tempest-net-features:latest'\n\n if settings.RUN_TEMPEST:\n openstack_actions.run_tempest(\n pattern=settings.PATTERN,\n conf_name='net_features.conf',\n registry='{0}/{1}'.format(registry, name)\n )\n openstack_actions.download_tempest_report()\n LOG.info(\"*************** DONE **************\")", "def build(self):\n\n LOG.debug('-' * 80)\n LOG.debug(\"build\")\n LOG.debug('-' * 80)\n for b in self._bridges:\n bridge = b['bridge']\n # TODO(tomohiko) Need to something when not bridge['provided']?\n if bridge['provided']:\n LOG.info('Skipped building bridge=%r', bridge)\n\n for h in self._hosts:\n host = h['host']\n if host.get('tunnel_zone'):\n tz_data = host.get('tunnel_zone')\n tzs = self._api.get_tunnel_zones()\n\n # Ensure that TZ exists\n tz = [t for t in tzs if t.get_name() == tz_data['name']]\n if tz == []:\n if is_vxlan_enabled():\n tz = self._api.add_vxlan_tunnel_zone()\n else:\n tz = self._api.add_gre_tunnel_zone()\n tz.name(tz_data['name'])\n tz.create()\n else:\n tz = tz[0]\n\n # Ensure that the host is in the TZ\n tz_hosts = tz.get_hosts()\n tz_host = filter(\n lambda x: x.get_host_id() == host['mn_host_id'],\n tz_hosts)\n if tz_host == []:\n tz_host = tz.add_tunnel_zone_host()\n tz_host.ip_address(tz_data['ip_addr'])\n tz_host.host_id(host['mn_host_id'])\n tz_host.create()\n\n\n if host['provided'] == True:\n LOG.info('Skipped building host=%r', host)\n else:\n #TODO(tomoe): when we support provisioning Midolman host with\n # this tool.\n pass\n interfaces = host['interfaces']\n\n futures = []\n for i in interfaces:\n iface = Interface(i['interface'], host)\n self._interfaces[(host['id'], i['interface']['id'])] = iface\n f = iface.create()\n futures.append(f)\n\n wait_on_futures(futures)\n\n LOG.debug('-' * 80)\n LOG.debug(\"end build\")\n LOG.debug('-' * 80)", "def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)", "def create_network(self, network_o):\n # Retrieve the tenant or group from the network object\n tenant_mo = self.moDir.lookupByDn(network_o.group)\n # Query the children bellow the tenant\n tenant_children = self.query_child_objects(network_o.group)\n # Filters the children in memory looking for the ones that belongs to the Ap class and with an specific name.\n ap_list = filter(lambda x: type(x).__name__ == 'Ap' and x.name == AP_NAME,\n tenant_children)\n # Check if Application profile exists, if not creates one.\n if len(ap_list) == 0:\n network_ap = self.create_ap(str(tenant_mo.dn), AP_NAME)\n else:\n network_ap = ap_list[0]\n\n # Creates bridge domain\n bd_mo = self.create_bd('vlan' + str(network_o.encapsulation), tenant_mo, None)\n\n\n # Set BD parameters. 
This one are needed so that the bridge domain floods the VLAN packets across the fabric\n bd_mo.arpFlood = YES\n bd_mo.multiDstPktAct = BD_FLOOD\n bd_mo.unicastRoute = NO\n bd_mo.unkMacUcastAct = FLOOD\n bd_mo.unkMcastAct = FLOOD\n\n self.commit(bd_mo)\n\n # Filters the tenant children in memory looking for the ones that belongs to the Ctx\n # class and with an specific name.\n tenant_ctxs = filter(lambda x: type(x).__name__ == 'Ctx' and x.name == VRF_NAME,\n self.query_child_objects(str(tenant_mo.dn)))\n\n # check if vrf exists, if not creates one\n if len(tenant_ctxs) == 0:\n bd_ctx = self.create_vrf(tenant_mo.dn, VRF_NAME)\n else:\n bd_ctx = tenant_ctxs[0]\n\n # Filters the bridge domain children in memory looking for the ones that belongs to the RsCtx class\n bd_cxts = filter(lambda x: type(x).__name__ == 'RsCtx',\n self.query_child_objects(str(bd_mo.dn)))\n # Selects the first RsCtx object and assign the tnFvCtxName to the context/vrf name to create the relashionship\n if len(bd_cxts) > 0:\n bd_cxts[0].tnFvCtxName = bd_ctx.name\n self.commit(bd_cxts[0])\n\n # Creates and return an EPG\n return self.create_epg(str(network_ap.dn), str(bd_mo.dn), network_o.name + VLAN_SUFIX +\n str(network_o.encapsulation))", "def create_outbound(self, addr, use_new_connection=False):", "def createNet(self):\n\n sw = OVSKernelSwitch\n topo = G2Topo(self.config.topoData)\n ctrl = RemoteController('c', ip=REMOTE_CONTROLLER_IP, port=CONTROLLER_PORT)\n\n # Default link parameters.\n # HTB: Hierarchical Token Bucket rate limiter.\n spec = self.config.topoData['defaultLinkInfo']\n if spec:\n mybw = float(spec['bw'])\n mydelay = spec['delay']\n myloss = float(spec['loss'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] == 'N/A':\n myqueue = int(spec['max_queue_size'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue)\n if spec['max_queue_size'] == 'N/A' and spec['use_htb'] != 'N/A':\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, use_htb=myhtb)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] != 'N/A':\n myqueue = int(spec['max_queue_size'])\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue, use_htb=myhtb)\n else:\n # No spec for default parameters, using Mininet defaults.\n info(\"**** [G2]: using Mininet default parameters for links other than those configured in link_info \\n\")\n link = TCLink\n\n # Configure bw, delay, loss, etc. 
for some links that are specified in config file.\n for spec in self.config.topoData['linkInfos']:\n src = spec['src']\n dst = spec['dst']\n try:\n linkInfo = topo.linkInfo(src, dst)\n if spec['bw'] != 'N/A':\n linkInfo['bw'] = float(spec['bw']) # Mbit\n if spec['delay'] != 'N/A':\n linkInfo['delay'] = spec['delay'] # ms\n if spec['loss'] != 'N/A':\n linkInfo['loss'] = float(spec['loss']) # Percentage\n if spec['max_queue_size'] != 'N/A':\n linkInfo['max_queue_size'] = int(spec['max_queue_size'])\n if spec['use_htb'] != 'N/A':\n linkInfo['use_htb'] = bool(spec['use_htb'])\n\n topo.setlinkInfo(src,dst,linkInfo)\n except KeyError:\n info(\"**** [G2]: no link exists between switch pair (%s, %s) \\n\" %(src, dst))\n\n # Assign a fraction of overall CPU time to Mininet hosts.\n nHosts = float(len(self.config.topoData['hosts']))\n cpuHostFrac = 0.50/nHosts\n # 'cpu' is the fraction of CPU that each host would get.\n # Indirectly, it sets 'cpu.cfs_quota_us': the total available run-time within a period (in microseconds).\n # Mininet uses the following scheme: cfs_quota_us = (cpuHostFrac * nCPU * period_us) microseconds.\n # 'period_us' sets cpu.cfs_period_us.\n # Larger period would allow for increased burst capacity.\n host = custom(CPULimitedHost, cpu=cpuHostFrac, period_us=100000)\n\n net = Mininet(topo=topo,\n host=host,\n switch=sw,\n controller=ctrl,\n waitConnected=True,\n autoStaticArp=True,\n link=link)\n\n # Create a default route for each host.\n # Turn on tcpdump on each host if debug mode is on.\n for hs in topo.hosts():\n net.getNodeByName(hs).setDefaultRoute(intf='%s-eth0' %hs) # 1st interface on hosts is hi-eth0\n if self.config.isDebug:\n net.getNodeByName(hs).cmd('tcpdump -w %s.pcap -i %s-eth0 &' %(hs,hs))\n return net", "def create_listeners(self):\n target_groups_config = self.get_target_groups_config()\n balancer_arn = self.get_balancer_arn()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n response = self.client.create_listener(\n LoadBalancerArn=balancer_arn,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': self.get_target_group_arn(short_name)\n }\n ],\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info('Target group {group} bound to {balancer} load balancer.'.format(\n group=target_group_name,\n balancer=self.get_balancer_name(),\n ))\n response_data[target_group_name] = response['Listeners']\n\n return response_data", "def createtarget(self, lang, gentarget, dependees):\r\n raise NotImplementedError", "def create_listener(self, context, listener):\n LOG.info(\"Received request 'Create Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_CREATE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])", "def create_ig(ec2):\n ## create internet gateway\n print(\"\\n===Creating Internet Gateway...\")\n ig = ec2.create_internet_gateway(TagSpecifications=[{\n \"ResourceType\":\"internet-gateway\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": IG_NAME},\n ]}])\n print(\"===Internet gateway is reay!!\")\n return ig", "def __init__(self):\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n self.bus = dbus.SystemBus()\n self.adapter = self._find_adapter()\n if not self.adapter:\n IFaceNotFoundException('%s interface not found' % 
GATT_MANAGER_IFACE)\n self.service_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),\n GATT_MANAGER_IFACE)\n\n self.mainloop = GObject.MainLoop()\n self.ctx = GattContext(self.bus, self.mainloop)\n self.app = Application(self.ctx)\n\n #print('Registering GATT application...')\n self.service_manager.RegisterApplication(self.app.get_path(), {},\n reply_handler=register_app_cb,\n error_handler=register_app_error_cb)", "def _create_protocol_ws_and_hgap_in_sync():\n data_file = mockwiredata.DATA_FILE_VM_SETTINGS.copy()\n\n with mock_wire_protocol(data_file) as protocol:\n timestamp = datetime.datetime.utcnow()\n incarnation = '111'\n etag = '111111'\n protocol.mock_wire_data.set_incarnation(incarnation, timestamp=timestamp)\n protocol.mock_wire_data.set_etag(etag, timestamp=timestamp)\n protocol.mock_wire_data.set_vm_settings_source(GoalStateSource.Fabric)\n\n # Do a few checks on the mock data to ensure we catch changes in internal implementations\n # that may invalidate this setup.\n vm_settings, _ = protocol.client.get_host_plugin().fetch_vm_settings()\n if vm_settings.etag != etag:\n raise Exception(\"The HostGAPlugin is not in sync. Expected ETag {0}. Got {1}\".format(etag, vm_settings.etag))\n if vm_settings.source != GoalStateSource.Fabric:\n raise Exception(\"The HostGAPlugin should be returning a Fabric goal state. Got {0}\".format(vm_settings.source))\n\n goal_state = GoalState(protocol.client)\n if goal_state.incarnation != incarnation:\n raise Exception(\"The WireServer is not in sync. Expected incarnation {0}. Got {1}\".format(incarnation, goal_state.incarnation))\n\n if goal_state.extensions_goal_state.correlation_id != vm_settings.correlation_id:\n raise Exception(\n \"The correlation ID in the WireServer and HostGAPlugin are not in sync. WS: {0} HGAP: {1}\".format(\n goal_state.extensions_goal_state.correlation_id, vm_settings.correlation_id))\n\n yield protocol", "async def create_route_igw(self, rtb_id, cidr_block, gateway_id):\n self._client.create_route(\n DestinationCidrBlock=cidr_block,\n RouteTableId=rtb_id,\n GatewayId=gateway_id,\n )" ]
[ "0.62968814", "0.61144644", "0.55747527", "0.5550602", "0.5515989", "0.55029714", "0.54040945", "0.53575385", "0.5312576", "0.52309376", "0.52106285", "0.52051246", "0.5162079", "0.5135037", "0.5105967", "0.5101289", "0.50615025", "0.50615025", "0.5044468", "0.5033211", "0.5015754", "0.5000626", "0.4968081", "0.49508822", "0.49497032", "0.49314877", "0.49240804", "0.49089465", "0.49058273", "0.49039283" ]
0.6617608
0
Read a bufr file and convert to a Pandas DataFrame. Variables used inside the DataFrame are already CDM compliant
def bufr_to_dataframe(file=''): if debug: print("Running bufr_to_dataframe for: ", file) check_read_file (file = file, read= False) f = open(file) #source_file = [l for l in file.split('/') if '.bfr' in l][0] read_data = [] """ Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) """ #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body'] lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan obs_id, report_id = -1, 0 # progressive observation id stations_id = [] while 1: #lista = [] # temporary list bufr = codes_bufr_new_from_file(f) if bufr is None: break codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section date = '19'+codes_get_array(bufr, "typicalDate")[0][2:] timePeriod = codes_get_array(bufr, "typicalTime")[0] year, month, day = date[0:4], date[4:6] , date[6:8] hour, minutes = timePeriod[0:2] , timePeriod[2:4] idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M') iday = int(year + month + day ) pressure = codes_get_array(bufr, "pressure") temperature = codes_get_array(bufr, "airTemperature") wind_direction = codes_get_array(bufr, "windDirection") wind_speed = codes_get_array(bufr, "windSpeed") try: # not all the bufr files have the dewpoint dew_point = codes_get_array(bufr, "dewpointTemperature") except: dew_point= np.empty((1, len(temperature))) dew_point[:] = np.nan num_lev = len(pressure) # number of distinct pressure levels try: geopotential = codes_get_array(bufr, "nonCoordinateGeopotentialHeight") except: geopotential = np.full( (1,len(temperature)) , np.nan )[0,:] if report_id == 0: ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time ''' lat = codes_get(bufr, "latitude") lon = codes_get(bufr, "longitude") alt = float(codes_get(bufr, "heightOfStation")) blockNumber = codes_get(bufr, "blockNumber") stationNumber = codes_get(bufr, "stationNumber") #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str statid = blockNumber*1000+stationNumber if statid not in stations_id: stations_id.append(statid) codes_release(bufr) miss_value = -1.e100 for i in range(len(temperature)): obs_id = obs_id + 1 airT = temperature[i] winds = wind_speed[i] windd = wind_direction[i] press = pressure[i] gph = geopotential[i] dp = dew_point[i] if press == miss_value: press = np.nan if dp == miss_value: dp = np.nan if airT == miss_value : # replacing none values with numpy nans airT = np.nan if winds == miss_value: winds = np.nan if gph == miss_value: gph = np.nan if windd == 2147483647 or windd == -2147483647: windd = np.nan for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ): obs_id = obs_id + 1 if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1 z_type = 1 read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 z_type = 2 read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) else: z_type = -2147483648 read_data.append( ( 
'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) report_id += 1 df = pd.DataFrame(data= read_data, columns= column_names) df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length ) df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan) df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) return df, stations_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_swc(file_name):\n\n df = pd.read_csv(file_name, delimiter=' ', header=None, comment='#',\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True).astype({'sample':int,'identifier':int,'x':float,'y':float,'z':float,'r':float,'parent':int})\n return df", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")", "def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df", "def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def load_swc(file_name):\n df = pd.read_csv(file_name, sep = ' ', header=None, comment='#', index_col = False,\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True)\n return df", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df", "def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if 
headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def _csv2df(data_file):\n df = pd.read_csv(data_file, encoding=\"ISO-8859-1\", low_memory=False)\n return df", "def read_csv_to_dataframe(file_name):\n df = pd.read_csv(file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n return df", "def getDataframe(file_name):\n # maak pandas dataframe van KNMI zonder comments\n if '.csv' in file_name:\n dataframe = pd.read_csv(file_name, delimiter = ';', comment='#')\n return dataframe\n elif '.txt' in file_name:\n dataframe = pd.read_csv(file_name, delimiter = ',', comment='#')\n return dataframe\n else:\n quit('Usage: use files of .csv or .txt format')", "def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def read_from_file_no_check(file_name: str) -> pd.DataFrame:\n return pd.read_csv(file_name)", "def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:\n extract_dict: typing.Dict[str, str]\n ctffind_meta_data: pd.DataFrame\n lines: typing.List[str]\n match: typing.Optional[typing.Match[str]]\n non_string_values: typing.Set[str]\n\n extract_dict = get_ctffind_4_1_0_extract_dict()\n ctffind_meta_data = pd.DataFrame(index=[0], columns=extract_dict.keys())\n with open(file_name, 'r') as read:\n lines = read.readlines()\n\n non_string_values = set([\n 'MicrographNameNoDW',\n 'version'\n ])\n for line in lines:\n for key, value in extract_dict.items():\n match = re.match(value, line)\n if match is not None:\n try:\n ctffind_meta_data[key] = float(match.group(1))\n except ValueError:\n assert key in non_string_values, f'{key}: {match.group(1)}'\n ctffind_meta_data[key] = match.group(1)\n else:\n pass\n return ctffind_meta_data", "def read_bugs(filepath: str) -> pd.DataFrame:\n data = pd.read_csv(filepath)\n df_bugs = data[data['real'] == 'bug']\n df_bugs = df_bugs.replace(\n {\"type\": {\n \"classical\": \"Classical\",\n \"quantum\": \"Quantum\"\n }})\n return df_bugs", "def generate_DataFrame(file_path):\n # print (\"Generating DataFrame\")\n __log(1, 'Generating DataFrame....')\n\n df = pd.read_csv(file_path)\n df = df.rename(columns=lambda x: x.strip())\n df = df.dropna()\n\n for i in list(df.keys()):\n df[i] = df[i].apply(cleaning)\n\n # print (\"DataFrame Generated Successfully\")\n __log(1, 'DataFrame Generated Sucessfully.')\n return df", "def test_read_csv_to_dataframe(fname):\n df = read_csv_to_dataframe(fname)\n print(df.head())", "def load_data(fn):\n return pandas.read_csv(fn, dtype={'Name': str, 'Reason': str, 'Amount': float, 'Day': int})", "def prepare_data(file_path: str):\n movie_industry_df = pd.read_csv(file_path, encoding='latin-1')\n return movie_industry_df", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n 
return df", "def _get_dataframe(self, filePathList):\n filePath = filePathList[0]\n df = pd.read_csv(filePath, sep=\"\\t\", comment=\"#\")\n return df", "def initialize_from_file(filename):\r\n df = pd.read_csv(filename)\r\n return df", "def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame", "def read_data(fname, cols):\n df = (pd.read_csv(fname, header=None, sep=r\"\\s+\", comment=\"#\",\n names=cols, dtype=np.float64)\n .iloc[1:]) # First line is the total number of trees\n # Could reset_index, but we don't shuffle the DataFrame\n return df", "def load_dataset(self, file_path: str,file_name: str) -> pd.DataFrame:\n combined_path=os.path.join(file_path,file_name)\n self.raw_data=pd.read_csv(combined_path)\n return self.raw_data" ]
[ "0.6760399", "0.66910934", "0.66843355", "0.6596809", "0.65731204", "0.64530325", "0.6444564", "0.64268804", "0.64126265", "0.6340299", "0.6340299", "0.6340299", "0.6326406", "0.63134426", "0.6298015", "0.6291843", "0.628385", "0.62818545", "0.6272516", "0.6217461", "0.6215234", "0.62043357", "0.6201659", "0.6200513", "0.6200327", "0.6198028", "0.61775744", "0.6174985", "0.6158075", "0.6150232" ]
0.7691618
0
Read an uadb stationfile in ASCII format and convert to a Pandas DataFrame.
def uadb_ascii_to_dataframe(file=''): if debug: print("Running uadb_ascii_to_dataframe for: ", file) data = check_read_file(file=file, read=True) # TODO #source_file = [l for l in file.split('/') if '.txt' in l][0] nmiss = 0 search_h = False read_data = [] usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan #usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd, iday, ident, numlev= 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 obs_id = 0 stations_id = [] for i, line in enumerate(data): if line[0] == 'H': try: # Header usi = int(line[2:14]) # unique station identifier ident = int(line[15:21].replace(' ',''))# WMO if ident not in stations_id: stations_id.append(ident) #if len(ident) == 4: # ident = '0' + ident #idflag = int(line[22:24]) # id flag #d_src = int(line[25:28]) # source dataset #version = float(line[29:34]) # version #dateflag = int(line[35:37]) # date flag year = line[38:42] # year month = "%02d" % int(line[43:45]) day = "%02d" % int(line[46:48]) hour = line[49:53] #locflag = int(line[54:56]) # Location Flag lat = float(line[57:67]) lon = float(line[68:78]) #ele = float(line[79:85]) #stype = int(line[86:88]) numlev = int(line[89:93]) #pvers = line[94:102] if '99' in hour: hour = hour.replace('99', '00') if '99' in day: search_h = True continue minutes = int(hour) % 100 hour = "%02d" % (int(hour) // 100) if minutes > 60 or minutes < 0: minutes = 0 minutes = "%02d" % minutes idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M') iday = int(year + month + day) #pday = int(day) search_h = False except Exception as e: #print("Error: ", i, line, repr(e), "Skipping Block:") search_h = True #iprev = i elif search_h: nmiss += 1 continue # Skipping block else: # Data #ltyp = int(line[0:4]) p = float(line[5:13]) if p != -99999.0 and p != 9999.9: press = float(line[5:13])*100 # converting to Pa, since P is given in mb (1 mb = 1 hPa) else: press = np.nan gph = float(line[14:22]) # gph [m] if gph == -999.0 or gph == -99999.00 or gph >= 99999.0: gph = np.nan temp = float(line[23:29]) if temp == -999.0: temp = np.nan else: temp = temp + 273.15 rh = float(line[30:36]) # % if rh == -999.0: rh = np.nan else: rh = rh / 100. 
# convert to absolute ratio TODO wdir = float(line[37:43]) if wdir == -999.0 or wdir == -999 : wdir = np.nan wspd = float(line[44:50]) # [m/s], module of the velocity if wspd <0 : wspd = np.nan try: for value,var in zip([ gph, temp, wspd, wdir, rh], [ 'gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity'] ): obs_id = obs_id +1 if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1 z_type = 1 read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) ) elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 z_type = 2 read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) ) else: z_type = -2147483648 read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) ) except: 0 #column_names = ['source_file', 'product_code', 'report_id', 'observation_id', 'report_timestamp' , 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body' , 'units', 'number_of_pressure_levels' ] df = pd.DataFrame(data= read_data, columns= column_names) df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length ) df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan) #df['observations_id'] =numpy.char.zfill(numpy.arange(ivar.shape[0]).astype('S10'), 10) df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) #df['report_id'] = numpy.int64 (df['report_id'] ) #df['observation_id'] = numpy.int64 (df['observation_id'] ) df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) print('Done reading DF') return df , stations_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uadb_ascii_to_dataframe(filename, **kwargs):\n import datetime\n import zipfile\n import gzip\n import os\n import io\n import numpy as np\n import pandas as pd\n from . import support as sp\n\n if not os.path.isfile(filename):\n raise IOError(\"File not Found! %s\" % filename)\n\n if '.zip' in filename:\n archive = zipfile.ZipFile(filename, 'r')\n inside = archive.namelist()\n tmp = archive.open(inside[0])\n tmp = io.TextIOWrapper(tmp, encoding='utf-8')\n tmp = tmp.read()\n archive.close()\n data = tmp.splitlines() # Memory (faster)\n elif '.gz' in filename:\n\n with gzip.open(filename, 'rt', encoding='utf-8') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n else:\n with open(filename, 'rt') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n\n raw = []\n headers = []\n dates = []\n nmiss = 0\n iprev = 0\n search_h = False\n i = 0\n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n ident = line[15:21] # WMO\n idflag = int(line[22:24]) # id flag\n d_src = int(line[25:28]) # source dataset\n version = float(line[29:34]) # version\n dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%2d\" % int(line[46:48])\n hour = line[49:53]\n locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n ele = float(line[79:85])\n stype = int(line[86:88])\n numlev = int(line[89:93])\n pvers = line[94:102]\n\n # wired stuff !?\n if '99' in hour:\n hour = hour.replace('99', '00')\n\n if '99' in day:\n search_h = True\n continue\n\n minutes = int(hour) % 100\n hour = \"%02d\" % (int(hour) // 100)\n\n if minutes > 60 or minutes < 0:\n minutes = 0\n\n elif minutes == 60:\n minutes = 59\n\n else:\n pass\n minutes = \"%02d\" % minutes\n idate = datetime.datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n headers.append((idate, usi, numlev, lat, lon, ele, stype))\n pday = int(day)\n search_h = False\n\n except Exception as e:\n print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n if kwargs.get('debug', False):\n raise e\n\n search_h = True\n iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n ltyp = int(line[0:4])\n press = float(line[5:13]) # hPa\n gph = float(line[14:22])\n temp = float(line[23:29]) # degree\n rh = float(line[30:36]) # %\n wdir = float(line[37:43])\n wspd = float(line[44:50]) # m/s\n raw.append((press, gph, temp, rh, wdir, wspd))\n dates.append(idate)\n\n sp.message(\"UADB Lines read:\", i, \"skipped:\", nmiss, \"Header:\", len(headers), **kwargs)\n\n out = pd.DataFrame(data=raw, index=dates, columns=['pres', 'gph', 'temp', 'rhumi', 'windd', 'winds'])\n out = out.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9], np.nan)\n # fix units\n out['pres'] *= 100. 
# need Pa\n out.index.name = 'date'\n headers = pd.DataFrame(data=headers, columns=['date', 'uid', 'numlev', 'lat', 'lon', 'alt', 'stype']).set_index(\n 'date')\n return out, headers", "def load_data(fpath: str, station: Dict[str, Any]) -> pd.DataFrame:\n df = pd.read_csv(\n fpath,\n skiprows=station['header_line_num']-1,\n usecols=['date', 'rain'],\n )\n\n # format the date from a string to a proper datetime object\n df['date'] = pd.to_datetime(df['date'])\n\n # extract year, month, week, and day to separate columns\n df['year'] = df['date'].dt.year\n df['month'] = df['date'].dt.month\n df['day'] = df['date'].dt.dayofyear\n df['week'] = df['date'].dt.weekofyear\n df['year_month'] = df['date'].dt.to_period('M')\n\n return df", "def read_pipe_table_to_pandas(filename):\n\n astropy_data = astropy.io.ascii.read(filename)\n data_stream = StringIO()\n astropy_data[2:].write(data_stream, format='ascii.basic', delimiter='|')\n data_stream.seek(0)\n return pandas.read_csv(data_stream,\n comment='#',\n sep='|',\n skipinitialspace=True)", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def parse_station(station):\n if not station:\n return pd.DataFrame()\n header = get_header(station[0])\n header['ftime'] = get_fntime(station[1], station[2], header) \n df = get_rows(header, station)\n return df", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no 
sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res", "def airport_file_to_df(self):\n\t\tdf = pd.read_csv(\n\t\t\tfilepath_or_buffer=os.path.join(ROOT_DIR, \"raw\", \"airports.csv\".format(self.year)),\n\t\t\tsep=\",\", encoding=\"utf-8\",\n\t\t\tusecols=[\"iata\", \"airport\", \"city\", \"state\", \"country\", \"lat\", \"long\"]\n\t\t)\n\n\t\treturn df", "def igra2_ascii_to_dataframe(file=''):\n if debug:\n 
print(\"Running igra2_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True)\n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n read_data = [] # Lists containing the raw data from the ascii file, and the observation dates\n \"\"\" Data to be extracted and stored from the igra2 station files \n Some info is contained in the header of each ascent, some in the following data \"\"\"\n\n \"\"\" Initialize the variables that can be read from the igra2 files \"\"\"\n ident,year,month,day,hour,reltime,p_src,np_src,lat, lon = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan \n lvltyp1,lvltyp2,etime,press,pflag,gph,zflag,temp,tflag,rh,dpdep,wdir,wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan # initialize to zeros\n stations_id = []\n idate = np.nan\n count = 0\n head_count = 0\n \n obs_id = 0\n \n def make_release_time(date_time, hour, release):\n \"\"\" build a sonde release time \n ex 2019 02 20 00 2349 \n ex 2019 01 10 00 0011 \n They round the release time to the closest hour. \n It can be the same day or the following !!!\n date_time = date_time pytohn object, \n date, time, release = original strings \n \"\"\"\n release_h = int(release[:2])\n release_m = int(release[2:4])\n \n if release_h == 99:\n return 0 #largest integer number int 64 \n \n else:\n if release_m == 99:\n release_m = 0\n release_date_time = date_time.replace(hour= release_h, minute= release_m) \n \n \"\"\" Here, I have to subtract one day to the release time stamp if the hour of the time stamp is in th evening,\n but the nominal time is reported at midnight hence in the following day. For example 2019 02 20 00 2349 from file VMM00048820 \"\"\"\n if hour == '00':\n if release_h > 20:\n release_date_time = release_date_time - timedelta(days=1)\n else:\n pass\n \n return release_date_time \n \n \n for i, line in enumerate(data):\n if line[0] == '#':\n head_count = head_count +1 \n # Info from the Header line of each ascent \n ident = line[1:12] # station identifier\n ident = ident[6:12]\n if ident not in stations_id:\n stations_id.append(ident)\n \n year = line[13:17] # year, months, day, hour of the observation\n month = line[18:20]\n day = line[21:23]\n hour = line[24:26] \n reltime = line[27:31] # release time of the sounding.\n numlev = int(line[32:36]) # number of levels in the sounding == number of data recorded in the ascent\n p_src = line[37:45] # data source code for the pressure levels \n np_src = line[46:54] # data source code for non-pressure levels\n lat = int(line[55:62]) / 10000. 
# latitude and longitude\n lon = int(line[63:71]) / 10000.\n #observation_id = i\n if int(hour) == 99:\n time = reltime + '00'\n else:\n time = hour + '0000'\n \n if '99' in time:\n time = time.replace('99', '00')\n\n idate = datetime.strptime(year + month + day + time, '%Y%m%d%H%M%S') # constructed according to CDM\n \n release_time = make_release_time(idate, hour, reltime) # making the release time \n \n \n iday = int(year + month + day)\n count = count + 1\n else:\n # Data of each ascent\n lvltyp1 = int(line[0]) # 1- 1 integer major level type indicator\n lvltyp2 = int(line[1]) # 2- 2 integer minor level type indicator\n etime = int(line[3:8]) # 4- 8 integer elapsed time since launch\n press = int(line[9:15]) # 10- 15 integer reported pressure\n \n if press == -9999:\n press = np.nan\n \n pflag = line[15] # 16- 16 character pressure processing flag\n \n gph = int(line[16:21]) # 17- 21 integer geopotential height [m]\n \n if gph == -9999 or gph == -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say \n gph = np.nan # 23- 27 integer temperature, [Celsius to Kelvin ] \n \n zflag = line[21] # 22- 22 character gph processing flag, \n \n temp = int(line[22:27]) \n if temp != -9999 and temp != -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say \n temp = temp / 10. + 273.15 # 23- 27 integer temperature, [Celsius to Kelvin ] \n else:\n temp = np.nan \n \n tflag = line[27] # 28- 28 character temperature processing flag\n \n rh = int(line[28:33]) # 30- 34 integer relative humidity [%] \n if rh != -8888 and rh != -9999:\n rh = rh / 1000. # converting from percentage to absolute ratio \n else:\n rh = np.nan\n \n dpdp = int(line[34:39]) \n if dpdp != -9999 and dpdp !=-8888: \n dpdp = dpdp / 10. # 36- 40 integer dew point depression (degrees to tenth e.g. 11=1.1 C) \n else:\n dpdp = np.nan \n \n wdir = int(line[40:45]) # 41- 45 integer wind direction (degrees from north, 90 = east)\n if wdir == -8888 or wdir == -9999 :\n wdir = np.nan \n \n wspd = int(line[46:51]) # 47- 51 integer wind speed (meters per second to tenths, e.g. 11 = 1.1 m/s [m/s]\n if wspd != -8888 and wspd != -9999 :\n wspd = wspd / 10. 
\n else:\n wspd = np.nan \n if reltime == 9999.0:\n reltime = np.nan \n \n z_type = np.nan\n if not (np.isnan(press)):\n z_type = 1\n elif (np.isnan(press) and not np.isnan(gph) ) :\n z_type = 2 \n \n for value,var in zip([gph, temp, wspd, wdir, rh, dpdp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity' , 'dew_point_depression'] ):\n obs_id = obs_id +1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1 \n z_type = 1 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n else:\n z_type = -2147483648 \n read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) )\n\n\n df = pd.DataFrame(data= read_data, columns= column_names_igra2)\n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) # FF check here !!!! 
\n \n return df, stations_id", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df", "def stateToDf(statefile):\n return pd.read_csv(statefile,\n compression='gzip',\n sep=' ',\n skiprows=[1,2]\n )", "def _read_raw(self):\n return pd.read_csv('data/oma/orthologs.tsv', sep='\\t', header=None,\n usecols=[0, 1], names=['CE_WORMPEP', 'HS_ENSG']) \\\n .drop_duplicates()", "def _read_data(self, fp):\n names = [\n \"Year\",\n \"Month\",\n \"Day\",\n \"Hour\",\n \"Minute\",\n \"Data Source and Uncertainty Flags\",\n \"Dry Bulb Temperature\",\n \"Dew Point Temperature\",\n \"Relative Humidity\",\n \"Atmospheric Station Pressure\",\n \"Extraterrestrial Horizontal Radiation\",\n \"Extraterrestrial Direct Normal Radiation\",\n \"Horizontal Infrared Radiation Intensity\",\n \"Global Horizontal Radiation\",\n \"Direct Normal Radiation\",\n \"Diffuse Horizontal Radiation\",\n \"Global Horizontal Illuminance\",\n \"Direct Normal Illuminance\",\n \"Diffuse Horizontal Illuminance\",\n \"Zenith Luminance\",\n \"Wind Direction\",\n \"Wind Speed\",\n \"Total Sky Cover\",\n \"Opaque Sky Cover (used if Horizontal IR Intensity missing)\",\n \"Visibility\",\n \"Ceiling Height\",\n \"Present Weather Observation\",\n \"Present Weather Codes\",\n \"Precipitable Water\",\n \"Aerosol Optical Depth\",\n \"Snow Depth\",\n \"Days Since Last Snowfall\",\n \"Albedo\",\n \"Liquid Precipitation Depth\",\n \"Liquid Precipitation Quantity\",\n ]\n\n first_row = self._first_row_with_climate_data(fp)\n df = pd.read_csv(fp, skiprows=first_row, header=None, names=names)\n return df", "def convert_pcap_to_dataframe(input_file):\r\n if not os.path.exists(input_file):\r\n raise IOError(\"File \" + input_file + \" does not exist\")\r\n\r\n tshark_fields = \"-e frame.time_epoch \" \\\r\n \"-e _ws.col.Source \" \\\r\n \"-e _ws.col.Destination \" \\\r\n \"-e _ws.col.Protocol \" \\\r\n \"-e frame.len \" \\\r\n \"-e ip.ttl \" \\\r\n \"-e ip.flags.mf \" \\\r\n \"-e ip.frag_offset \" \\\r\n \"-e icmp.type \" \\\r\n \"-e tcp.srcport \" \\\r\n \"-e tcp.dstport \" \\\r\n \"-e udp.srcport \" \\\r\n \"-e udp.dstport \" \\\r\n \"-e dns.qry.name \" \\\r\n \"-e dns.qry.type \" \\\r\n \"-e http.request \" \\\r\n \"-e http.response \" \\\r\n \"-e http.user_agent \" \\\r\n \"-e tcp.flags.str \" \\\r\n \"-e ntp.priv.reqcode \"\r\n\r\n temporary_file = tempfile.TemporaryFile(\"r+b\")\r\n\r\n # print(shutil.which(command))\r\n\r\n p = subprocess.Popen([settings.TSHARK + \" -n -r \\\"\" + input_file + \"\\\" -E separator='\\x03' -E header=y -T fields \" + tshark_fields],\r\n shell=True, stdout=temporary_file) #\\x03 is ETX\r\n p.communicate()\r\n p.wait()\r\n\r\n # Reset file pointer to start of file\r\n temporary_file.seek(0)\r\n\r\n df = pd.read_csv(temporary_file, sep=\"\\x03\", low_memory=False, error_bad_lines=False)\r\n\r\n temporary_file.close()\r\n\r\n if ('tcp.srcport' in df.columns) and ('udp.srcport' in df.columns) and ('tcp.dstport' in df.columns) and \\\r\n ('udp.dstport' in df.columns):\r\n # Combine source and destination ports from tcp and udp\r\n df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])\r\n df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])\r\n\r\n df['srcport'] = 
df['srcport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n df['dstport'] = df['dstport'].apply(lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)\r\n\r\n # Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport'\r\n df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport'], axis=1, inplace=True)\r\n\r\n # Drop all empty columns (for making the analysis more efficient! less memory.)\r\n df.dropna(axis=1, how='all', inplace=True)\r\n df = df.fillna(0)\r\n\r\n if 'icmp.type' in df.columns:\r\n df['icmp.type'] = df['icmp.type'].astype(str)\r\n\r\n if 'ip.frag_offset' in df.columns:\r\n df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)\r\n\r\n if 'ip.flags.mf' in df.columns:\r\n df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)\r\n\r\n if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):\r\n # Analyse fragmented packets\r\n df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')\r\n df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)\r\n\r\n if 'tcp.flags.str' in df.columns:\r\n df['tcp.flags.str'] = df['tcp.flags.str'].str.encode(\"utf-8\") \r\n\r\n return df", "def table_to_dataframe(file):\n columns = ['instrument', 'dataset', 'flowcell', 'well', \n 'well_tile', 'cell', 'blob', 'position_i', 'position_j',\n 'read', 'quality']\n\n columns_drop = ['instrument', 'flowcell', 'dataset', 'well_tile']\n\n df = pd.read_csv(file, sep='\\s+', header=None, quoting=3)\n df.columns = columns\n df['tile'] = df['well_tile'] % 1000\n df = df.drop(columns_drop, axis=1)\n return df", "def stationlist(filename, verbose=1):\n from .support import message\n import numpy as np\n import pandas as pd\n\n try:\n infile = open(filename)\n tmp = infile.read()\n data = tmp.splitlines()\n message(\"Data read from:\", filename, verbose=verbose)\n except IOError as e:\n message(\"File not found: \" + filename, verbose=verbose)\n raise e\n else:\n infile.close()\n\n out = pd.DataFrame(columns=['id', 'wmo', 'lat', 'lon', 'alt', 'state', 'name', 'start', 'end', 'total'])\n\n for i, line in enumerate(data):\n id = line[0:11]\n\n try:\n id2 = \"%06d\" % int(line[5:11]) # substring\n\n except ValueError:\n id2 = \"\"\n\n lat = float(line[12:20])\n lon = float(line[21:30])\n alt = float(line[31:37])\n state = line[38:40]\n name = line[41:71]\n start = int(line[72:76])\n end = int(line[77:81])\n count = int(line[82:88])\n out.loc[i] = (id, id2, lat, lon, alt, state, name, start, end, count)\n\n message(\"Data processed\", i, verbose=verbose)\n out.loc[out.lon <= -998.8, 'lon'] = np.nan # repalce missing values\n out.loc[out.alt <= -998.8, 'alt'] = np.nan # repalce missing values\n out.loc[out.lat <= -98.8, 'lat'] = np.nan # replace missing values\n out['name'] = out.name.str.strip()\n out = out.set_index('id')\n return out", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def tire_data(tire_path):\n\n df = pd.read_csv(tire_path, index_col=0, sep=\";\", low_memory=False)\n\n return df", "def read_insitu_gas(cls, full_file_path):\n\n with open(full_file_path, 'r') as f:\n hlines = f.readline().rstrip().split(': 
')[1]\n\n df = pd.read_csv(full_file_path, skiprows=int(hlines), skipinitialspace=True,\n delimiter=' ', header=None, names=['site', 'year', 'month', cls._gas_name])\n\n # set datetime index in df (requires 'day' column)\n df['day'] = 1\n df.set_index(pd.to_datetime(df[['year', 'month', 'day']]), inplace=True)\n\n return df", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb", "def load_raw_data(path: str) -> pd.DataFrame:\n data = []\n with open(path) as file:\n for line in file:\n data.append(line)\n data_df = pd.DataFrame(data, columns = {'tweet'})\n return data_df", "def load(file):\n return pq.read_table(file).to_pandas()", "def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df", "def _csv2df(data_file):\n df = pd.read_csv(data_file, encoding=\"ISO-8859-1\", low_memory=False)\n return df", "def read_data(self):\n data = pd.read_table(self.file_dir, sep=\"\\t\", header=None)\n data.columns = [\"FromNodeId\", \"ToNodeId\"]\n return data" ]
[ "0.7591771", "0.685794", "0.6593989", "0.62999815", "0.6292323", "0.6279624", "0.6228682", "0.6220584", "0.61573863", "0.61280155", "0.611027", "0.6012557", "0.5959872", "0.5870615", "0.5867335", "0.5846116", "0.5834367", "0.58262587", "0.58182746", "0.581537", "0.57865024", "0.5772095", "0.5771084", "0.5771084", "0.5767674", "0.5766929", "0.57643604", "0.5722627", "0.57198524", "0.5701841" ]
0.70143044
1
Read an igra2 stationfile in ASCII format and convert to a Pandas DataFrame.
def igra2_ascii_to_dataframe(file=''): if debug: print("Running igra2_ascii_to_dataframe for: ", file) data = check_read_file(file=file, read=True) #source_file = [l for l in file.split('/') if '.txt' in l][0] read_data = [] # Lists containing the raw data from the ascii file, and the observation dates """ Data to be extracted and stored from the igra2 station files Some info is contained in the header of each ascent, some in the following data """ """ Initialize the variables that can be read from the igra2 files """ ident,year,month,day,hour,reltime,p_src,np_src,lat, lon = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan lvltyp1,lvltyp2,etime,press,pflag,gph,zflag,temp,tflag,rh,dpdep,wdir,wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan # initialize to zeros stations_id = [] idate = np.nan count = 0 head_count = 0 obs_id = 0 def make_release_time(date_time, hour, release): """ build a sonde release time ex 2019 02 20 00 2349 ex 2019 01 10 00 0011 They round the release time to the closest hour. It can be the same day or the following !!! date_time = date_time pytohn object, date, time, release = original strings """ release_h = int(release[:2]) release_m = int(release[2:4]) if release_h == 99: return 0 #largest integer number int 64 else: if release_m == 99: release_m = 0 release_date_time = date_time.replace(hour= release_h, minute= release_m) """ Here, I have to subtract one day to the release time stamp if the hour of the time stamp is in th evening, but the nominal time is reported at midnight hence in the following day. For example 2019 02 20 00 2349 from file VMM00048820 """ if hour == '00': if release_h > 20: release_date_time = release_date_time - timedelta(days=1) else: pass return release_date_time for i, line in enumerate(data): if line[0] == '#': head_count = head_count +1 # Info from the Header line of each ascent ident = line[1:12] # station identifier ident = ident[6:12] if ident not in stations_id: stations_id.append(ident) year = line[13:17] # year, months, day, hour of the observation month = line[18:20] day = line[21:23] hour = line[24:26] reltime = line[27:31] # release time of the sounding. numlev = int(line[32:36]) # number of levels in the sounding == number of data recorded in the ascent p_src = line[37:45] # data source code for the pressure levels np_src = line[46:54] # data source code for non-pressure levels lat = int(line[55:62]) / 10000. # latitude and longitude lon = int(line[63:71]) / 10000. 
#observation_id = i if int(hour) == 99: time = reltime + '00' else: time = hour + '0000' if '99' in time: time = time.replace('99', '00') idate = datetime.strptime(year + month + day + time, '%Y%m%d%H%M%S') # constructed according to CDM release_time = make_release_time(idate, hour, reltime) # making the release time iday = int(year + month + day) count = count + 1 else: # Data of each ascent lvltyp1 = int(line[0]) # 1- 1 integer major level type indicator lvltyp2 = int(line[1]) # 2- 2 integer minor level type indicator etime = int(line[3:8]) # 4- 8 integer elapsed time since launch press = int(line[9:15]) # 10- 15 integer reported pressure if press == -9999: press = np.nan pflag = line[15] # 16- 16 character pressure processing flag gph = int(line[16:21]) # 17- 21 integer geopotential height [m] if gph == -9999 or gph == -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say gph = np.nan # 23- 27 integer temperature, [Celsius to Kelvin ] zflag = line[21] # 22- 22 character gph processing flag, temp = int(line[22:27]) if temp != -9999 and temp != -8888: # reading the values andh check if they are missing or removed as -9999 or -8888 before dividing by 10 as the instructions say temp = temp / 10. + 273.15 # 23- 27 integer temperature, [Celsius to Kelvin ] else: temp = np.nan tflag = line[27] # 28- 28 character temperature processing flag rh = int(line[28:33]) # 30- 34 integer relative humidity [%] if rh != -8888 and rh != -9999: rh = rh / 1000. # converting from percentage to absolute ratio else: rh = np.nan dpdp = int(line[34:39]) if dpdp != -9999 and dpdp !=-8888: dpdp = dpdp / 10. # 36- 40 integer dew point depression (degrees to tenth e.g. 11=1.1 C) else: dpdp = np.nan wdir = int(line[40:45]) # 41- 45 integer wind direction (degrees from north, 90 = east) if wdir == -8888 or wdir == -9999 : wdir = np.nan wspd = int(line[46:51]) # 47- 51 integer wind speed (meters per second to tenths, e.g. 11 = 1.1 m/s [m/s] if wspd != -8888 and wspd != -9999 : wspd = wspd / 10. 
else: wspd = np.nan if reltime == 9999.0: reltime = np.nan z_type = np.nan if not (np.isnan(press)): z_type = 1 elif (np.isnan(press) and not np.isnan(gph) ) : z_type = 2 for value,var in zip([gph, temp, wspd, wdir, rh, dpdp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity' , 'dew_point_depression'] ): obs_id = obs_id +1 if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1 z_type = 1 read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) ) elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 z_type = 2 read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) ) else: z_type = -2147483648 read_data.append ( ( 'IGRA2'.rjust(10), head_count, int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'], int(cdmvar_dic[var]['cdm_unit']), numlev, z_type, release_time ) ) df = pd.DataFrame(data= read_data, columns= column_names_igra2) df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length ) df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan) df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) # FF check here !!!! return df, stations_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uadb_ascii_to_dataframe(filename, **kwargs):\n import datetime\n import zipfile\n import gzip\n import os\n import io\n import numpy as np\n import pandas as pd\n from . import support as sp\n\n if not os.path.isfile(filename):\n raise IOError(\"File not Found! %s\" % filename)\n\n if '.zip' in filename:\n archive = zipfile.ZipFile(filename, 'r')\n inside = archive.namelist()\n tmp = archive.open(inside[0])\n tmp = io.TextIOWrapper(tmp, encoding='utf-8')\n tmp = tmp.read()\n archive.close()\n data = tmp.splitlines() # Memory (faster)\n elif '.gz' in filename:\n\n with gzip.open(filename, 'rt', encoding='utf-8') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n else:\n with open(filename, 'rt') as infile:\n tmp = infile.read() # alternative readlines (slower)\n data = tmp.splitlines() # Memory (faster)\n\n raw = []\n headers = []\n dates = []\n nmiss = 0\n iprev = 0\n search_h = False\n i = 0\n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n ident = line[15:21] # WMO\n idflag = int(line[22:24]) # id flag\n d_src = int(line[25:28]) # source dataset\n version = float(line[29:34]) # version\n dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%2d\" % int(line[46:48])\n hour = line[49:53]\n locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n ele = float(line[79:85])\n stype = int(line[86:88])\n numlev = int(line[89:93])\n pvers = line[94:102]\n\n # wired stuff !?\n if '99' in hour:\n hour = hour.replace('99', '00')\n\n if '99' in day:\n search_h = True\n continue\n\n minutes = int(hour) % 100\n hour = \"%02d\" % (int(hour) // 100)\n\n if minutes > 60 or minutes < 0:\n minutes = 0\n\n elif minutes == 60:\n minutes = 59\n\n else:\n pass\n minutes = \"%02d\" % minutes\n idate = datetime.datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n headers.append((idate, usi, numlev, lat, lon, ele, stype))\n pday = int(day)\n search_h = False\n\n except Exception as e:\n print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n if kwargs.get('debug', False):\n raise e\n\n search_h = True\n iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n ltyp = int(line[0:4])\n press = float(line[5:13]) # hPa\n gph = float(line[14:22])\n temp = float(line[23:29]) # degree\n rh = float(line[30:36]) # %\n wdir = float(line[37:43])\n wspd = float(line[44:50]) # m/s\n raw.append((press, gph, temp, rh, wdir, wspd))\n dates.append(idate)\n\n sp.message(\"UADB Lines read:\", i, \"skipped:\", nmiss, \"Header:\", len(headers), **kwargs)\n\n out = pd.DataFrame(data=raw, index=dates, columns=['pres', 'gph', 'temp', 'rhumi', 'windd', 'winds'])\n out = out.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9], np.nan)\n # fix units\n out['pres'] *= 100. 
# need Pa\n out.index.name = 'date'\n headers = pd.DataFrame(data=headers, columns=['date', 'uid', 'numlev', 'lat', 'lon', 'alt', 'stype']).set_index(\n 'date')\n return out, headers", "def load_data(fpath: str, station: Dict[str, Any]) -> pd.DataFrame:\n df = pd.read_csv(\n fpath,\n skiprows=station['header_line_num']-1,\n usecols=['date', 'rain'],\n )\n\n # format the date from a string to a proper datetime object\n df['date'] = pd.to_datetime(df['date'])\n\n # extract year, month, week, and day to separate columns\n df['year'] = df['date'].dt.year\n df['month'] = df['date'].dt.month\n df['day'] = df['date'].dt.dayofyear\n df['week'] = df['date'].dt.weekofyear\n df['year_month'] = df['date'].dt.to_period('M')\n\n return df", "def uadb_ascii_to_dataframe(file=''): \n \n if debug:\n print(\"Running uadb_ascii_to_dataframe for: \", file) \n \n data = check_read_file(file=file, read=True) # TODO\n \n #source_file = [l for l in file.split('/') if '.txt' in l][0]\n\n nmiss = 0\n search_h = False \n read_data = []\n \n usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n\n #usi,idate, usi, lat, lon, lat, stype, press, gph, temp, rh, wdir, wspd, iday, ident, numlev= 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n obs_id = 0\n stations_id = [] \n \n for i, line in enumerate(data):\n if line[0] == 'H':\n try:\n # Header\n usi = int(line[2:14]) # unique station identifier\n \n ident = int(line[15:21].replace(' ',''))# WMO\n if ident not in stations_id:\n stations_id.append(ident)\n \n #if len(ident) == 4:\n # ident = '0' + ident \n #idflag = int(line[22:24]) # id flag\n #d_src = int(line[25:28]) # source dataset\n #version = float(line[29:34]) # version\n #dateflag = int(line[35:37]) # date flag\n year = line[38:42] # year\n month = \"%02d\" % int(line[43:45])\n day = \"%02d\" % int(line[46:48])\n hour = line[49:53]\n #locflag = int(line[54:56]) # Location Flag\n lat = float(line[57:67])\n lon = float(line[68:78])\n #ele = float(line[79:85])\n #stype = int(line[86:88])\n numlev = int(line[89:93])\n #pvers = line[94:102]\n\n if '99' in hour:\n hour = hour.replace('99', '00')\n \n if '99' in day:\n search_h = True\n continue\n \n minutes = int(hour) % 100 \n hour = \"%02d\" % (int(hour) // 100)\n if minutes > 60 or minutes < 0:\n minutes = 0\n minutes = \"%02d\" % minutes\n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day)\n #pday = int(day)\n search_h = False\n\n except Exception as e:\n #print(\"Error: \", i, line, repr(e), \"Skipping Block:\")\n search_h = True\n #iprev = i\n\n elif search_h:\n nmiss += 1\n continue # Skipping block\n\n else:\n # Data\n #ltyp = int(line[0:4])\n p = float(line[5:13])\n \n if p != -99999.0 and p != 9999.9: \n press = float(line[5:13])*100 # converting to Pa, since P is given in mb (1 mb = 1 hPa) \n else:\n press = np.nan \n \n gph = float(line[14:22]) # gph [m]\n \n if gph == -999.0 or gph == -99999.00 or gph >= 99999.0:\n gph = np.nan\n \n temp = float(line[23:29])\n if temp == -999.0:\n temp = np.nan \n else:\n temp = temp + 273.15\n \n rh = float(line[30:36]) # %\n if rh == -999.0:\n rh = np.nan\n else:\n rh = rh / 100. 
# convert to absolute ratio TODO\n\n wdir = float(line[37:43]) \n if wdir == -999.0 or wdir == -999 :\n wdir = np.nan\n \n wspd = float(line[44:50]) # [m/s], module of the velocity\n if wspd <0 :\n wspd = np.nan \n \n try:\n \n for value,var in zip([ gph, temp, wspd, wdir, rh], [ 'gph', 'temperature', 'wind_speed', 'wind_direction', 'relative_humidity'] ):\n obs_id = obs_id +1\n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n else:\n z_type = -2147483648 \n read_data.append( ( 'NCAR'.rjust(10), int(usi), int(obs_id), idate, iday, ident, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']), numlev , z_type) )\n \n except:\n 0\n \n \n \n #column_names = ['source_file', 'product_code', 'report_id', 'observation_id', 'report_timestamp' , 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body' , 'units', 'number_of_pressure_levels' ]\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length )\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n #df['observations_id'] =numpy.char.zfill(numpy.arange(ivar.shape[0]).astype('S10'), 10)\n \n df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n #df['report_id'] = numpy.int64 (df['report_id'] ) \n #df['observation_id'] = numpy.int64 (df['observation_id'] ) \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n print('Done reading DF')\n return df , stations_id", "def read_pipe_table_to_pandas(filename):\n\n astropy_data = astropy.io.ascii.read(filename)\n data_stream = StringIO()\n astropy_data[2:].write(data_stream, format='ascii.basic', delimiter='|')\n data_stream.seek(0)\n return pandas.read_csv(data_stream,\n comment='#',\n sep='|',\n skipinitialspace=True)", "def readData(f):\n line = f.readline()\n fieldnames = [x.strip() for x in line.split(\",\")]\n line = f.readline().strip()\n data = []\n while line != \"\":\n if line[0] != \"#\":\n fields = line.split(\",\")\n data.append((fields[0], [extractSI(v)[0] for v in fields[1:]]))\n line = f.readline().strip()\n # Man, working out this next incantation out was non-trivial!\n # They really want you to be snarfing data in csv or some other format they understand!\n res = pd.DataFrame.from_items(data, columns=fieldnames[1:], orient=\"index\")\n return res", "def airport_file_to_df(self):\n\t\tdf = pd.read_csv(\n\t\t\tfilepath_or_buffer=os.path.join(ROOT_DIR, \"raw\", \"airports.csv\".format(self.year)),\n\t\t\tsep=\",\", encoding=\"utf-8\",\n\t\t\tusecols=[\"iata\", \"airport\", \"city\", \"state\", \"country\", \"lat\", \"long\"]\n\t\t)\n\n\t\treturn df", "def 
read_insitu_gas(cls, full_file_path):\n\n with open(full_file_path, 'r') as f:\n hlines = f.readline().rstrip().split(': ')[1]\n\n df = pd.read_csv(full_file_path, skiprows=int(hlines), skipinitialspace=True,\n delimiter=' ', header=None, names=['site', 'year', 'month', cls._gas_name])\n\n # set datetime index in df (requires 'day' column)\n df['day'] = 1\n df.set_index(pd.to_datetime(df[['year', 'month', 'day']]), inplace=True)\n\n return df", "def parse_station(station):\n if not station:\n return pd.DataFrame()\n header = get_header(station[0])\n header['ftime'] = get_fntime(station[1], station[2], header) \n df = get_rows(header, station)\n return df", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")", "def read(self):\n \n self.df = pd.read_csv(self.path, encoding = \"ISO-8859-1\")", "def read_iaga(fname, return_xyzf=True):\n # Get the header information\n header_records = read_iaga_header(fname)\n\n # This contains 4 letters stating the reported values\n # - XYZF, HDZF, etc...\n if len(header_records['reported']) % 4 != 0:\n raise ValueError(\"The header record does not contain 4 values: {0}\".format(\n header_records['reported']))\n\n record_length = len(header_records['reported']) // 4\n # This keeps the last letter of each reported channel\n column_names = [x for x in header_records['reported'][record_length-1::record_length]]\n # This splits the string into 4 equal parts.\n # Some reportings from USGS can be 3 letters each, rather than 1\n #column_names = [header_records['reported'][i:i+record_length]\n # for i in range(0, len(header_records['reported']), record_length)]\n\n df = pd.read_csv(fname, header=header_records[\"header_length\"],\n delim_whitespace=True,\n parse_dates=[[0,1]], infer_datetime_format=True,\n index_col=0, usecols=[0,1,3,4,5,6],\n na_values=[99999.90, 99999.0, 88888.80, 88888.00],\n names=[\"Date\", \"Time\"] + column_names)\n df.index.name = \"Time\"\n if (return_xyzf and\n \"X\" not in column_names and\n \"Y\" not in column_names):\n # Convert the data to XYZF format\n # Only convert HD\n if \"H\" not in column_names or \"D\" not in column_names:\n raise ValueError(\"Only have a converter for HDZF->XYZF\\n\" +\n \"Input file is: \" + header_records['reported'])\n\n # IAGA-2002 D is reported in minutes of arc.\n df[\"X\"] = df[\"H\"] * np.cos(np.deg2rad(df[\"D\"]/60.))\n df[\"Y\"] = df[\"H\"] * np.sin(np.deg2rad(df[\"D\"]/60.))\n del df[\"H\"], df[\"D\"]\n return df", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def bufr_to_dataframe(file=''):\n \n if debug:\n print(\"Running bufr_to_dataframe for: \", file)\n \n check_read_file (file = file, read= False)\n f = open(file)\n #source_file = [l for l in file.split('/') if '.bfr' in l][0]\n read_data = []\n \n \"\"\" Name of the columns as they will appear in the pandas dataframe (not necessarily CDM compliant) \"\"\"\n #column_names = ['report_timestamp' , 'iday', 'station_id', 'latitude', 'longitude', 'pressure', 'value','varno@body']\n \n lat, lon, alt, blockNumber, stationNumber, statid = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n \n obs_id, report_id = -1, 0 # progressive observation id\n stations_id = [] \n \n while 1:\n #lista = [] # temporary list\n bufr = codes_bufr_new_from_file(f)\n \n if bufr is None:\n break\n \n codes_set(bufr, 'unpack', 1) # eCcodes must expand all the 
descriptors and unpack the data section\n \n date = '19'+codes_get_array(bufr, \"typicalDate\")[0][2:]\n timePeriod = codes_get_array(bufr, \"typicalTime\")[0] \n \n year, month, day = date[0:4], date[4:6] , date[6:8]\n hour, minutes = timePeriod[0:2] , timePeriod[2:4]\n \n idate = datetime.strptime(year + month + day + hour + minutes, '%Y%m%d%H%M')\n iday = int(year + month + day )\n\n pressure = codes_get_array(bufr, \"pressure\") \n temperature = codes_get_array(bufr, \"airTemperature\") \n wind_direction = codes_get_array(bufr, \"windDirection\")\n wind_speed = codes_get_array(bufr, \"windSpeed\")\n \n try: # not all the bufr files have the dewpoint \n dew_point = codes_get_array(bufr, \"dewpointTemperature\")\n except:\n dew_point= np.empty((1, len(temperature)))\n dew_point[:] = np.nan\n \n num_lev = len(pressure) # number of distinct pressure levels \n \n try:\n geopotential = codes_get_array(bufr, \"nonCoordinateGeopotentialHeight\") \n except:\n geopotential = np.full( (1,len(temperature)) , np.nan )[0,:]\n \n if report_id == 0:\n ''' Check again but these values should remain the same for all cnt, so it makes no sense to read them every time '''\n lat = codes_get(bufr, \"latitude\")\n lon = codes_get(bufr, \"longitude\")\n alt = float(codes_get(bufr, \"heightOfStation\"))\n blockNumber = codes_get(bufr, \"blockNumber\")\n stationNumber = codes_get(bufr, \"stationNumber\")\n #statid = str(blockNumber*1000+stationNumber) # changed to int instead of str\n statid = blockNumber*1000+stationNumber\n if statid not in stations_id:\n stations_id.append(statid) \n \n codes_release(bufr)\n \n miss_value = -1.e100 \n \n for i in range(len(temperature)):\n obs_id = obs_id + 1 \n airT = temperature[i]\n winds = wind_speed[i]\n windd = wind_direction[i]\n press = pressure[i]\n gph = geopotential[i]\n dp = dew_point[i]\n if press == miss_value:\n press = np.nan \n if dp == miss_value:\n dp = np.nan\n if airT == miss_value : # replacing none values with numpy nans\n airT = np.nan \n if winds == miss_value:\n winds = np.nan\n if gph == miss_value:\n gph = np.nan \n if windd == 2147483647 or windd == -2147483647:\n windd = np.nan \n \n \n for value,var in zip( [gph, airT, winds, windd, dp], ['gph', 'temperature', 'wind_speed', 'wind_direction', 'dew_point'] ):\n obs_id = obs_id + 1 \n if not np.isnan(press): # when pressure is available, z_coord== pressure and z_type==1\n z_type = 1 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n elif (np.isnan(press) and not np.isnan(gph) ) : # when pressure is not available, z_coord== gph and z_type==2 \n z_type = 2 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, gph, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n else:\n z_type = -2147483648 \n read_data.append( ( 'BUFR'.rjust(10), report_id, int(obs_id), idate, iday, statid, lat, lon, press, value, cdmvar_dic[var]['cdm_var'] , int(cdmvar_dic[var]['cdm_unit']) , num_lev , z_type ) ) \n\n\n report_id += 1\n \n df = pd.DataFrame(data= read_data, columns= column_names) \n \n df['observation_id'] = np.chararray.zfill( (df['observation_id'].astype(int)) .astype('S'+str(id_string_length ) ), id_string_length ) #converting to fixed length bite objects \n df['report_id'] = np.chararray.zfill( (df['report_id'].astype(int)).astype ('S'+str(id_string_length ) ), id_string_length 
)\n \n df = df.replace([-999.9, -9999, -999, -999.0, -99999.0, -99999.9, 99999.0, -99999.00 ], np.nan)\n \n df = df.sort_values(by = ['record_timestamp', 'vertco_reference_1@body' ] ) \n \n return df, stations_id", "def _read_antti_stations(station_file):\n if station_file.split('.')[-1] == 'gz':\n ff = gzip.open(station_file, 'r')\n else:\n ff = open(station_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # extract and convert single line with observatory IDs\n obsList = []\n llList = []\n incList = []\n nObs = 0\n nLL = 0\n nInc = 0\n for line in sIO:\n if re.search(b\"^%\", line):\n # skip comments\n continue\n\n if re.search(br\"^\\s*$\", line):\n # skip blank lines\n continue\n\n # first line of consequence should be a list of quoted strings holding\n # observatory IDs for observatories considered in this solution; convert\n # to a list of strings\n if len(obsList) == 0:\n obsList = re.sub(b'\\'', b'', line).split()\n nObs = len(obsList)\n continue\n\n # assume next nobs lines read are observatory locations\n if nLL < nObs:\n llList.append([float(elem) for elem in line.decode().split()])\n nLL = nLL+1\n continue\n\n # assume next nobs lines read are observatory inclusion (boolean) lists\n if nInc < nObs:\n #incList.append(line.strip())\n incList.append([int(elem) for elem in line.decode().strip()])\n nInc = nInc+1\n continue\n\n # close sIO\n sIO.close()\n\n if len(llList) > 2:\n obsLat, obsLon, obsRad = list(zip(*llList))\n elif len(llList) == 2:\n obsLat, obsLon = list(zip(*llList))\n obsRad = np.ones(obsLat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n obsInc = list(zip(*incList))\n\n return (np.array(obsLat), np.array(obsLon), np.array(obsRad),\n np.array(obsInc), np.array(obsList))", "def readSolarData(filename):\n\treturn pd.read_csv(filename)", "def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n \"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T", "def tire_data(tire_path):\n\n df = pd.read_csv(tire_path, index_col=0, sep=\";\", low_memory=False)\n\n return df", "def stationlist(filename, verbose=1):\n from .support import message\n import numpy as np\n import pandas as pd\n\n try:\n infile = open(filename)\n tmp = infile.read()\n data = tmp.splitlines()\n message(\"Data read from:\", filename, verbose=verbose)\n except IOError as e:\n message(\"File not found: \" + filename, verbose=verbose)\n raise e\n else:\n infile.close()\n\n out = pd.DataFrame(columns=['id', 'wmo', 'lat', 'lon', 'alt', 'state', 'name', 'start', 'end', 'total'])\n\n for i, line in enumerate(data):\n id = line[0:11]\n\n try:\n id2 = \"%06d\" % int(line[5:11]) # substring\n\n except ValueError:\n id2 = \"\"\n\n lat = float(line[12:20])\n lon = float(line[21:30])\n alt = float(line[31:37])\n state = line[38:40]\n name = line[41:71]\n start = int(line[72:76])\n end = int(line[77:81])\n count = 
int(line[82:88])\n out.loc[i] = (id, id2, lat, lon, alt, state, name, start, end, count)\n\n message(\"Data processed\", i, verbose=verbose)\n out.loc[out.lon <= -998.8, 'lon'] = np.nan # repalce missing values\n out.loc[out.alt <= -998.8, 'alt'] = np.nan # repalce missing values\n out.loc[out.lat <= -98.8, 'lat'] = np.nan # replace missing values\n out['name'] = out.name.str.strip()\n out = out.set_index('id')\n return out", "def read_metadata_txt(path):\n df = pd.read_csv(path,\n sep='\\s+', # Fields are separated by one or more spaces\n usecols=[0, 1, 2, 3, 4], # Grab only the first 4 columns\n # Missing elevation is noted as -999.9\n na_values=[-999.9],\n header=None,\n names=['station_id', 'latitude', 'longitude', 'elevation', 'state'])\n return df", "def open_igra_metadata(filename):\n import pandas as pd\n infos = \"\"\"\n IGRAID 1- 11 Character\n WMOID 13- 17 Integer\n NAME 19- 48 Character\n NAMFLAG 50- 50 Character\n LATITUDE 52- 60 Real\n LATFLAG 62- 62 Character\n LONGITUDE 64- 72 Real\n LONFLAG 74- 74 Character\n ELEVATION 76- 81 Real\n ELVFLAG 83- 83 Character\n YEAR 85- 88 Integer\n MONTH 90- 91 Integer\n DAY 93- 94 Integer\n HOUR 96- 97 Integer\n DATEIND 99- 99 Integer\n EVENT 101-119 Character\n ALTIND 121-122 Character\n BEFINFO 124-163 Character\n BEFFLAG 164-164 Character\n LINK 166-167 Character\n AFTINFO 169-208 Character\n AFTFLAG 209-209 Character\n REFERENCE 211-235 Character\n COMMENT 236-315 Character\n UPDCOM 316-346 Character\n UPDDATE 348-354 Character\n \"\"\"\n import numpy as np\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n elif it == 'Real':\n it = 'float'\n else:\n it = 'int'\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data", "def load_stationfile(filename=None):\n \n data={} \n\n if filename==None:\n print('load_stationfile requires a filename to load.')\n return\n try:\n fp=open(filename,'r')\n except IOError:\n print('load_stationfile: invalid filename.')\n return data\n\n headerstr=fp.readline()\n data_str=np.genfromtxt(filename,skip_header=1,dtype=str)\n fp.close()\n\n data['header']=headerstr\n data['station_num']=data_str[:,0].astype(np.int32)\n data['cell']=data_str[:,3].astype(np.int32)\n data['x']=data_str[:,1].astype(np.float64)\n data['y']=data_str[:,2].astype(np.float64)\n data['h']=data_str[:,4].astype(np.float64)\n data['station_name'] = data_str[:,5]\n \n return data", "def df_from_fits(filename, i=1):\n return pd.DataFrame.from_records(fitsio.FITS(filename)[i].read().byteswap().newbyteorder())", "def load_swc(file_name):\n\n df = pd.read_csv(file_name, delimiter=' ', header=None, comment='#',\n names=['sample', 'identifier', 'x', 'y', 'z', 'r', 'parent'],\n skipinitialspace=True).astype({'sample':int,'identifier':int,'x':float,'y':float,'z':float,'r':float,'parent':int})\n return df", "def txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 
'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data", "def stateToDf(statefile):\n return pd.read_csv(statefile,\n compression='gzip',\n sep=' ',\n skiprows=[1,2]\n )", "def _csv2df(data_file):\n df = pd.read_csv(data_file, encoding=\"ISO-8859-1\", low_memory=False)\n return df", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df", "def read_lexicon_into_df(lex_txt_file):\n data = []\n with open(lex_txt_file) as txtf:\n lines = txtf.readlines()\n for line in lines:\n root = re.search(r\"root='(.*?)'\", line).group(1)\n if root.startswith('0'):\n num_radicals = 3\n else:\n num_radicals = 4\n verb_class = re.search(r\"class='(.*?)'\", line).group(1)\n verb_type = re.search(r\"type='(.*?)'\", line).group(1)\n infinitive = re.search(r\"inf='(.*?)'\", line).group(1)\n languages = re.search(r\"lang='(.*?)'\", line).group(1)\n gloss = re.search(r\"gloss='(.*?)'\", line).group(1)\n\n data.append([root, num_radicals, verb_class, verb_type, infinitive, languages, gloss])\n\n lexicon_df = pd.DataFrame(data, columns=['root', 'num_radicals', 'class', 'type', 'infinitive', 'languages', 'gloss'])\n\n lexicon_df['root'] = lexicon_df['root'].str.replace(\"0\", \"\")\n lexicon_df = utify_chars(lexicon_df)\n lexicon_df.to_csv('babylex.csv')\n return lexicon_df", "def read_imp_ASCII(filename):\n\n # create a temporary directory\n tmpDir = tempfile.mkdtemp()\n\n # unzip filename to tmpDir\n with zipfile.ZipFile(filename, 'r') as inZip:\n inZip.extractall(tmpDir)\n\n # set filenames\n dt_file = os.path.join(tmpDir, 'DateTime.txt')\n location_file = os.path.join(tmpDir, 'LatLon.txt')\n bx_file = os.path.join(tmpDir, 'BX.txt')\n by_file = os.path.join(tmpDir, 'BY.txt')\n bz_file = os.path.join(tmpDir, 'BZ.txt')\n obx_file = os.path.join(tmpDir, 'obsBX.txt')\n oby_file = os.path.join(tmpDir, 'obsBY.txt')\n obz_file = os.path.join(tmpDir, 'obsBZ.txt')\n station_file = os.path.join(tmpDir, 'Stations.txt')\n\n DT = _read_antti_datetime(dt_file)\n\n Lat, Lon, Rad, Label = _read_antti_location(location_file)\n\n BX = _read_antti_component(bx_file)\n BY = _read_antti_component(by_file)\n BZ = _read_antti_component(bz_file)\n\n obsX = _read_antti_component(obx_file)\n obsY = _read_antti_component(oby_file)\n obsZ = _read_antti_component(obz_file)\n\n obsLat, obsLon, obsRad, obsInc, obsID = _read_antti_stations(station_file)\n\n shutil.rmtree(tmpDir)\n\n return (DT, (Lat, Lon, Rad), BX, BY, BZ, Label,\n (obsLat, obsLon, obsRad), obsX, obsY, obsZ, obsInc, obsID)" ]
[ "0.6976135", "0.6776106", "0.6681584", "0.6436349", "0.6297956", "0.6267564", "0.6266623", "0.62274367", "0.6124879", "0.61233276", "0.60146654", "0.60051876", "0.59578943", "0.5896764", "0.5883741", "0.58122426", "0.5811471", "0.5803112", "0.5794453", "0.5778825", "0.574811", "0.5730776", "0.5710127", "0.5694994", "0.5668811", "0.56585", "0.56451845", "0.5640935", "0.5634102", "0.56173515" ]
0.72274184
0
Build a sonde release time, e.g. 2019 02 20 00 2349 or 2019 01 10 00 0011. The release time is rounded (by the data providers) to the closest hour, so it can fall on the same day or on the following one. date_time = python datetime object; date, time, release = original strings
def make_release_time(date_time, hour, release):
    release_h = int(release[:2])
    release_m = int(release[2:4])

    if release_h == 99:
        return 0  # largest integer number int 64
    else:
        if release_m == 99:
            release_m = 0
        release_date_time = date_time.replace(hour=release_h, minute=release_m)

        """ Here, I have to subtract one day from the release time stamp if the hour of the time stamp
            is in the evening, but the nominal time is reported at midnight, hence on the following day.
            For example 2019 02 20 00 2349 from file VMM00048820 """

        if hour == '00':
            if release_h > 20:
                release_date_time = release_date_time - timedelta(days=1)
            else:
                pass

        return release_date_time
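A minimal usage sketch for the function above. The calling code, the sample values and the printed results are illustrative assumptions, not taken from the source; it presumes the function is defined in a module where timedelta is available from datetime.

from datetime import datetime, timedelta  # timedelta is also needed by make_release_time itself

# Nominal record 2019 02 20 00 with release string '2349':
# the release happened late in the evening, so the stamp is shifted back one day.
nominal = datetime(2019, 2, 20, 0)
print(make_release_time(nominal, '00', '2349'))   # 2019-02-19 23:49:00

# Nominal record 2019 01 10 00 with release string '0011': stays on the same day.
nominal = datetime(2019, 1, 10, 0)
print(make_release_time(nominal, '00', '0011'))   # 2019-01-10 00:11:00

# Missing release hour ('99..') returns 0 as a sentinel.
print(make_release_time(nominal, '00', '9999'))   # 0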
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_time(date_time):\n\n time_hour = int(date_time.strftime('%H'))\n\n quotient = int(time_hour / 4)\n\n if quotient == 5:\n date_time = datetime.combine(date_time.date()+timedelta(1), time(0,0))\n else:\n date_time = datetime.combine(date_time.date(), time((quotient+1)*4,0))\n \n return date_time", "def write_pkg_release_date_time(pldm_fw_up_pkg, release_date_time):\n time = release_date_time.time()\n date = release_date_time.date()\n us_bytes = time.microsecond.to_bytes(3, byteorder=\"little\")\n pldm_fw_up_pkg.write(\n struct.pack(\n \"<hBBBBBBBBHB\",\n 0,\n us_bytes[0],\n us_bytes[1],\n us_bytes[2],\n time.second,\n time.minute,\n time.hour,\n date.day,\n date.month,\n date.year,\n 0,\n )\n )", "def release_time(self) -> str:\n return pulumi.get(self, \"release_time\")", "def up_date(dte, r_quant, str_unit, bln_post_colon):\n if str_unit == 'w':\n dte += timedelta(weeks=r_quant)\n elif str_unit == 'd':\n dte += timedelta(days=r_quant)\n elif str_unit == 'h':\n dte += timedelta(hours=r_quant)\n elif str_unit == 'm':\n dte += timedelta(minutes=r_quant)\n elif str_unit in ('Y', 'y'):\n if r_quant > 500: # jul 2019 vs jul 17\n r_year = r_quant\n else:\n r_year = datetime.now().year + r_quant\n try:\n dte = datetime.replace(dte, year=int(r_year))\n except ValueError:\n dte = datetime.replace(dte, day=28, month=2,\n year=int(datetime.now().year + r_quant))\n elif str_unit == 'H':\n dte = datetime.replace(dte, hour=int(r_quant), second=0, microsecond=0)\n elif str_unit == 'M':\n dte = datetime.replace(dte, minute=int(r_quant),\n second=0, microsecond=0)\n elif str_unit == 'a':\n if not bln_post_colon:\n dte = datetime.replace(dte, hour=int(r_quant), minute=0,\n second=0, microsecond=0)\n elif str_unit == 'p':\n if bln_post_colon: # adjust by 12 hours if necessary\n if dte.hour < 12:\n dte = datetime.replace(dte, hour=dte.hour+12)\n else:\n p_quant = r_quant\n if p_quant < 12:\n p_quant += 12\n dte = datetime.replace(dte, hour=int(p_quant), minute=0,\n second=0, microsecond=0)\n elif (len(str_unit) >= 3) and (STR_MONTHS.find(str_unit) != -1):\n dte = datetime.replace(dte, month=(STR_MONTHS.index(str_unit) + 3)/3,\n day=int(r_quant), second=0, microsecond=0)\n # refers to this year or next year ? 
(assume not past)\n dte_today = datetime.today().replace(hour=0, minute=0, \\\n second=0, microsecond=0)\n if dte < dte_today:\n dte = dte.replace(year=(dte_today.year+1))\n return dte", "def get_nightly_start_time():\n return 14 # 2PM local Tucson time", "def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc", "def postpone(self, dlt_time, ky_word):\n if ky_word == 'hour':\n self.work_datetime = self.work_datetime + tdelta(seconds=dlt_time * 3600)\n elif ky_word == 'day':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time)\n elif ky_word == 'week':\n self.work_datetime = self.work_datetime + tdelta(weeks=dlt_time)\n elif ky_word == 'month':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time * 30)\n self.eisenhower_priority()\n return self.work_datetime", "def format_build_time(duration):\n return str(datetime.timedelta(seconds=int(duration)))", "def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'", "def release_date(self):\n for item in self.proto.releaseInfo.item:\n if item.label == 'Released on':\n return item.container.value", "def CalculateDateTime(time):\n time = str(time)\n date = time.split(' ')[0]\n splitted_date = date.split('-')\n day = splitted_date[2]\n month = splitted_date[1]\n year = splitted_date[0]\n time = time.split(' ')[1]\n hour = time.split(':')[0]\n minute = time.split(':')[1]\n updated_hour = int(hour) + 1\n if updated_hour > 12:\n updated_hour = updated_hour - 12\n fulldate = day + \" \" + month + \" \" + year + \" | \" + str(updated_hour) + \":\" + minute\n return fulldate\n else:\n fulldate = day + \" - \" + month + \" - \" + year + \" | \" + str(updated_hour) + \":\" + minute\n return fulldate", "def infer_release_date(tagname: str) -> Optional[datetime]:\n if tagname in RELEASE_DATES:\n return RELEASE_DATES[tagname]\n elif tagname[0] == \"w\" and tagname < \"w_2020_43\":\n # Weeklies used to be reliably produced on Saturdays, but that changed\n # in October of 2020.\n return datetime.strptime(tagname + \"_6\", \"w_%G_%V_%u\")\n else:\n return None", "def round_time(self, time):\n hour, mins, _ = time.split(\":\")\n return '{:02d}:00:00'.format(int(hour)+1 ) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def _get_time(self): \n\t\t# need to variable-ize the version ??? 
\n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def _ToBlogTime(self, time_tuple):\r\n return time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple)", "def GetBuildDate(build_type, utc_now):\n day = utc_now.day\n month = utc_now.month\n year = utc_now.year\n if build_type != 'official':\n first_sunday = GetFirstSundayOfMonth(year, month)\n # If our build is after the first Sunday, we've already refreshed our build\n # cache on a quiet day, so just use that day.\n # Otherwise, take the first Sunday of the previous month.\n if day >= first_sunday:\n day = first_sunday\n else:\n month -= 1\n if month == 0:\n month = 12\n year -= 1\n day = GetFirstSundayOfMonth(year, month)\n now = datetime.datetime(\n year, month, day, utc_now.hour, utc_now.minute, utc_now.second)\n return '{:%b %d %Y %H:%M:%S}'.format(now)", "def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim", "def get_video_title_releaser_release_time(self, url):\n video_id = ' '.join(re.findall('id.*html', url))\n browser = webdriver.Chrome()\n browser.get(url)\n title = browser.find_element_by_id('subtitle').text\n releaser = browser.find_element_by_id('module_basic_sub').text\n releaser = releaser.replace('+订阅','')\n releaser = releaser.replace(' ','')\n try:\n rt_midstep = browser.find_element_by_class_name('video-status').text\n rt_midstep = rt_midstep.replace('上传于','')\n rt_midstep = rt_midstep.replace(' ','')\n release_time = int(datetime.datetime.strptime(rt_midstep,'%Y-%m-%d').timestamp()*1e3)\n except:\n release_time = 0\n fetch_time = int(datetime.datetime.timestamp(datetime.datetime.now())*1e3)\n D0 = {'video_id': video_id,\n 'title': title,\n 'release_time': release_time,\n 'url': url,\n 'fetch_time': fetch_time}\n return D0", "def Time(row):\r\n try:\r\n timeadd = dt.datetime.strptime(row['TickIssueTime'], '%H:%M').time()\r\n except:\r\n timeadd = dt.datetime.strptime('00:00', '%H:%M').time()\r\n\r\n newtime = dt.datetime.combine(dt.datetime.strptime(row['TickIssueDate'], '%Y-%m-%d %H:%M:%S') , timeadd)\r\n return newtime", "def time_input():\n \n year = 2020\n month = 3 # number \n day = 12 # number in month\n hour = 12 # integer between 9 (= 9:00AM) and 17 (= 4:00PM) ## CHECK THIS\n minute = 0 # float between 0 (= 0 min) to 0.983 = 59 min)\n \n date=dt.datetime(year,month,day)\n time = date.timetuple().tm_yday\n time = time + hour/24 + minute/24/60\n \n return year, time", "def main():\n date_time_conversion('2018-12-30T09:37:56.000001Z', '2020-07-12T07:56:43.000001Z', 0, 0, 0, 0)", "def round_up_to_quarter_hour(self, dt: datetime) -> str:\n delta = timedelta(minutes=15)\n # Round time backwards to the hour\n rounded_hour = dt.replace(minute=0, second=0, microsecond=0)\n rounded_qtr_hour = rounded_hour + ceil((dt - rounded_hour) / delta) * delta\n return self.date_to_intermediate_time_str(rounded_qtr_hour)", "def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = 
timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg", "def make_outstr_bp( mjd, utc_tstart_dt, utc_tend_dt, zenith_midtime, airmass, \\\n trtype, moonpos, moondist, moonphase ):\n\n mjd_str = '{0:.2f}'.format( mjd ).center( 8 )\n \n utc_tstart_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tstart_dt.year, \\\n utc_tstart_dt.month, \\\n utc_tstart_dt.day, \\\n utc_tstart_dt.hour, \\\n utc_tstart_dt.minute, \\\n utc_tstart_dt.second )\n utc_tstart_str = utc_tstart_str.center( 19 )\n \n utc_tend_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tend_dt.year, \\\n utc_tend_dt.month, \\\n utc_tend_dt.day, \\\n utc_tend_dt.hour, \\\n utc_tend_dt.minute, \\\n utc_tend_dt.second )\n utc_tend_str = utc_tend_str.center( 19 )\n\n zenith_str = '{0:d}'.format( int( np.round( zenith_midtime ) ) ).center( 6 )\n airmass_str = '{0:.2f}'.format( airmass ).center( 4 )\n trtype_str = trtype.center( 21 )\n moonpos_str = moonpos.center( 12 )\n moondist_str = moondist.center( 9 )\n moonphase_str = moonphase.center( 10 )\n outstr = ' {0} {1} {2} {3} {4} {5} {6} {7} {8}\\n'\\\n .format( mjd_str, \\\n utc_tstart_str, \\\n utc_tend_str, \\\n zenith_str, \\\n airmass_str, \\\n trtype_str, \\\n moonpos_str, \\\n moondist_str, \\\n moonphase_str )\n \n return outstr", "def make_date_str_from_dt64(dt64_time, output_freq_code):\n\n print(dt64_time)\n date_str_full = str(dt64_time)[0:19].replace(':','')\n year = date_str_full[0:4]\n month = date_str_full[5:7]\n day = date_str_full[8:10]\n hour = date_str_full[11:13]\n\n print(year, month, day, hour)\n ppp_tttt = \"\"\n date_str_short =\"\"\n\n if output_freq_code == 'AVG_MON':\n date_str_short = str(np.datetime64(dt64_time,'M'))\n ppp_tttt = 'mon_mean'\n\n # --- AVG DAY\n elif output_freq_code == 'AVG_DAY':\n date_str_short = str(np.datetime64(dt64_time,'D'))\n ppp_tttt = 'day_mean'\n\n # --- SNAPSHOT\n elif 'SNAP' in output_freq_code:\n # convert from oroginal\n # '1992-01-16T12:00:00.000000000'\n # to new format\n # '1992-01-16T120000'\n date_str_short = str(dt64_time)[0:19].replace(':','')\n ppp_tttt = 'snap'\n\n date_str = dict()\n date_str['full'] = date_str_full\n date_str['short'] = date_str_short\n date_str['year'] = year\n date_str['month'] = month\n date_str['day'] = day\n date_str['hour'] = hour\n date_str['ppp_tttt'] = ppp_tttt\n\n return date_str", "def make_outstr_ch( target, mjd, utc_tstart_dt, utc_tend_dt, zenith_midtime, airmass, \\\n trtype, moonpos, moondist, moonphase ):\n\n target_str = target.replace( ' ', '' ).rjust( 11 )\n mjd_str = '{0:.2f}'.format( mjd ).center( 8 )\n \n utc_tstart_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tstart_dt.year, \\\n utc_tstart_dt.month, \\\n utc_tstart_dt.day, \\\n utc_tstart_dt.hour, \\\n utc_tstart_dt.minute, \\\n utc_tstart_dt.second )\n utc_tstart_str = utc_tstart_str.center( 19 )\n \n utc_tend_str = '{0:04d}:{1:02d}:{2:02d}:{3:02d}:{4:02d}:{5:02d}'\\\n .format( utc_tend_dt.year, \\\n utc_tend_dt.month, \\\n utc_tend_dt.day, \\\n utc_tend_dt.hour, \\\n utc_tend_dt.minute, \\\n utc_tend_dt.second )\n utc_tend_str = utc_tend_str.center( 19 )\n\n zenith_str = '{0:d}'.format( int( np.round( zenith_midtime ) ) ).center( 6 )\n airmass_str = '{0:.2f}'.format( airmass ).center( 4 )\n trtype_str = trtype.center( 21 )\n moonpos_str = moonpos.center( 12 )\n moondist_str = moondist.center( 9 )\n moonphase_str = moonphase.center( 10 )\n outstr = '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}\\n'\\\n .format( target_str, \\\n mjd_str, \\\n 
utc_tstart_str, \\\n utc_tend_str, \\\n zenith_str, \\\n airmass_str, \\\n trtype_str, \\\n moonpos_str, \\\n moondist_str, \\\n moonphase_str )\n \n return outstr", "def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"", "def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"", "def toPloneboardTime(context, request, time_=None):\n ploneboard_time = None\n ts = getToolByName(context, 'translation_service')\n\n format = '%Y;%m;%d;%w;%H;%M;%S'\n\n # fallback formats, english\n young_format_en = '%A %H:%M'\n old_format_en = '%B %d. %Y'\n\n\n if not time_:\n return 'Unknown date'\n\n if callable(time_):\n time_ = time_()\n\n try:\n if isinstance(time_, DateTime):\n time_ = datetime.fromtimestamp(time_.timeTime())\n else:\n time_ = dateparse(str(time_))\n\n (year, month, day,\n hours, minutes, seconds, wday, _, dst) = time_.timetuple()\n\n translated_date_elements = {\n 'year': year,\n 'month': unicode(\n defer(\n translate,\n _locales(ts.month_msgid(month)),\n context=request\n )\n ),\n 'day': day,\n 'wday': unicode(\n defer(\n translate,\n _locales(ts.day_msgid((wday + 1) % 7)),\n context=request\n )\n ),\n 'hours': \"%02i\" % hours,\n 'minutes': \"%02i\" % minutes,\n 'seconds': \"%02i\" % seconds\n }\n\n if time.time() - time.mktime(time_.timetuple()) < 604800: # 60*60*24*7\n ploneboard_time = translate(\n '${wday} ${hours}:${minutes}',\n mapping=translated_date_elements,\n context=request\n )\n else:\n ploneboard_time = translate(\n _plone(\n 'old_date_format: ${year} ${month} ${day} '\n '${hours}:${minutes}',\n default=unicode(\n time_.strftime(old_format_en).decode('utf-8')\n ),\n mapping=translated_date_elements\n ),\n context=request\n )\n except IndexError:\n pass\n\n return ploneboard_time" ]
[ "0.72464585", "0.60484743", "0.59548783", "0.5717549", "0.5513829", "0.5408672", "0.53816426", "0.5347346", "0.53351736", "0.53103334", "0.5297417", "0.52803713", "0.5220621", "0.5219593", "0.5197299", "0.51796794", "0.51562935", "0.5153432", "0.5144711", "0.5126217", "0.5102053", "0.5096154", "0.50932777", "0.50899434", "0.508722", "0.508621", "0.5078612", "0.5060174", "0.5042356", "0.5038964" ]
0.79936635
0
Create the header from the odb file, if not found in the 'headers/' directory. Headers contain the column names and their respective variable types. Only for ODB files.
def make_odb_header(odbfile, dataset):
    header = 'headers/' + dataset + '_header.dat'

    if not os.path.isfile(header):
        print(' Creating the header file for the dataset: ', dataset)
        if dataset in ('era5_1', 'era5_2'):
            odbfile = odbfile.replace('.gz', '')
        else:
            odbfile = odbfile.replace('.gz', '').replace('.conv._', '.conv.')

        rdata = subprocess.check_output(["odb", "header", odbfile])
        with open(header, 'wb') as f:
            f.write(rdata)

        f = open(header, 'rb')
        rdata = f.read()
        rdata = rdata.decode('utf-8').split('\n')
    else:
        f = open(header, 'rb')
        rdata = f.read()
        rdata = rdata.decode('utf-8').split('\n')
        # print(' Done reading the existing header file for the dataset: ', dataset)

    columns, kinds, tdict = [], [], {}

    for r in rdata[2:-2]:
        try:
            if r[:6] == 'Header':
                break
            else:
                columns.append(r.split('name: ')[1].split(',')[0])
                kinds.append(r.split('type: ')[1].split(',')[0])
                if kinds[-1] == 'REAL':
                    tdict[columns[-1]] = numpy.float32
                elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:
                    # print(columns[-1])
                    if columns[-1] == 'sonde_type@conv' or columns[-1] == 'station_type@conv':
                        tdict[columns[-1]] = numpy.float32
                    else:
                        tdict[columns[-1]] = numpy.int32
                else:
                    tdict[columns[-1]] = numpy.dtype('S')  # dict containing column name and type
        except IndexError:
            pass

    """ This is done because otherwise, for the era5 databases (1759, 1761, 3188), the tdict has a different
        length than the columns list, so the following call
        alldict = pd.read_csv(f, delimiter='\t', usecols=columns, quoting=3, comment='#', skipinitialspace=True, dtype=tdict)
        breaks """

    for t in list(tdict.keys()):  # iterate over a copy so entries can be deleted safely
        if t not in columns:
            # print("Removing non appearing fb column: ", c)
            del tdict[t]

    """ These values must be removed from the fb, since they have NULL values and they create problems with
        alldict = pd.read_csv(f, delimiter='\t', usecols=columns, quoting=3, comment='#', skipinitialspace=True, dtype=tdict) """

    if dataset in ["era5_1759", "era5_1761", "era5_3188"]:
        remove = ['sonde_type@conv', "eda_spread@errstat", "bias_volatility@body", "timeseries_index@conv"]
        for c in remove:
            # print("Removing wrong fb column: ", c)
            try:
                columns.remove(c)
                del tdict[c]
            except:
                pass

    return columns, kinds, tdict
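A short sketch of how the returned triple is typically consumed, mirroring the pd.read_csv call quoted in the docstrings above. The input file names, the open() setup and the pandas import are assumptions for illustration, not part of the source.

import pandas as pd

columns, kinds, tdict = make_odb_header('example.conv.gz', 'era5_1759')  # hypothetical ODB file / dataset name

with open('example_odb_dump.txt', 'rt') as f:  # hypothetical decoded ODB dump
    alldict = pd.read_csv(f, delimiter='\t', usecols=columns, quoting=3,
                          comment='#', skipinitialspace=True, dtype=tdict)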
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = 
np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def test_header_file_simple(suffix: str) -> None:\n path = rsc / header_file\n df = read_ods(path.with_suffix(suffix))\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert len(df.columns) == 5", "def _ReadFileHeader(self, file_object):\n data_type_map = self._GetDataTypeMap('recycle_bin_metadata_file_header')\n\n file_header, _ = self._ReadStructureFromFileObject(\n file_object, 0, data_type_map, 'file header')\n\n if self._debug:\n debug_info = self._DEBUG_INFORMATION.get(\n 'recycle_bin_metadata_file_header', None)\n self._DebugPrintStructureObject(file_header, debug_info)\n\n if file_header.format_version not in self._SUPPORTED_FORMAT_VERSION:\n raise errors.ParseError(\n f'Unsupported format version: {file_header.format_version:d}')\n\n return file_header", "def read_header(fobj, endian=''): \n\n # read the header\n lstr = fobj.read(4)\n if lstr == '':\n raise 
EOFError('read_header: EOF encountered at start of header read')\n (lmap,) = struct.unpack(endian + 'i', lstr)\n \n head = subs.Odict()\n for i in xrange(lmap):\n name = read_string(fobj, endian)\n (itype,) = struct.unpack(endian + 'i', fobj.read(4))\n comment = read_string(fobj, endian)\n \n if itype == 0: # double\n (value,) = struct.unpack(endian + 'd', fobj.read(8))\n elif itype == 1: # char\n raise CppError('read_header: char not enabled')\n elif itype == 2: # int\n (value,) = struct.unpack(endian + 'i', fobj.read(4))\n elif itype == 3: # uint\n raise CppError('read_header: uint not enabled')\n elif itype == 4: # lint\n raise CppError('read_header: linit not enabled')\n elif itype == 5: # ulint\n raise CppError('read_header: ulint not enabled')\n elif itype == 6: # float\n (value,) = struct.unpack(endian + 'f', fobj.read(4))\n elif itype == 7: # string\n value = read_string(fobj, endian)\n elif itype == 8: # bool\n (value,) = struct.unpack(endian + 'B', fobj.read(1))\n elif itype == 9: # directory\n value = subs.Odict()\n elif itype == 10: # date\n raise CppError('read_header: date not enabled')\n elif itype == 11: # time\n (mjd,) = struct.unpack(endian + 'i', fobj.read(4))\n (hour,) = struct.unpack(endian + 'd', fobj.read(8))\n value = (mjd, hour)\n elif itype == 12: # position\n value = subs.Odict()\n (value['RA'],) = struct.unpack(endian + 'd', fobj.read(8))\n (value['Dec'],) = struct.unpack(endian + 'd', fobj.read(8))\n value['System'] = 'ICRS'\n (value['Epoch'],) = struct.unpack(endian + 'd', fobj.read(8))\n (value['PmRA'],) = struct.unpack(endian + 'f', fobj.read(4))\n (value['PmDec'],) = struct.unpack(endian + 'f', fobj.read(4))\n (value['Parallax'],) = struct.unpack(endian + 'f', fobj.read(4))\n (value['RV'],) = struct.unpack(endian + 'f', fobj.read(4))\n elif itype == 13: # dvector\n raise CppError('read_header: dvector not enabled')\n elif itype == 14: # uchar\n (value,) = struct.unpack(endian + 'c', fobj.read(1))\n elif itype == 15: # telescope\n tname = read_string(fobj, endian)\n sname = read_string(fobj, endian)\n (longitude,) = struct.unpack(endian + 'd', fobj.read(8))\n (latitude,) = struct.unpack(endian + 'd', fobj.read(8))\n (height,) = struct.unpack(endian + 'f', fobj.read(4))\n value = subs.Odict()\n value['Name'] = tname\n value['Observatory'] = sname\n value['Longitude'] = longitude\n value['Latitude'] = latitude\n value['Height'] = height\n else:\n raise CppError('read_header: itype = ' + str(itype) + ' not recognised.')\n\n clist = name.split('.')\n head_set(head, clist, value)\n \n return head", "def write_headers(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\t# For unknown reasons this does not work on Linux, but works on Mac ??? 
Really?\n\t\tDB = db_open_dict(filename)\n\t\tfor i in range(len(lima)):\n\t\t\tDB.set_header(lima[i], data[i])\n\t\tDB.close()\n\t\t#for i in range(len(lima)):\n\t\t#\tdata[i].write_image(filename, lima[i])\n\telif ftp == \"hdf\":\n\t\tfor i in range(len(lima)):\n\t\t\tdata[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def write_db_header(table,runnumber):\n\n table.write('{\\n')\n table.write('\"type\": \"DQLL\",\\n')\n table.write('\"version\": 1,\\n')\n table.write('\"index\": \"\",\\n')\n \n runrange = \"\\\"run_range\\\": [{0},{1}],\\n\".format(runnumber,runnumber)\n table.write(runrange)\n \n table.write('\"pass\": 0,\\n')\n table.write('\"production\": true,\\n')\n table.write('\"comment\": \"\",\\n')\n\n currentdatetime = datetime.datetime.now()\n\n # If there is no time zone information in the string, assume it's the local computer timezone\n if currentdatetime.tzinfo is None:\n currentdatetime = (pytz.timezone('Europe/Lisbon')).localize(currentdatetime)\n\n # Define Sudbury time zone\n sudburytimezone = pytz.timezone('US/Eastern')\n\n timestamp = \"\\\"timestamp\\\": \\\"{0}\\\",\\n\".format(currentdatetime.astimezone(sudburytimezone).isoformat())\n table.write(timestamp)\n\n table.write(\"\\n\")", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. 
of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def create_header(self, tables: List[Dict], schema: bool = False) -> str:\n header = \"\"\n if \"func\" in self.state:\n header += gt.sql_alchemy_func_import + \"\\n\"\n if self.postgresql_dialect_cols:\n header += (\n gt.postgresql_dialect_import.format(\n types=\",\".join(self.postgresql_dialect_cols)\n )\n + \"\\n\"\n )\n if self.constraint:\n header += gt.unique_cons_import + \"\\n\"\n if self.im_index:\n header += gt.index_import + \"\\n\"\n if schema and tables[0].table_schema:\n schema = tables[0].table_schema.replace('\"', \"\")\n header += \"\\n\" + gt.gino_init_schema.format(schema=schema)\n else:\n header += \"\\n\" + gt.gino_init\n return header", "def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def get_tfsheader(tfsfile):\n headerdata = pd.read_csv(tfsfile, delim_whitespace=True, nrows=44, index_col=None)\n headerdata.columns = ['AT', 'NAME', 'TYPE', 'VALUE']\n return headerdata[['NAME', 'VALUE']]", "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def test_header_file_with_cols(suffix: str) -> None:\n path = rsc / header_file\n columns = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\n df = read_ods(path.with_suffix(suffix), \"Sheet1\", columns=columns)\n\n assert list(df.columns) == columns\n assert len(df) == 10\n assert len(df.columns) == 5", "def _read_header(edf_file):\n read = edf_file.read\n read_ascii = lambda n: read(n).decode('ascii').strip()\n read_int = lambda n: int(read_ascii(n))\n read_float 
= lambda n: float(read_ascii(n))\n\n version = int(read(8).decode('ascii').strip())\n assert version == 0\n\n header = OrderedDict()\n\n header['local_patient_id'] = read_ascii(80)\n header['local_recording_id'] = read_ascii(80)\n\n unpack_ts = lambda n: [int(x) for x in read_ascii(n).split('.')]\n header['start_date'] = StartDate(*unpack_ts(8))\n header['start_time'] = StartTime(*unpack_ts(8))\n\n header['num_header_bytes'] = read_int(8)\n\n read(44)\n\n header['num_records'] = read_int(8)\n header['seconds_per_record'] = read_int(8)\n header['num_signals'] = nsig = read_int(4)\n\n header['label'] = [read_ascii(16) for _ in range(nsig)]\n header['transducer_type'] = [read_ascii(80) for _ in range(nsig)]\n header['units'] = [read_ascii(8) for _ in range(nsig)]\n header['physical_min'] = np.array([read_float(8) for _ in range(nsig)])\n header['physical_max'] = np.array([read_float(8) for _ in range(nsig)])\n header['digital_min'] = np.array([read_float(8) for _ in range(nsig)])\n header['digital_max'] = np.array([read_float(8) for _ in range(nsig)])\n header['prefiltering'] = [read_ascii(80) for _ in range(nsig)]\n header['samples_per_record'] = np.array([read_int(8) for _ in range(nsig)])\n\n read(32 * nsig)\n\n assert edf_file.tell() == header['num_header_bytes']\n\n return header", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile", "def load_header(base_path, subvolume):\n with h5py.File(file_path(base_path, subvolume, 'subvolume'), 'r') as f:\n header = dict(f['Header'].attrs.items())\n header.update({key: f['Header'][key][:] for key in f['Header'].keys()})\n \n return header", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def parse_pdb_header(infile):\n header = []\n with File.as_handle(infile) as f:\n for l in f:\n record_type = l[0:6]\n if record_type in (\"ATOM \", \"HETATM\", \"MODEL \"):\n break\n else:\n header.append(l)\n return _parse_pdb_header_list(header)", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def test_header_file_with_str(suffix: str) -> None:\n path = rsc / header_file\n df = read_ods(path.with_suffix(suffix), \"Sheet1\")\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert len(df.columns) == 5", "def test_header_file_with_int(suffix: str) -> None:\n path = rsc / header_file\n df = read_ods(path.with_suffix(suffix), 1)\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert 
len(df.columns) == 5", "def get_lh5_header(in_file, verbose=False):\n hf = h5py.File(in_file)\n\n # pretty print the raw structure, with all attributes\n if verbose:\n def print_groups(name, obj):\n if isinstance(obj, h5py.Group):\n print(f\"GROUP /{name}\")\n indent = \" \"\n if isinstance(obj, h5py.Dataset):\n print(\" DATASET\", obj.shape, obj.name)\n indent = \" \"\n for att, val in obj.attrs.items():\n print(f\"{indent}ATTRIBUTE {att}:\", val)\n print(\" \")\n hf.visititems(print_groups) # accesses __call__\n \n # find each LH5 \"Table\" contained in the file, and create a DataFrame header\n tables = {}\n for g_top in hf.keys():\n \n h5group = hf[f\"/{g_top}\"]\n attrs = {att:val for att, val in h5group.attrs.items()}\n \n # LH5 table condition\n if \"datatype\" in attrs.keys() and \"table{\" in attrs[\"datatype\"]:\n \n # call our nice iterator at this group level\n table = {g_top:[]}\n for (path, name, size, dtype, units, spec) in get_datasets(h5group):\n table[g_top].append((name, size, dtype, units, spec))\n \n hdr = pd.DataFrame(table[g_top], columns=['name','size','dtype',\n 'units','spec'])\n \n # fix waveform datatype to match flattened_data\n if 'waveform' in hdr['name'].values:\n wf_dt = h5group['waveform/values/flattened_data'].dtype\n hdr.loc[hdr['name'] == 'waveform', ['dtype']] = wf_dt\n \n tables[g_top] = hdr\n\n return tables", "def gen_model_header(env: jinja2.environment.Environment, model: onnx.ModelProto) -> str:\n header_template = env.get_template(\"model_header.dml.jinja\")\n header_infos = dict()\n\n header_infos[\"ir_version\"] = model.ir_version\n opset_import = list()\n for opset in model.opset_import:\n if len(opset.domain) == 0:\n opset.domain = \"ONNX\"\n opset_import.append(opset.domain + \"/\" + str(opset.version))\n header_infos[\"producer_name\"] = model.producer_name\n header_infos[\"producer_version\"] = model.producer_version\n header_infos[\"domain\"] = model.domain\n header_infos[\"model_version\"] = model.model_version\n header_infos[\"doc_string\"] = model.doc_string\n metadata_props = [[prop.key, prop.vale] for prop in model.metadata_props]\n\n model_header_render = header_template.render(\n header_components=header_infos,\n opset_import=opset_import,\n metadata_props=metadata_props\n )\n return model_header_render", "def create_header(analysis_outdir, metadata, rg_dict, specimen_dict, logger=default_logger):\n\n rgid = rg_dict[\"ID\"].replace(\".\", \"_\")\n header = \"%s/header-%s.sam\" %(analysis_outdir, rg_dict[\"ID\"])\n header_file = open(header, \"w\")\n header_file.write(\"@HD\\tVN:1.4\\n\")\n PI_STR = \"\"\n if len(rg_dict[\"PI\"]):\n PI_STR=\"PI:%s\\t\" % (rg_dict[\"PI\"])\n header_file.write(\"@RG\\tID:%s:%s\\tCN:%s\\tPL:%s\\tPM:%s\\tLB:%s:%s:%s\\t%sSM:%s\\tPU:%s:%s\\tDT:%s\\n\"\n %(metadata[\"center_name\"], rgid,metadata[\"center_name\"], metadata[\"platform\"],metadata[\"platform_model\"], metadata[\"seqtype\"],\n metadata[\"center_name\"], rg_dict[\"LB\"], PI_STR, metadata[\"aliquot_id\"], rg_dict[\"CN\"], rg_dict[\"PU\"], getUTCDate(rg_dict[\"DT\"])))\n header_file.write(\"@CO\\tdcc_project_code:%s-US\\n\" %metadata[\"disease\"])\n header_file.write(\"@CO\\tsubmitter_donor_id:%s\\n\" %metadata[\"participant_id\"])\n header_file.write(\"@CO\\tsubmitter_specimen_id:%s\\n\" %metadata[\"sample_id\"])\n header_file.write(\"@CO\\tsubmitter_sample_id:%s\\n\" %metadata[\"aliquot_id\"])\n\n if metadata[\"sample_type\"] not in specimen_dict:\n msg = \"sample_type %s not found in specimen mapping\" % metadata[\"sample_type\"]\n 
logger.error(msg)\n if not FORCE_RUN:\n raise HeaderException(msg)\n\n if \"sample_type\" in metadata and metadata[\"sample_type\"] in specimen_dict:\n (icgc_type, sample_class) = specimen_dict[metadata[\"sample_type\"]]\n else:\n icgc_type = \"unknown\"\n sample_class = \"unknown\"\n\n #Sanity check about use_cntl\n if \"use_cntl\" in metadata:\n if metadata[\"use_cntl\"] == \"N/A\" and sample_class == \"tumour\":\n msg = \"Tumour sample requires use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n if sample_class == \"normal\" and metadata[\"use_cntl\"] != \"N/A\":\n msg = \"Normal sample requires N/A use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n\n header_file.write(\"@CO\\tdcc_specimen_type:%s\\n\" % icgc_type)\n header_file.write(\"@CO\\tuse_cntl:%s\\n\" %(metadata.get(\"use_cntl\", \"NA\")))\n header_file.close()\n return header", "def GetIndexFileHeaderText(headerinfo):#{{{\n (dbname, version, ext, prefix) = headerinfo\n indexFileHeaderText = []\n indexFileHeaderText.append(\"DEF_VERSION %s\"%(version))\n indexFileHeaderText.append(\"DEF_DBNAME %s\"%(dbname))\n indexFileHeaderText.append(\"DEF_EXTENSION %s\"%(ext))\n indexFileHeaderText.append(\"DEF_PREFIX %s\"%(prefix))\n return indexFileHeaderText", "def _create_header_file(tensor_name, npy_data, output_path, data_linkage):\n file_path = pathlib.Path(f\"{output_path}/\" + tensor_name).resolve()\n # create header file\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\"#include <stddef.h>\\n\")\n header_file.write(\"#include <stdint.h>\\n\")\n header_file.write(\"#include <dlpack/dlpack.h>\\n\")\n header_file.write(f\"const size_t {tensor_name}_len = {npy_data.size};\\n\")\n\n _emit_data_linkage(header_file, data_linkage)\n\n header_file.write(f\"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =\")\n\n header_file.write(\"{\")\n for i in np.ndindex(npy_data.shape):\n header_file.write(f\"{npy_data[i]}, \")\n header_file.write(\"};\\n\\n\")", "def generateHeader(param_dict, filename_out, test_mode=False, template=\"uvfits_headers/header.tpl\"):\n findAndReplace(param_dict, template,filename_out, test_mode)" ]
[ "0.6557311", "0.6183077", "0.6180994", "0.6173603", "0.61628336", "0.6050184", "0.6024849", "0.5993984", "0.5971808", "0.59469956", "0.5938618", "0.59086764", "0.59044254", "0.59018356", "0.58778733", "0.58743674", "0.58452106", "0.5840796", "0.58389056", "0.58276016", "0.5824634", "0.5814124", "0.58065474", "0.5806088", "0.57986754", "0.57924604", "0.5790675", "0.5780478", "0.5772929", "0.57675636" ]
0.80710715
0
Writes each separate variable from the observation or feedback tables into netcdf using h5py. f is a pandas dataframe with one column, one for each variable. k is either 'era5fb' or 'observations_table'
def write_dict_h5(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}): #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')} #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') } with h5py.File(dfile,mode) as fd: try: fd.create_group(k) index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1') fd[k].create_dataset('index', data=index) except: pass if not var_selection: var_selection=list(f.keys()) string10=numpy.zeros(fixed_string_len,dtype='S1') sdict={} slist=[] #groupencodings for v in var_selection: #variables_dic[v] = '' if type(f[v]) == pd.core.series.Series: fvv=f[v].values else: fvv=f[v] if type(fvv[0]) not in [str,bytes,numpy.bytes_]: if fvv.dtype !='S1': fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True) fd[k][v][:]=fvv[:] if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')} if v in attrs.keys(): for kk,vv in attrs[v].items(): if type(vv) is str: fd[k][v].attrs[kk]=numpy.bytes_(vv) else: fd[k][v].attrs[kk]=vv if v in ['date_time','report_timestamp','record_timestamp']: fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) else: fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True) fd[k][v][:]=fvv[:] slen=fvv.shape[1] sdict[v]=slen if slen not in slist: slist.append(slen) try: fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] ) except: pass if v in attrs.keys(): fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description']) fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) else: sleno=len(fvv[0]) slen=sleno try: slen=int(fvv.dtype.descr[0][1].split('S')[1]) except: pass sdict[v]=slen if slen not in slist: slist.append(slen) try: fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] ) except: pass #x=x.reshape(fvv.shape[0],slen) fd[k].create_dataset(v,data=fvv.view('S1').reshape(fvv.shape[0],slen),compression=fbencodings[v]['compression'],chunks=True) if v in attrs.keys(): fd[k][v].attrs['description'] =numpy.bytes_(attrs[v]['description']) fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) #variables_dic[v] = f[v].values.dtype for v in fd[k].keys(): #var_selection: l=0 ''' if v == 'primary_station_id': try: fd[k][v].dims[l].attach_scale(fd[k]['index']) except: pass try: slen = len( fd[k][v][0] ) stringa=numpy.zeros( slen , dtype='S1') fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) except: fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) if v == 'station_name': try: fd[k][v].dims[l].attach_scale(fd[k]['index']) slen = len( fd[k][v][0][0]) stringa=numpy.zeros( slen , dtype='S1') except: pass try: fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) print('done attaching') except: print('not working') ''' try: if type(f[v]) == pd.core.series.Series: fvv=f[v].values else: fvv=f[v] if 'string' not in v and v!='index': fd[k][v].dims[l].attach_scale(fd[k]['index']) #print(v,fvv.ndim,type(fvv[0])) if fvv.ndim==2 or type(fvv[0]) in [str,bytes,numpy.bytes_]: slen=sdict[v] #slen=10 fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)]) except: pass i=4 for v in slist: s='string{}'.format(v) for a in 
['NAME']: fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.') i+=1 return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_dict_h6(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n# if type(f[v]) == pd.core.frame.DataFrame:\n# index=numpy.zeros (f[f.columns[0]].shape[0], dtype='S1')\n# else:\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n \n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n \n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n if fvv.dtype !='S1':\n \n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv #f[v][:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n if v == 'date_time':\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv #f[v][:]\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n x=numpy.array(fvv,dtype='S').view('S1')\n slen=x.shape[0]//fvv.shape[0]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n \n \n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=x,compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = fvv.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n try:\n if 'string' not in v and v!='index': \n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n if type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except MemoryError:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + 
'_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! 
\"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def genFluxTable(self, fname=\"1dFEhdfoutput.h5\"):\n h5data = {}\n for g in range(10):\n plotData = np.array([self.nodes[:, 1], self.angleIntFlux[g]])\n plotData = plotData[:, np.argsort(plotData[0])]\n h5data[\"mesh\" + str(g)] = plotData[0]\n h5data[\"groupFlx\" + str(g)] = plotData[1]\n h5d.writeToHdf5(h5data, fname)", "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n x = []\n g0 = []\n offt = []\n unused_bit = []\n pa = []\n pb = []\n wa = []\n wb = []\n nan = np.full(3, np.nan)\n encoding = model._encoding\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n pa.append(element.pa)\n pb.append(element.pb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)", "def save_as_hdf5(self, filename):", "def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. 
create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")", "def test_34_save_ds(self, tempfile_h5):\n example = Example(groups=7, origins=5, )\n example.save_dataset_to_netcdf(tempfile_h5)", "def write_postprocessing_section(params, hdf5_data):\n\n if params.irf is not None:\n x2 = (' '.join(params.irf)).split()\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(float(x2[0]))\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n if params.show_pressure is not None:\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(float(x2[0]))\n\n if params.kochin_function is not None:\n x2 = (' '.join(params.kochin_function)).split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n if params.free_surface_elevation:\n x2 = (' '.join(params.free_surface_elevation)).split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = 
tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def save_subdomain_data(self, h5File):\n s = \"::: writing 'ff' FacetFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.ff, 'ff')\n\n s = \"::: writing 'ff_acc' FacetFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.ff_acc, 'ff_acc')\n\n s = \"::: writing 'cf' CellFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.cf, 'cf')", "def make_obstab_era5fb_dic(self, dataset = '' , date_time = '', File = ''):\n index_offset = self.unique_dates[dataset][File]['index_offset']\n \n # Removing the index_offset, which is defined only if any slicing was done \n index = self.unique_dates[dataset][File]['indices'][date_time]['low'] - index_offset\n index_up = self.unique_dates[dataset][File]['indices'][date_time]['up'] - index_offset\n \n obs_dic = {} \n for v in self.observations_table_vars:\n obs_dic[v] = data[dataset][File]['observations_table'][v][index:index_up]\n #print('v is : ', v )\n\n \"\"\" Loop over the obs_tab to find duplicates.\n I fill a dictionary for each distinct pressure level, and I put inside\n the observed_variable number.\n If the list lready contains the combination pressure level - observed variable,\n then the record is skipped \"\"\"\n\n indices = [] # these are the only non-duplicates to be kept\n\n already_selected = { }\n \n #print('starting the loop: ' , date_time, ' ' , dataset, ' ', index, ' ' , index_up)\n for p,var,val,ind in zip ( obs_dic['z_coordinate'] , obs_dic['observed_variable'],obs_dic['observation_value'] ,range(len(obs_dic['z_coordinate'])) ):\n #print(p,var,val,ind)\n #if date_time > 2354300000:\n # print('looping :::', var, ' ' , val, ' ' , ind , ' ', dataset, ' ' , index_up, ' ' , index, ' ', File)\n \n if self.only_std_plevels:\n if p not in self.std_plevs:\n continue \n\n \n if p not in already_selected.keys():\n already_selected[p] = []\n \n \n if np.isfinite(val):\n if var not in already_selected[p]:\n already_selected[p].append(var)\n indices.append(ind) # record to be kept\n else:\n pass\n else: # skipping nans\n pass\n\n #print('done with the loop')\n red_obs_dic = {} # dictionary for the reduced (removed duplicates) obs_tab\n for v in self.observations_table_vars:\n red_obs_dic[v] = 
obs_dic[v][indices]\n\n ''' Simply returns the proper format for ''null' value '''\n def get_null( tipo = ''):\n if tipo == np.int32 :\n void = 0\n elif tipo == np.float32 :\n void = 0.0\n elif tipo == np.bytes_ :\n void = b'nan'\n return void\n \n ''' Filling the feedback table. Only feednack for era5_1 and era5_2 are currently available. \n Reads the total number of possible columns from the dic_type_attributes dictionary.\n Era5_1 and era5_2 fb have different columns.\n If data for a variable is not available, it fills with the appropriate null value '''\n \n #print('making the era5fb ', date_time, ' ' , dataset)\n red_era5fb_dic = {}\n for v in self.era5fb_columns:\n tipo = self.dic_type_attributes['era5fb'][v]['type'] \n if dataset == 'era5_1' or dataset == 'era5_2':\n if v in data[dataset][File]['era5fb_tab'].keys(): \n red_era5fb_dic[v] = data[dataset][File]['era5fb_tab'][v][index:index_up][indices]\n else:\n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void) \n else: # no feedback for non era%-1 or era5_2 datasets \n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void)\n \n #print('done making_obstab_era5fb')\n \"\"\"\n try:\n if len(red_obs_dic['date_time']) > 2:\n print('yes')\n else:\n print('check') \n except:\n print('check')\n \"\"\" \n return red_obs_dic , red_era5fb_dic", "def do_H5F_2_PY(AX_dic, tag, d):\n\n # accesing xs file\n ps1 = xs_data(AX_dic['path']['file_path'], AX_dic['A2'][tag]['info']['xs_folder'], AX_dic['A2'][tag]['info']['xs_file'],\n AX_dic['path']['sbr_path'], AX_dic['path']['sbr_file']) # path for xs and sbr is defines\n ps1.get_phase_space(grid_flag='FG')\n # the auxiliary files are generated with sbr. if generate_out_flag='yes'\n # the *.out files are generated.\n grid_flag = 'FG'\n ps1.xs_auxiliar_file_generator(AX_dic['A2'][tag]['info']['generate_out_flag'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], grid_flag,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias']) # grid_flag is required, options; 'SG', 'FG'\n domain_ofinterest = cp.deepcopy(ps1.phase_space)\n xs_ofinterest, domain_ofinterest = domain_reduction(domain_ofinterest, d, AX_dic['A2'][\n tag]['info']['evol_vec'], ps1.order)\n IRG = []\n for key in xs_ofinterest.keys():\n IRG.append('_' + str(len(xs_ofinterest[key])))\n AX_dic['A2'][tag]['info']['IRG'] = ''.join(IRG)\n xs_out, order = ps1.xs_retrival_FG(xs_ofinterest, domain_ofinterest,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'])\n conc_dic, fi_dic, k_dic = ps1.cellwise_retrival(domain_ofinterest, AX_dic['path']['out_folder'],\n AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], AX_dic['A2'][tag]['info']['evol_vec'])\n\n # The structure of the xs data is here generated\n AX_dic['A2'][tag]['data'] = {}\n AX_dic['A2'][tag]['data']['I'] = xs_out\n AX_dic['A2'][tag]['data']['order_tuple'] = order\n AX_dic['A2'][tag]['data']['PS'] = ps1.domain_ofinterest\n\n for i in AX_dic['A2'][tag]['data']['I'].keys():\n AX_dic['A2'][tag]['data']['I'][i]['conc'] = conc_dic[i]\n AX_dic['A2'][tag]['data']['fi'] = fi_dic\n AX_dic['A2'][tag]['data']['k'] = k_dic", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the 
datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def _write_h5_out(self, fout, save_hybrid_meta=True):\n\n with Outputs(fout, mode='a') as out:\n if 'meta' in out.datasets and save_hybrid_meta:\n hybrid_meta = to_records_array(self.hybrid_meta)\n out['meta'] = hybrid_meta\n\n for dset, data in self.profiles.items():\n out[dset] = data", "def read_all_odbsql_stn_withfeedback(dataset, odbfile):\n columns, kinds, tdict = make_odb_header(odbfile, dataset) \n try: \n t=time.time() \n try:\n f=gzip.open(odbfile) \n except:\n print(odbfile, 'The zipped ODB file was not found !')\n return\n \n #d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','reportype','andate','antime',\n # 'obsvalue@body','fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','collection_identifier@conv','source@hdr']\n \n # had to remove 'collection_identifier@conv' to make it work with 1, 3188, 1759, 1761 \n \n tdict['sensor@hdr']=numpy.float32\n tdict['ppcode@conv_body']=numpy.float32\n \n '''\n d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','lon@hdr','lat@hdr','seqno@hdr',\n 'obsvalue@body','source@hdr' , 'vertco_type@body']\n \n if 'fg_depar@body' in columns: # creating the colkumns for era5fb \n d=d+['fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','reportype','andate','antime']\n '''\n \n# restrict feedback to certain columns \n #for c in columns:\n # if c not in d:\n # del tdict[c]\n \n #columns=d.copy()\n \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) #nrows=1000000)\n \n \"\"\" Case where erafb is not available \"\"\"\n if 'fg_depar@body' not in columns:\n alldict['fg_depar@body']=numpy.float32(numpy.NaN)\n alldict['an_depar@body']=numpy.float32(numpy.NaN)\n alldict['biascorr@body']=numpy.float32(numpy.NaN)\n alldict['sondetype@conv']=numpy.int32(-2147483648)\n alldict['reportype']=numpy.int32(-2147483648)\n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n idx=numpy.where(numpy.logical_or(alldict.reportype.values==16045,alldict.reportype.values==16068))[0]\n if len(idx)>0:\n \n 
#alldict.drop(index=alldict.index[idx],inplace=True)\n y=numpy.int64(alldict['date@hdr'].values)*1000000+alldict['time@hdr'].values\n x=numpy.unique(y)\n dropindex=[]\n for i in range(1,x.shape[0]):\n if x[i]-x[i-1]<60:\n idx=numpy.where(y==x[i-1])[0]\n if idx.shape[0]>0:\n dropindex.append(idx)\n else:\n print('empty index')\n if dropindex: \n dropindex = numpy.concatenate(dropindex).ravel()\n alldict.drop(index=alldict.index[dropindex],inplace=True)\n \n #print(time.time()-t) #,sys.getsizeof(alldict)//1024//1024)\n \n #idx=numpy.where(alldict.reportype.values==16045)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n #idx=numpy.where(alldict.reportype.values==16068)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n \n alldict['source_id'] = dataset.rjust(10)\n\n for c in alldict.columns:\n \n if type(alldict[c].iloc[0]) in [str,bytes]:\n l=alldict[c].shape[0]\n slen=len(alldict[c].values[0])\n alldict[c]=numpy.array(alldict.pop(c).values,dtype='S{}'.format(slen))\n #alldict[c]=numpy.string_(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.int64:\n alldict[c]=numpy.int32(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.float64:\n alldict[c]=numpy.float32(alldict[c])\n \n #print('after odb:',time.time()-t)\n \n except MemoryError:\n print('Reading ODB failed ! 
' + odbfile)\n return alldict\n \n #print(odbfile,time.time()-t)#, sys.getsizeof(alldict))\n\n \n return alldict", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n 
data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n self.physicalVY)\n dataFile.close()", "def process_pred_detail_f5file(fn, f5_readid_map):\n\n f5_pred_key = '/pred/pred_0/predetail'\n dflist = []\n with h5py.File(fn, 'r') as mr:\n # m_pred = mr[f5_pred_key].value\n # logger.debug(m_pred)\n for name in mr['/pred']:\n # logger.debug(name)\n pred_num_key = f'/pred/{name}'\n f5file = os.path.basename(mr[pred_num_key].attrs['f5file'])\n mapped_chr = mr[pred_num_key].attrs['mapped_chr']\n mapped_strand = mr[pred_num_key].attrs['mapped_strand']\n\n # logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}')\n\n pred_detail_key = f'{pred_num_key}/predetail'\n # m_pred = mr[pred_detail_key].value\n m_pred = mr[pred_detail_key][()]\n m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64),\n ('readbasei', 
np.uint64), ('mod_pred', np.int)])\n\n dataset = []\n for mi in range(len(m_pred)):\n if m_pred['refbase'][mi] not in ['C']:\n continue\n if m_pred['refbase'][mi] in ['-', 'N', 'n']:\n continue\n # if m_pred['readbase'][mi] == '-':\n # continue\n\n # Filter non-CG patterns results\n ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta)\n\n if mapped_strand == '+':\n if ret[5:7] != 'CG':\n continue\n elif mapped_strand == '-':\n if ret[4:6] != 'CG':\n continue\n\n if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1:\n meth_indicator = 1\n else:\n meth_indicator = 0\n # sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1\n ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi],\n 'sequence': ret}\n dataset.append(ret)\n df = pd.DataFrame(dataset)\n\n if len(df) < 1:\n continue\n df['chr'] = str(mapped_chr)\n df['end'] = df['start'] + 1\n df['strand'] = str(mapped_strand)\n df['read-id'] = f5_readid_map[f5file]\n df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']]\n # logger.info(df)\n dflist.append(df)\n\n sumdf = pd.concat(dflist)\n\n # logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.')\n return sumdf", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! 
array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 
'aeff', 'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def summaryD5(self):\r\n\r\n if self.window.diff_tabs.tempruns_D45_set==3:\r\n\r\n dfD5_temp1_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp1_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n D5summary_temp1=pd.concat([dfD5_temp1_pos1,dfD5_temp1_pos2],axis=1)\r\n D5summary_temp1.to_csv('D5summary_temp1.txt',index=False)\r\n\r\n dfD5_temp2_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp2_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n\r\n D5summary_temp2=pd.concat([dfD5_temp2_pos1,dfD5_temp2_pos2],axis=1)\r\n D5summary_temp2.to_csv('D5summary_temp2.txt',index=False)\r\n\r\n dfD5_temp3_pos1= pd.read_csv('raw_text_D5_3_1.txt')\r\n dfD5_temp3_pos2= pd.read_csv('raw_text_D5_3_2.txt')\r\n \r\n\r\n D4summary_temp3=pd.concat([dfD5_temp3_pos1,dfD5_temp3_pos2],axis=1)\r\n D4summary_temp3.to_csv('D5summary_temp3.txt',index=False)", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print \"fits 
corrupted for file \" + str(events_filename)\n continue\n if table.meta[\"OBS_ID\"]!=obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def write_merged(self, content = '', table=''):\n\n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True) \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n\n '''\n if os.path.isfile('dic_obstab_attributes.npy'):\n attrs_dic = np.load('dic_obstab_attributes.npy' , allow_pickle = True).item()\n else:\n attrs_dic = {}\n '''\n attrs_dic = {}\n\n \"\"\" Retrieving the attributes \"\"\"\n if content in ['observations_table','header_table','era5fb', 'station_configuration']:\n for var in table.keys():\n if var == 'comments':\n continue \n\n attrs_dic[var] = {}\n try:\n attrs_dic[var]['description'] = bytes( self.dic_type_attributes[content][var]['description'] , 'utf-8' )\n except:\n attrs_dic[var]['description'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH DESCRIPTION: ', var , ' ' , self.dic_type_attributes[content][var]['description']) # FFF CHECK WHY SOME ARE FAILING\n\n try:\n attrs_dic[var]['external_table'] = bytes( self.dic_type_attributes[content][var]['external_table'] , 'utf-8' )\n except:\n attrs_dic[var]['external_table'] = bytes( 'missing' , 'utf-8' )\n 
#print(' FFF FAILING WITH EXTERNAL TABLE : ', var ) # FFF CHECK WHY SOME ARE FAILING \n\n\n if content == 'recordindex': # writing the recordindex, recordtimestamp, dateindex\n #logging.info('Writing the merged record indices to the netCDF output ')\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n\n elif content == 'cdm_tables':\n for k in data['cdm_tables'].keys():\n table = data['cdm_tables'][k]\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = k)\n #logging.info('Writing the cdm table %s to the netCDF output ', k)\n \n elif content == 'source_configuration': \n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = content)\n #logging.info('Writing the source_configuration table to the netCDF output ')\n\n elif content == 'station_configuration':\n for k in table.keys(): \n if k == 'station_name':\n print(0)\n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n try:\n table[k] = table[k].astype( var_type ) \n print('Done station_conf' , k )\n except:\n if k == 'secondary_id':\n table[k] = table[k].astype( bytes ) \n\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n dic = {k:table[k]} \n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n \n \n # Writing the observations_table, header_table, era5fb \n elif content in ['observations_table', 'era5fb', 'header_table']: \n\n shape = ''\n for k in table.keys(): \n if k == 'index' or k == 'hdrlen' or 'string' in k :\n continue\n if k == 'station_name':\n print(0)\n \n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n\n if k == 'hdrlen': \n continue\n try:\n #table[k] = table[k].astype( bytes ) \n table[k] = table[k].astype( var_type ) \n \n except:\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n #print('*** Writing the table ', content, ' variable ', k)\n #if k == 'duplicates':\n # table[k] = table[k].astype( bytes ) \n \n \n dic = {k:table[k]} # making a 1 colum dictionary\n shape = table[k].shape\n #print('SHAPE IS FFF ', table[k].shape )\n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n\n if content == 'observations_table' and not self.obstab_nans_filled :\n missing_cdm_var = [ v for v in self.dic_type_attributes[content].keys() if v not in self.observations_table_vars] # variables to be filled with nans \n for k in missing_cdm_var:\n if k not in ['advanced_assimilation_feedback']:\n var_type = self.dic_type_attributes[content][k]['type']\n if var_type == np.int32 :\n nan = np.int32(-2147483648)\n else:\n nan = np.float32(np.nan) \n logging.debug('Adding missing cdm colum with empty values: %s ' , k )\n dic={k:np.empty(shape,dtype=np.dtype(nan))}\n dic[k].fill(nan)\n write_dict_h5(out_name, dic, 'observations_table', self.encodings['observations_table'], var_selection=[], mode='a', attrs = attrs_dic ) ### TO DO\n self.obstab_nans_filled = True\n\n elif content == 'observations_table' and self.obstab_nans_filled:\n return", "def saveTrainingStats(model, hdf5):\n stats = model.getTrainingStats()\n stats_grp = 
hdf5.create_group(\"training_stats\")\n stats_grp.create_dataset(\"activeK\", data=stats[\"activeK\"])\n stats_grp.create_dataset(\"elbo\", data=stats[\"elbo\"])\n stats_grp.create_dataset(\"elbo_terms\", data=stats[\"elbo_terms\"].T)\n stats_grp['elbo_terms'].attrs['colnames'] = [a.encode('utf8') for a in stats[\"elbo_terms\"].columns.values]", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def write_h5(\n lk_file,\n output_filename,\n compression_level=5,\n omit_data=None,\n *,\n crop_time_range=None,\n verbose=False,\n):\n import h5py\n\n omit_data = {omit_data} if isinstance(omit_data, str) else omit_data\n h5_file = lk_file.h5\n\n with h5py.File(output_filename, \"w\") as out_file:\n\n def traversal_function(name, node):\n if omit_data and any([fnmatch(name, o) for o in omit_data]):\n if verbose:\n print(f\"Omitted {name} from export\")\n return\n\n if isinstance(node, h5py.Dataset):\n if node.dtype.kind == \"O\":\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action=\"ignore\",\n category=FutureWarning,\n message=\"Direct access to this field is deprecated\",\n )\n\n _write_cropped_metadata(\n lk_file, out_file, name, node, crop_time_range, verbose\n )\n else:\n _write_numerical_data(\n lk_file, out_file, name, node, compression_level, crop_time_range, verbose\n )\n\n else:\n out_file.create_group(f\"{name}\")\n out_file[name].attrs.update(node.attrs)\n\n h5_file.visititems(traversal_function)\n out_file.attrs.update(h5_file.attrs)", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n \n map_data = 
make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return" ]
[ "0.67543757", "0.6443923", "0.6092056", "0.5962444", "0.59045845", "0.58782977", "0.58390933", "0.58258235", "0.57865006", "0.57606965", "0.57498914", "0.56900674", "0.5640461", "0.5636864", "0.5624655", "0.5609386", "0.55957025", "0.5575014", "0.5549395", "0.55375063", "0.55095387", "0.5505268", "0.5503839", "0.5463", "0.54442716", "0.54426664", "0.5422753", "0.54030067", "0.53984666", "0.5383795" ]
0.6696595
1
Writes each separate variable from the observation or feedback tables into netcdf using h5py. f is a pandas dataframe with one column, one for each variable. k is either 'era5fb' or 'observations_table'
def write_dict_h6(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):
    #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}
    #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }

    with h5py.File(dfile,mode) as fd:
        try:
            fd.create_group(k)
            # if type(f[v]) == pd.core.frame.DataFrame:
            #     index=numpy.zeros (f[f.columns[0]].shape[0], dtype='S1')
            # else:
            index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')
            fd[k].create_dataset('index', data=index)
        except:
            pass
        if not var_selection:
            var_selection=list(f.keys())

        string10=numpy.zeros(fixed_string_len,dtype='S1')
        sdict={}
        slist=[]

        #groupencodings
        for v in var_selection:
            #variables_dic[v] = ''
            if type(f[v]) == pd.core.series.Series:
                fvv=f[v].values
            else:
                fvv=f[v]

            if type(fvv[0]) not in [str,bytes,numpy.bytes_]:
                if fvv.dtype !='S1':
                    fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)
                    fd[k][v][:]=fvv #f[v][:]
                    if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}
                        if v in attrs.keys():
                            fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])
                            fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])
                        if v == 'date_time':
                            fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , )
                else:
                    fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)
                    fd[k][v][:]=fvv #f[v][:]
                    if v in attrs.keys():
                        fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])
                        fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])
            else:
                sleno=len(fvv[0])
                slen=sleno
                x=numpy.array(fvv,dtype='S').view('S1')
                slen=x.shape[0]//fvv.shape[0]
                sdict[v]=slen
                if slen not in slist:
                    slist.append(slen)
                try:
                    fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )
                except:
                    pass
                x=x.reshape(fvv.shape[0],slen)
                fd[k].create_dataset(v,data=x,compression=fbencodings[v]['compression'],chunks=True)
                if v in attrs.keys():
                    fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])
                    fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])
            #variables_dic[v] = fvv.dtype

        for v in fd[k].keys(): #var_selection:
            l=0
            try:
                if 'string' not in v and v!='index':
                    if type(f[v]) == pd.core.series.Series:
                        fvv=f[v].values
                    else:
                        fvv=f[v]
                    fd[k][v].dims[l].attach_scale(fd[k]['index'])
                    if type(fvv[0]) in [str,bytes,numpy.bytes_]:
                        slen=sdict[v]
                        #slen=10
                        fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])
            except MemoryError:
                pass

        i=4
        for v in slist:
            s='string{}'.format(v)
            for a in ['NAME']:
                fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')
            i+=1

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_dict_h5(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n \n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n\n if fvv.dtype !='S1':\n \n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n for kk,vv in attrs[v].items():\n if type(vv) is str: \n fd[k][v].attrs[kk]=numpy.bytes_(vv)\n else:\n fd[k][v].attrs[kk]=vv\n \n if v in ['date_time','report_timestamp','record_timestamp']:\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n slen=fvv.shape[1]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n try:\n slen=int(fvv.dtype.descr[0][1].split('S')[1])\n except: \n pass\n\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n #x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=fvv.view('S1').reshape(fvv.shape[0],slen),compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description'] =numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = f[v].values.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n \n '''\n if v == 'primary_station_id':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n except:\n pass\n \n try:\n slen = len( fd[k][v][0] )\n stringa=numpy.zeros( slen , dtype='S1')\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) \n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n except:\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n \n \n if v == 'station_name':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n slen = len( fd[k][v][0][0])\n stringa=numpy.zeros( slen , dtype='S1')\n except:\n pass\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa )\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n print('done attaching')\n except:\n print('not working')\n \n ''' \n try:\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if 'string' not in v and v!='index': \n 
fd[k][v].dims[l].attach_scale(fd[k]['index'])\n #print(v,fvv.ndim,type(fvv[0]))\n if fvv.ndim==2 or type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! 
\"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def genFluxTable(self, fname=\"1dFEhdfoutput.h5\"):\n h5data = {}\n for g in range(10):\n plotData = np.array([self.nodes[:, 1], self.angleIntFlux[g]])\n plotData = plotData[:, np.argsort(plotData[0])]\n h5data[\"mesh\" + str(g)] = plotData[0]\n h5data[\"groupFlx\" + str(g)] = plotData[1]\n h5d.writeToHdf5(h5data, fname)", "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n x = []\n g0 = []\n offt = []\n unused_bit = []\n pa = []\n pb = []\n wa = []\n wb = []\n nan = np.full(3, np.nan)\n encoding = model._encoding\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n pa.append(element.pa)\n pb.append(element.pb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)", "def save_as_hdf5(self, filename):", "def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. 
create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")", "def test_34_save_ds(self, tempfile_h5):\n example = Example(groups=7, origins=5, )\n example.save_dataset_to_netcdf(tempfile_h5)", "def write_postprocessing_section(params, hdf5_data):\n\n if params.irf is not None:\n x2 = (' '.join(params.irf)).split()\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(float(x2[0]))\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n if params.show_pressure is not None:\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(float(x2[0]))\n\n if params.kochin_function is not None:\n x2 = (' '.join(params.kochin_function)).split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n if params.free_surface_elevation:\n x2 = (' '.join(params.free_surface_elevation)).split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = 
tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def save_subdomain_data(self, h5File):\n s = \"::: writing 'ff' FacetFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.ff, 'ff')\n\n s = \"::: writing 'ff_acc' FacetFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.ff_acc, 'ff_acc')\n\n s = \"::: writing 'cf' CellFunction to supplied hdf5 file :::\"\n print_text(s, cls=self)\n h5File.write(self.cf, 'cf')", "def make_obstab_era5fb_dic(self, dataset = '' , date_time = '', File = ''):\n index_offset = self.unique_dates[dataset][File]['index_offset']\n \n # Removing the index_offset, which is defined only if any slicing was done \n index = self.unique_dates[dataset][File]['indices'][date_time]['low'] - index_offset\n index_up = self.unique_dates[dataset][File]['indices'][date_time]['up'] - index_offset\n \n obs_dic = {} \n for v in self.observations_table_vars:\n obs_dic[v] = data[dataset][File]['observations_table'][v][index:index_up]\n #print('v is : ', v )\n\n \"\"\" Loop over the obs_tab to find duplicates.\n I fill a dictionary for each distinct pressure level, and I put inside\n the observed_variable number.\n If the list lready contains the combination pressure level - observed variable,\n then the record is skipped \"\"\"\n\n indices = [] # these are the only non-duplicates to be kept\n\n already_selected = { }\n \n #print('starting the loop: ' , date_time, ' ' , dataset, ' ', index, ' ' , index_up)\n for p,var,val,ind in zip ( obs_dic['z_coordinate'] , obs_dic['observed_variable'],obs_dic['observation_value'] ,range(len(obs_dic['z_coordinate'])) ):\n #print(p,var,val,ind)\n #if date_time > 2354300000:\n # print('looping :::', var, ' ' , val, ' ' , ind , ' ', dataset, ' ' , index_up, ' ' , index, ' ', File)\n \n if self.only_std_plevels:\n if p not in self.std_plevs:\n continue \n\n \n if p not in already_selected.keys():\n already_selected[p] = []\n \n \n if np.isfinite(val):\n if var not in already_selected[p]:\n already_selected[p].append(var)\n indices.append(ind) # record to be kept\n else:\n pass\n else: # skipping nans\n pass\n\n #print('done with the loop')\n red_obs_dic = {} # dictionary for the reduced (removed duplicates) obs_tab\n for v in self.observations_table_vars:\n red_obs_dic[v] = 
obs_dic[v][indices]\n\n ''' Simply returns the proper format for ''null' value '''\n def get_null( tipo = ''):\n if tipo == np.int32 :\n void = 0\n elif tipo == np.float32 :\n void = 0.0\n elif tipo == np.bytes_ :\n void = b'nan'\n return void\n \n ''' Filling the feedback table. Only feednack for era5_1 and era5_2 are currently available. \n Reads the total number of possible columns from the dic_type_attributes dictionary.\n Era5_1 and era5_2 fb have different columns.\n If data for a variable is not available, it fills with the appropriate null value '''\n \n #print('making the era5fb ', date_time, ' ' , dataset)\n red_era5fb_dic = {}\n for v in self.era5fb_columns:\n tipo = self.dic_type_attributes['era5fb'][v]['type'] \n if dataset == 'era5_1' or dataset == 'era5_2':\n if v in data[dataset][File]['era5fb_tab'].keys(): \n red_era5fb_dic[v] = data[dataset][File]['era5fb_tab'][v][index:index_up][indices]\n else:\n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void) \n else: # no feedback for non era%-1 or era5_2 datasets \n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void)\n \n #print('done making_obstab_era5fb')\n \"\"\"\n try:\n if len(red_obs_dic['date_time']) > 2:\n print('yes')\n else:\n print('check') \n except:\n print('check')\n \"\"\" \n return red_obs_dic , red_era5fb_dic", "def do_H5F_2_PY(AX_dic, tag, d):\n\n # accesing xs file\n ps1 = xs_data(AX_dic['path']['file_path'], AX_dic['A2'][tag]['info']['xs_folder'], AX_dic['A2'][tag]['info']['xs_file'],\n AX_dic['path']['sbr_path'], AX_dic['path']['sbr_file']) # path for xs and sbr is defines\n ps1.get_phase_space(grid_flag='FG')\n # the auxiliary files are generated with sbr. if generate_out_flag='yes'\n # the *.out files are generated.\n grid_flag = 'FG'\n ps1.xs_auxiliar_file_generator(AX_dic['A2'][tag]['info']['generate_out_flag'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], grid_flag,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias']) # grid_flag is required, options; 'SG', 'FG'\n domain_ofinterest = cp.deepcopy(ps1.phase_space)\n xs_ofinterest, domain_ofinterest = domain_reduction(domain_ofinterest, d, AX_dic['A2'][\n tag]['info']['evol_vec'], ps1.order)\n IRG = []\n for key in xs_ofinterest.keys():\n IRG.append('_' + str(len(xs_ofinterest[key])))\n AX_dic['A2'][tag]['info']['IRG'] = ''.join(IRG)\n xs_out, order = ps1.xs_retrival_FG(xs_ofinterest, domain_ofinterest,\n AX_dic['path']['out_folder'], AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'])\n conc_dic, fi_dic, k_dic = ps1.cellwise_retrival(domain_ofinterest, AX_dic['path']['out_folder'],\n AX_dic['A2'][tag]['info']['out_alias'], AX_dic['A2'][tag]['info']['flag_FG2semiFG'], AX_dic['A2'][tag]['info']['evol_vec'])\n\n # The structure of the xs data is here generated\n AX_dic['A2'][tag]['data'] = {}\n AX_dic['A2'][tag]['data']['I'] = xs_out\n AX_dic['A2'][tag]['data']['order_tuple'] = order\n AX_dic['A2'][tag]['data']['PS'] = ps1.domain_ofinterest\n\n for i in AX_dic['A2'][tag]['data']['I'].keys():\n AX_dic['A2'][tag]['data']['I'][i]['conc'] = conc_dic[i]\n AX_dic['A2'][tag]['data']['fi'] = fi_dic\n AX_dic['A2'][tag]['data']['k'] = k_dic", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the 
datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def _write_h5_out(self, fout, save_hybrid_meta=True):\n\n with Outputs(fout, mode='a') as out:\n if 'meta' in out.datasets and save_hybrid_meta:\n hybrid_meta = to_records_array(self.hybrid_meta)\n out['meta'] = hybrid_meta\n\n for dset, data in self.profiles.items():\n out[dset] = data", "def read_all_odbsql_stn_withfeedback(dataset, odbfile):\n columns, kinds, tdict = make_odb_header(odbfile, dataset) \n try: \n t=time.time() \n try:\n f=gzip.open(odbfile) \n except:\n print(odbfile, 'The zipped ODB file was not found !')\n return\n \n #d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','reportype','andate','antime',\n # 'obsvalue@body','fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','collection_identifier@conv','source@hdr']\n \n # had to remove 'collection_identifier@conv' to make it work with 1, 3188, 1759, 1761 \n \n tdict['sensor@hdr']=numpy.float32\n tdict['ppcode@conv_body']=numpy.float32\n \n '''\n d=['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body','lon@hdr','lat@hdr','seqno@hdr',\n 'obsvalue@body','source@hdr' , 'vertco_type@body']\n \n if 'fg_depar@body' in columns: # creating the colkumns for era5fb \n d=d+['fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv','reportype','andate','antime']\n '''\n \n# restrict feedback to certain columns \n #for c in columns:\n # if c not in d:\n # del tdict[c]\n \n #columns=d.copy()\n \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) #nrows=1000000)\n \n \"\"\" Case where erafb is not available \"\"\"\n if 'fg_depar@body' not in columns:\n alldict['fg_depar@body']=numpy.float32(numpy.NaN)\n alldict['an_depar@body']=numpy.float32(numpy.NaN)\n alldict['biascorr@body']=numpy.float32(numpy.NaN)\n alldict['sondetype@conv']=numpy.int32(-2147483648)\n alldict['reportype']=numpy.int32(-2147483648)\n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n idx=numpy.where(numpy.logical_or(alldict.reportype.values==16045,alldict.reportype.values==16068))[0]\n if len(idx)>0:\n \n 
#alldict.drop(index=alldict.index[idx],inplace=True)\n y=numpy.int64(alldict['date@hdr'].values)*1000000+alldict['time@hdr'].values\n x=numpy.unique(y)\n dropindex=[]\n for i in range(1,x.shape[0]):\n if x[i]-x[i-1]<60:\n idx=numpy.where(y==x[i-1])[0]\n if idx.shape[0]>0:\n dropindex.append(idx)\n else:\n print('empty index')\n if dropindex: \n dropindex = numpy.concatenate(dropindex).ravel()\n alldict.drop(index=alldict.index[dropindex],inplace=True)\n \n #print(time.time()-t) #,sys.getsizeof(alldict)//1024//1024)\n \n #idx=numpy.where(alldict.reportype.values==16045)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n #idx=numpy.where(alldict.reportype.values==16068)[0]\n #if idx.shape[0]>0:\n #idy=numpy.where(numpy.logical_and(alldict.reportype.values!=16045,alldict.reportype.values!=16068))[0]\n #if idy.shape[0]>0:\n #idz=numpy.isin(alldict.andate.values[idy],alldict.andate.values[idx])\n #if numpy.sum(idz)>0:\n #alldict.drop(index=alldict.index[idy[idz]],inplace=True)\n \n \n #print(time.time()-t,sys.getsizeof(alldict)//1024//1024)\n \n alldict['source_id'] = dataset.rjust(10)\n\n for c in alldict.columns:\n \n if type(alldict[c].iloc[0]) in [str,bytes]:\n l=alldict[c].shape[0]\n slen=len(alldict[c].values[0])\n alldict[c]=numpy.array(alldict.pop(c).values,dtype='S{}'.format(slen))\n #alldict[c]=numpy.string_(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.int64:\n alldict[c]=numpy.int32(alldict[c])\n \n if type(alldict[c].iloc[0]) is numpy.float64:\n alldict[c]=numpy.float32(alldict[c])\n \n #print('after odb:',time.time()-t)\n \n except MemoryError:\n print('Reading ODB failed ! 
' + odbfile)\n return alldict\n \n #print(odbfile,time.time()-t)#, sys.getsizeof(alldict))\n\n \n return alldict", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n 
data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n self.physicalVY)\n dataFile.close()", "def process_pred_detail_f5file(fn, f5_readid_map):\n\n f5_pred_key = '/pred/pred_0/predetail'\n dflist = []\n with h5py.File(fn, 'r') as mr:\n # m_pred = mr[f5_pred_key].value\n # logger.debug(m_pred)\n for name in mr['/pred']:\n # logger.debug(name)\n pred_num_key = f'/pred/{name}'\n f5file = os.path.basename(mr[pred_num_key].attrs['f5file'])\n mapped_chr = mr[pred_num_key].attrs['mapped_chr']\n mapped_strand = mr[pred_num_key].attrs['mapped_strand']\n\n # logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}')\n\n pred_detail_key = f'{pred_num_key}/predetail'\n # m_pred = mr[pred_detail_key].value\n m_pred = mr[pred_detail_key][()]\n m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64),\n ('readbasei', 
np.uint64), ('mod_pred', np.int)])\n\n dataset = []\n for mi in range(len(m_pred)):\n if m_pred['refbase'][mi] not in ['C']:\n continue\n if m_pred['refbase'][mi] in ['-', 'N', 'n']:\n continue\n # if m_pred['readbase'][mi] == '-':\n # continue\n\n # Filter non-CG patterns results\n ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta)\n\n if mapped_strand == '+':\n if ret[5:7] != 'CG':\n continue\n elif mapped_strand == '-':\n if ret[4:6] != 'CG':\n continue\n\n if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1:\n meth_indicator = 1\n else:\n meth_indicator = 0\n # sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1\n ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi],\n 'sequence': ret}\n dataset.append(ret)\n df = pd.DataFrame(dataset)\n\n if len(df) < 1:\n continue\n df['chr'] = str(mapped_chr)\n df['end'] = df['start'] + 1\n df['strand'] = str(mapped_strand)\n df['read-id'] = f5_readid_map[f5file]\n df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']]\n # logger.info(df)\n dflist.append(df)\n\n sumdf = pd.concat(dflist)\n\n # logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.')\n return sumdf", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! 
array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n testfile=obs.out_filename(\"events\", format=informat, dir=indir)\n try:\n table = Table.read(str(testfile), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \"+str(filename)\n continue\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n #for filetype in ['events']:\n for filetype in ['events', 
'aeff', 'edisp', 'psf_3gauss']:\n filename = obs.out_filename(filetype, format=informat, dir=indir)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(outfile))\n table.write(str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def summaryD5(self):\r\n\r\n if self.window.diff_tabs.tempruns_D45_set==3:\r\n\r\n dfD5_temp1_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp1_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n D5summary_temp1=pd.concat([dfD5_temp1_pos1,dfD5_temp1_pos2],axis=1)\r\n D5summary_temp1.to_csv('D5summary_temp1.txt',index=False)\r\n\r\n dfD5_temp2_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp2_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n\r\n D5summary_temp2=pd.concat([dfD5_temp2_pos1,dfD5_temp2_pos2],axis=1)\r\n D5summary_temp2.to_csv('D5summary_temp2.txt',index=False)\r\n\r\n dfD5_temp3_pos1= pd.read_csv('raw_text_D5_3_1.txt')\r\n dfD5_temp3_pos2= pd.read_csv('raw_text_D5_3_2.txt')\r\n \r\n\r\n D4summary_temp3=pd.concat([dfD5_temp3_pos1,dfD5_temp3_pos2],axis=1)\r\n D4summary_temp3.to_csv('D5summary_temp3.txt',index=False)", "def write_merged(self, content = '', table=''):\n\n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True) \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n\n '''\n if os.path.isfile('dic_obstab_attributes.npy'):\n attrs_dic = np.load('dic_obstab_attributes.npy' , allow_pickle = True).item()\n else:\n attrs_dic = {}\n '''\n attrs_dic = {}\n\n \"\"\" Retrieving the attributes \"\"\"\n if content in ['observations_table','header_table','era5fb', 'station_configuration']:\n for var in table.keys():\n if var == 'comments':\n continue \n\n attrs_dic[var] = {}\n try:\n attrs_dic[var]['description'] = bytes( 
self.dic_type_attributes[content][var]['description'] , 'utf-8' )\n except:\n attrs_dic[var]['description'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH DESCRIPTION: ', var , ' ' , self.dic_type_attributes[content][var]['description']) # FFF CHECK WHY SOME ARE FAILING\n\n try:\n attrs_dic[var]['external_table'] = bytes( self.dic_type_attributes[content][var]['external_table'] , 'utf-8' )\n except:\n attrs_dic[var]['external_table'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH EXTERNAL TABLE : ', var ) # FFF CHECK WHY SOME ARE FAILING \n\n\n if content == 'recordindex': # writing the recordindex, recordtimestamp, dateindex\n #logging.info('Writing the merged record indices to the netCDF output ')\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n\n elif content == 'cdm_tables':\n for k in data['cdm_tables'].keys():\n table = data['cdm_tables'][k]\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = k)\n #logging.info('Writing the cdm table %s to the netCDF output ', k)\n \n elif content == 'source_configuration': \n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = content)\n #logging.info('Writing the source_configuration table to the netCDF output ')\n\n elif content == 'station_configuration':\n for k in table.keys(): \n if k == 'station_name':\n print(0)\n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n try:\n table[k] = table[k].astype( var_type ) \n print('Done station_conf' , k )\n except:\n if k == 'secondary_id':\n table[k] = table[k].astype( bytes ) \n\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n dic = {k:table[k]} \n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n \n \n # Writing the observations_table, header_table, era5fb \n elif content in ['observations_table', 'era5fb', 'header_table']: \n\n shape = ''\n for k in table.keys(): \n if k == 'index' or k == 'hdrlen' or 'string' in k :\n continue\n if k == 'station_name':\n print(0)\n \n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n\n if k == 'hdrlen': \n continue\n try:\n #table[k] = table[k].astype( bytes ) \n table[k] = table[k].astype( var_type ) \n \n except:\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n #print('*** Writing the table ', content, ' variable ', k)\n #if k == 'duplicates':\n # table[k] = table[k].astype( bytes ) \n \n \n dic = {k:table[k]} # making a 1 colum dictionary\n shape = table[k].shape\n #print('SHAPE IS FFF ', table[k].shape )\n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n\n if content == 'observations_table' and not self.obstab_nans_filled :\n missing_cdm_var = [ v for v in self.dic_type_attributes[content].keys() if v not in self.observations_table_vars] # variables to be filled with nans \n for k in missing_cdm_var:\n if k not in ['advanced_assimilation_feedback']:\n var_type = self.dic_type_attributes[content][k]['type']\n if var_type == np.int32 :\n nan = np.int32(-2147483648)\n else:\n nan = np.float32(np.nan) \n 
logging.debug('Adding missing cdm colum with empty values: %s ' , k )\n dic={k:np.empty(shape,dtype=np.dtype(nan))}\n dic[k].fill(nan)\n write_dict_h5(out_name, dic, 'observations_table', self.encodings['observations_table'], var_selection=[], mode='a', attrs = attrs_dic ) ### TO DO\n self.obstab_nans_filled = True\n\n elif content == 'observations_table' and self.obstab_nans_filled:\n return", "def file_table(list_observations, indir, informat, outfile):\n print('Creating file summary table ...')\n\n # We gather all infos in a list of dicts and write this\n # as a FITS table at the end.\n # for documentation see http://gamma-astro-data-formats.readthedocs.org/en/latest/data_storage/hdu_index/index.html\n\n HDU_CLASS_TAGS = dict(\n events='events',\n aeff='aeff_2d',\n edisp='edisp_2d',\n psf_3gauss='psf_3gauss',\n psf_king='psf_king',\n psf_table='psf_table',\n gti='gti'\n )\n\n rows = []\n for obs in list_observations.observations:\n events_filename = Path(indir) / obs.filename('events', format=informat)\n try:\n table = Table.read(str(events_filename), hdu='EVENTS')\n except Exception:\n print \"fits corrupted for file \" + str(events_filename)\n continue\n if table.meta[\"OBS_ID\"]!=obs.obs_id:\n continue\n # for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n # for filetype in ['events']:\n #for filetype in ['events', 'aeff', 'edisp', 'psf_3gauss']:\n for filetype in ['events', 'aeff', 'edisp', 'psf_table']:\n filename = Path(indir) / obs.filename(filetype, format=informat)\n\n if filename.is_file():\n print('Processing {}'.format(filename))\n\n data = dict()\n\n # OBS_ID\n data['OBS_ID'] = obs.obs_id\n\n # HDU_TYPE\n if filetype in ('psf_3gauss'):\n data['HDU_TYPE'] = 'psf'\n elif filetype in ('psf_table'):\n data['HDU_TYPE'] = 'psf'\n else:\n data['HDU_TYPE'] = str(filetype)\n\n # HDU_CLASS\n data['HDU_CLASS'] = HDU_CLASS_TAGS[filetype]\n\n # FILE_DIR (relative path)\n data['FILE_DIR'] = str(\n os.path.relpath(str(obs.out_filename(filetype).parent), str(Path(outfile).parent)))\n\n # FILE_NAME\n data['FILE_NAME'] = str(obs.filename(filetype, format=informat).parts[-1])\n\n # HDU-INFOS\n hdu_list = fits.open(str(filename))\n hdu = hdu_list[1]\n header = hdu.header\n data['HDU_NAME'] = hdu.name\n\n # FILE-INFOS\n stat = filename.stat()\n data['SIZE'] = stat.st_size\n data['MTIME'] = stat.st_mtime\n data['MD5'] = hashlib.md5(filename.open('rb').read()).hexdigest()\n\n # if 'HDUCLAS2' in header:\n # data['HDUCLASS'] = header['HDUCLAS2']\n # else:\n # data['HDUCLASS'] = 'EVENTS'\n\n # if its the events-file, use a second dict for the gti-hdu\n if filetype == 'events':\n data_gti = dict()\n data_gti['OBS_ID'] = obs.obs_id\n data_gti['HDU_TYPE'] = 'gti'\n data_gti['HDU_CLASS'] = 'gti'\n data_gti['FILE_DIR'] = data['FILE_DIR']\n data_gti['FILE_NAME'] = data['FILE_NAME']\n data_gti['HDU_NAME'] = hdu_list[2].name\n data_gti['SIZE'] = data['SIZE']\n data_gti['MTIME'] = data['MTIME']\n data_gti['MD5'] = data['MD5']\n\n rows.append(data_gti)\n\n rows.append(data)\n hdu_list.close()\n\n else:\n print('File not found: {}'.format(filename))\n\n names = [\n 'OBS_ID', 'HDU_TYPE', 'HDU_CLASS',\n 'FILE_DIR', 'FILE_NAME', 'HDU_NAME',\n 'SIZE', 'MTIME', 'MD5'\n ]\n\n table = Table(rows=rows, names=names)\n\n print('Writing {}'.format(indir + \"/\" + str(outfile)))\n table.write(indir + \"/\" + str(outfile), overwrite=True)\n # add hdu name\n hdulist = fits.open(indir + \"/\" + str(outfile), mode='update')\n hdulist[1].name = 'HDU_INDEX'\n hdulist.close()", "def saveTrainingStats(model, hdf5):\n 
stats = model.getTrainingStats()\n stats_grp = hdf5.create_group(\"training_stats\")\n stats_grp.create_dataset(\"activeK\", data=stats[\"activeK\"])\n stats_grp.create_dataset(\"elbo\", data=stats[\"elbo\"])\n stats_grp.create_dataset(\"elbo_terms\", data=stats[\"elbo_terms\"].T)\n stats_grp['elbo_terms'].attrs['colnames'] = [a.encode('utf8') for a in stats[\"elbo_terms\"].columns.values]", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def write_h5(\n lk_file,\n output_filename,\n compression_level=5,\n omit_data=None,\n *,\n crop_time_range=None,\n verbose=False,\n):\n import h5py\n\n omit_data = {omit_data} if isinstance(omit_data, str) else omit_data\n h5_file = lk_file.h5\n\n with h5py.File(output_filename, \"w\") as out_file:\n\n def traversal_function(name, node):\n if omit_data and any([fnmatch(name, o) for o in omit_data]):\n if verbose:\n print(f\"Omitted {name} from export\")\n return\n\n if isinstance(node, h5py.Dataset):\n if node.dtype.kind == \"O\":\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action=\"ignore\",\n category=FutureWarning,\n message=\"Direct access to this field is deprecated\",\n )\n\n _write_cropped_metadata(\n lk_file, out_file, name, node, crop_time_range, verbose\n )\n else:\n _write_numerical_data(\n lk_file, out_file, name, node, compression_level, crop_time_range, verbose\n )\n\n else:\n out_file.create_group(f\"{name}\")\n out_file[name].attrs.update(node.attrs)\n\n h5_file.visititems(traversal_function)\n out_file.attrs.update(h5_file.attrs)", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n 
os.makedirs(date_outpath)\n \n map_data = make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return" ]
[ "0.6696969", "0.64458895", "0.6092665", "0.5964444", "0.5906239", "0.5880411", "0.5839755", "0.5827213", "0.5787817", "0.57605267", "0.5750614", "0.5691835", "0.56416214", "0.5637448", "0.56259525", "0.56092304", "0.5597023", "0.55758166", "0.55513054", "0.5536801", "0.55120593", "0.55050737", "0.5504486", "0.546385", "0.5445116", "0.5444944", "0.5423988", "0.54018825", "0.53993565", "0.5385142" ]
0.6754596
0
Converts the variable type of the input DataFrame (igra2,ncar,bufr)
def convert_variable_type_n(df): # available columns """ 'source_file', 'source_id', 'report_id', 'observation_id', 'record_timestamp', 'iday', 'station_id', 'lat@hdr', 'lon@hdr', 'vertco_reference_1@body', 'obsvalue@body', 'varno@body', 'units', 'number_of_pressure_levels' """ dic_var_type = { 'int32' : ['varno@body', 'number_of_pressure_levels' , 'units', 'z_coordinate_type' , 'vertco_type@body' ] , 'float32' : ['lat@hdr', 'lon@hdr' , 'vertco_reference_1@body', 'obsvalue@body', 'iday' ] , 'string' : ['source_id' , 'station_id' , 'source_file' , 'report_id', 'observation_id', ] , 'int64' : ['report_timestamp' , 'date_time', 'record_timestamp'] } convert = { 'int32' : np.int32 , 'string' : np.bytes_ , 'float32' : np.float32 , 'float64' : np.float64 } # creating a dictionary variable - nptype mapping = {} for k in dic_var_type.keys(): for l in dic_var_type[k]: mapping[l] = k for c in df.columns: try: #print('converting ' , c , ' to type ' , mapping[c] ) df[c] = df[c].astype( convert[mapping[c]] ) #print('converted: ', c ) except: #print('could not convert type column ' , c ) pass return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_type(data):\n# Categorical features\n columns = ['Browser', 'OperatingSystems', 'Region', 'TrafficType']\n for col in columns:\n data[col] = data[col].apply(lambda x: str(x))\n return data", "def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def _set_data_types(self):\n temp_df = self.raw_data\n cols = temp_df.drop('room_location', axis=1).columns\n temp_df[cols] = temp_df[cols].apply(pd.to_numeric)\n temp_df['room_location'] = temp_df['room_location'].astype(str)\n self.raw_data = temp_df", "def _convert_field_type(row):\n return row", "def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()", "def set_vars_as_type(df, varNames, dtype):\n\n myVars = list(set(df.columns).intersection(set(varNames)))\n df[myVars] = df[myVars].astype(dtype)", "def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)", "def predict_column_type(data):\n data_types = [type(item) for item in data]\n data_types = list(set(data_types))\n if len(data_types) == 1:\n return data_types[0].__name__\n elif str in data_types:\n return \"str\"\n elif float in data_types:\n return \"float\"\n elif int in data_types:\n return \"int\"\n else:\n return \"str\"", "def astype(self, dtype: Union[Dict[str, str], str]) -> 'DataFrame':\n\n def change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm):\n missing_value_code = utils.get_missing_value_code(new_kind)\n if new_kind == 'S':\n if old_kind == 'b':\n arr = arr + 1\n cur_srm = [False, 'False', 'True']\n elif old_kind in 'i':\n cur_srm, arr 
= _va.convert_int_to_str(arr)\n elif old_kind == 'f':\n cur_srm, arr = _va.convert_float_to_str(arr)\n elif old_kind in 'mM':\n cur_srm, arr = _va.convert_datetime_str_to_str(arr.astype('str'))\n\n new_arr[:, new_loc] = arr\n new_srm[new_loc] = cur_srm\n else:\n if new_kind != old_kind:\n nas = utils.isna_array(arr, old_kind)\n if new_kind == 'b' and old_kind != 'b':\n arr = arr.astype('bool').astype('int8')\n new_arr[:, new_loc] = arr\n if new_kind != old_kind:\n new_arr[nas, new_loc] = missing_value_code\n\n if isinstance(dtype, str):\n new_dtype: str = utils.check_valid_dtype_convert(dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n utils.check_astype_compatible(new_kind, self._data.keys())\n\n new_column_info: ColInfoT = {}\n new_arr = utils.create_empty_arr(new_kind, self.shape)\n new_data = {new_kind: new_arr}\n new_srm = {}\n col_iter = enumerate(self._col_info_iter(with_order=True, with_arr=True))\n for i, (col, old_kind, loc, order, arr) in col_iter:\n new_column_info[col] = utils.Column(new_kind, i, order)\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(i, new_kind, old_kind, arr, new_arr, cur_srm)\n elif isinstance(dtype, dict):\n col_kind_convert = {}\n for col, new_dtype in dtype.items():\n self._validate_column_name(col)\n new_dtype: str = utils.check_valid_dtype_convert(new_dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n col_kind_convert[col] = new_kind\n old_kind = self._column_info[col].dtype\n utils.check_astype_compatible(new_kind, {old_kind})\n\n new_column_info: ColInfoT = {}\n cols_per_kind: Dict[str, int] = defaultdict(int)\n for col, old_kind, loc, order in self._col_info_iter(with_order=True):\n new_kind = col_kind_convert.get(col, old_kind)\n cur_loc = cols_per_kind[new_kind]\n new_column_info[col] = utils.Column(new_kind, cur_loc, order)\n cols_per_kind[new_kind] += 1\n\n # create empty arrays for each type\n new_data = {}\n for new_kind, num_cols in cols_per_kind.items():\n shape = len(self), num_cols\n new_data[new_kind] = utils.create_empty_arr(new_kind, shape)\n\n new_srm = {}\n for col, old_kind, loc, order, arr in self._col_info_iter(with_order=True, with_arr=True):\n new_kind = new_column_info[col].dtype\n new_loc = new_column_info[col].loc\n new_arr = new_data[new_kind]\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm)\n else:\n raise TypeError('Argument dtype must be either a string or a dictionary')\n\n new_columns = self._columns.copy()\n return self._construct_from_new(new_data, new_column_info, new_columns, new_srm)", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = 
data[name].dropna().astype(np.int64)\n\n return data", "def _coerce_and_store_data_types(tag_loop_dict):\n\n regex_format = re.compile(r\"\"\"\\d*\\.(?P<decimal>\\d+)(?:[Ee]?[+-]?(?P<exponent>\\d?))\"\"\")\n\n # Attempt to convert data columns from strings to integers or floats whenever possible\n # Skip any table with 'data_header' in its name because these contain mixed data\n for key in tag_loop_dict.keys():\n if u'data_header' not in key:\n tmp = tag_loop_dict[key].copy()\n tag_loop_dict[key] = tag_loop_dict[key].apply(lambda x: pd.to_numeric(x, errors=u'ignore'))\n \n # Preserve the formatting for all columns that were converted to floats\n float_cols = [x for x in tag_loop_dict[key].columns if tag_loop_dict[key][x].dtype == np.float]\n\n decimal_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('decimal'))).max())\n for col in float_cols])\n\n exponent_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('exponent'))).max())\n for col in float_cols])\n\n number_format = dict([(col,'f') if exponent_format[col] == 0 else (col,'E')\n for col in float_cols])\n\n formatter = dict([(col, '{:.' + str(decimal_format[col]) + number_format[col] + '}') \n for col in float_cols])\n \n # Save format instructions to dataframe\n tag_loop_dict[key]._print_format = formatter\n\n return tag_loop_dict", "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var", "def encode_dtypes(df):\n\n global catn, cato\n\n # Nominal categories\n for name in catn:\n df[name] = df[name].astype(\"category\")\n # Add a None category for missing values\n if \"None\" not in df[name].cat.categories:\n df[name].cat.add_categories(\"None\", inplace=True)\n # Ordinal categories\n for name, levels in cato.items():\n df[name] = df[name].astype(CategoricalDtype(levels,\n ordered=True))\n return df", "def preprocess_var(bd, var):\n filepath_sv = f\"team67-ptp/data/{var}.csv\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")\n else:\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")", "def rep_dtypes(df):\n return \"(\" + re.sub(\", dtype.*\", \"\", re.sub(r\" +\", \": \", str(df.dtypes)).replace(\"\\n\", \", \")) + \")\"", "def change_col_type(df,schema):\n d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def split_dataframe_datatypes(df, target_var):\n\tdf_num = df.select_dtypes(include=np.number)\n\tdf_cat = df.select_dtypes(include=object)\n\n\tif target_var in df_num.columns:\n\t\tdf_tar = df_num.copy() \n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_num.drop(columns=[target_var], axis=1, inplace=True) \n\telif target_var in df_cat.columns:\n\t\tdf_tar = df_cat.copy()\n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_cat.drop(columns=[target_var], axis=1, 
inplace=True) \n\n\treturn df_num,df_cat,df_tar", "def inspect_dtype_object(self, column: str) -> str:\n\n series = self.df[column].dropna()\n\n # check for bool\n try:\n conv = pd.to_numeric(series)\n return self.inspect_dtype(conv)\n except ValueError:\n pass\n\n # check for mixed dtypes\n dtypes = {type(x) for x in series}\n if len(dtypes) > 1:\n raise TypeError(\"Column `{}` has mixed dtypes: {}. Currently, \"\n \"this is not supported.\"\n .format(column, dtypes))\n\n # check for string\n if isinstance(series[0], str):\n return \"str\"\n\n # raise if unsupported dtype is encountered\n raise TypeError(\"Column `{}` has dtype `{}` which is currently \"\n \"not supported.\"\n .format(column, type(series[0])))", "def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)", "def convertColumn(df, names, newType) -> pyspark.sql.dataframe.DataFrame:\n for name in names: \n df = df.withColumn(name, df[name].cast(newType))\n return df", "def __convToTyped(index, value, dtypes):\n\t#print(index, value)\n\tdtype = dtypes[index]\n\ttvalue = value\n\tif dtype == \"int\":\n\t\ttvalue = int(value)\n\telif dtype == \"float\":\n\t\ttvalue = float(value)\n\treturn tvalue", "def encoding_df(df, cols):\n import pandas as pd\n df = df[cols]\n obj_df = df.select_dtypes(include=['object']).copy()\n num_var = df.select_dtypes(include=['int','float']).copy()\n cat_var = pd.get_dummies(obj_df, columns = obj_df.columns)\n encoded_df = pd.concat([num_var, cat_var], axis=1, sort=False)\n return encoded_df", "def get_data_type(col_val):\n dtype = \"\"\n\n original_col_val = col_val\n digits_only = col_val.replace('-', '',1).replace(',', '', -1).replace(\".\", \"\")\n if digits_only.isdigit():\n try:\n int(original_col_val)\n dtype = TYPE_INT\n except ValueError:\n dtype = TYPE_FLOAT\n \n return dtype", "def _convert_other(self, column, field, recformat):\n if isinstance(recformat, _FormatX):\n # special handling for the X format\n return self._convert_x(field, recformat)\n\n scale_factors = self._get_scale_factors(column)\n _str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors\n\n indx = self.names.index(column.name)\n\n # ASCII table, convert strings to numbers\n # TODO:\n # For now, check that these are ASCII columns by checking the coldefs\n # type; in the future all columns (for binary tables, ASCII tables, or\n # otherwise) should \"know\" what type they are already and how to handle\n # converting their data from FITS format to native format and vice\n # versa...\n if not _str and 
isinstance(self._coldefs, _AsciiColDefs):\n field = self._convert_ascii(column, field)\n\n # Test that the dimensions given in dim are sensible; otherwise\n # display a warning and ignore them\n if dim:\n # See if the dimensions already match, if not, make sure the\n # number items will fit in the specified dimensions\n if field.ndim > 1:\n actual_shape = field.shape[1:]\n if _str:\n actual_shape = actual_shape + (field.itemsize,)\n else:\n actual_shape = field.shape[0]\n\n if dim == actual_shape:\n # The array already has the correct dimensions, so we\n # ignore dim and don't convert\n dim = None\n else:\n nitems = reduce(operator.mul, dim)\n if _str:\n actual_nitems = field.itemsize\n elif len(field.shape) == 1:\n # No repeat count in TFORMn, equivalent to 1\n actual_nitems = 1\n else:\n actual_nitems = field.shape[1]\n if nitems > actual_nitems and not isinstance(recformat, _FormatP):\n warnings.warn(\n \"TDIM{} value {:d} does not fit with the size of \"\n \"the array items ({:d}). TDIM{:d} will be ignored.\".format(\n indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1\n )\n )\n dim = None\n\n # further conversion for both ASCII and binary tables\n # For now we've made columns responsible for *knowing* whether their\n # data has been scaled, but we make the FITS_rec class responsible for\n # actually doing the scaling\n # TODO: This also needs to be fixed in the effort to make Columns\n # responsible for scaling their arrays to/from FITS native values\n if not column.ascii and column.format.p_format:\n format_code = column.format.p_format\n else:\n # TODO: Rather than having this if/else it might be nice if the\n # ColumnFormat class had an attribute guaranteed to give the format\n # of actual values in a column regardless of whether the true\n # format is something like P or Q\n format_code = column.format.format\n\n if _number and (_scale or _zero) and not column._physical_values:\n # This is to handle pseudo unsigned ints in table columns\n # TODO: For now this only really works correctly for binary tables\n # Should it work for ASCII tables as well?\n if self._uint:\n if bzero == 2**15 and format_code == \"I\":\n field = np.array(field, dtype=np.uint16)\n elif bzero == 2**31 and format_code == \"J\":\n field = np.array(field, dtype=np.uint32)\n elif bzero == 2**63 and format_code == \"K\":\n field = np.array(field, dtype=np.uint64)\n bzero64 = np.uint64(2**63)\n else:\n field = np.array(field, dtype=np.float64)\n else:\n field = np.array(field, dtype=np.float64)\n\n if _scale:\n np.multiply(field, bscale, field)\n if _zero:\n if self._uint and format_code == \"K\":\n # There is a chance of overflow, so be careful\n test_overflow = field.copy()\n try:\n test_overflow += bzero64\n except OverflowError:\n warnings.warn(\n \"Overflow detected while applying TZERO{:d}. \"\n \"Returning unscaled data.\".format(indx + 1)\n )\n else:\n field = test_overflow\n else:\n field += bzero\n\n # mark the column as scaled\n column._physical_values = True\n\n elif _bool and field.dtype != bool:\n field = np.equal(field, ord(\"T\"))\n elif _str:\n if not self._character_as_bytes:\n with suppress(UnicodeDecodeError):\n field = decode_ascii(field)\n\n if dim and not isinstance(recformat, _FormatP):\n # Apply the new field item dimensions\n nitems = reduce(operator.mul, dim)\n if field.ndim > 1:\n field = field[:, :nitems]\n if _str:\n fmt = field.dtype.char\n dtype = (f\"|{fmt}{dim[-1]}\", dim[:-1])\n field.dtype = dtype\n else:\n field.shape = (field.shape[0],) + dim\n\n return field" ]
[ "0.698135", "0.6574161", "0.6371382", "0.6348573", "0.6335147", "0.61783874", "0.60554516", "0.60521567", "0.603357", "0.6014633", "0.5996646", "0.59819514", "0.59644175", "0.59415495", "0.58963", "0.58877915", "0.588751", "0.58810073", "0.5805037", "0.579967", "0.5769489", "0.57477087", "0.5743041", "0.573715", "0.5713382", "0.5709348", "0.5641281", "0.5637821", "0.56165063", "0.56138414" ]
0.7458724
0
Load the cdm tables into Pandas DataFrames, reading the tables from the cdm GitHub page. FF To do: uncomment to get the list of all the .csv files present at the url specified
def load_cdm_tables(): tpath = os.getcwd() + '/../data' cdmpath='https://raw.githubusercontent.com/glamod/common_data_model/master/tables/' # cdm tables """ Selecting the list of table definitions. Some of the entires do not have the corresponding implemented tables """ cdmtabledeflist=['id_scheme', 'crs', 'station_type', 'observed_variable', 'station_configuration', 'station_configuration_codes', 'observations_table', 'header_table', 'source_configuration', 'sensor_configuration', 'units' , 'z_coordinate_type'] cdm_tabdef = dict() for key in cdmtabledeflist: url='table_definitions'.join(cdmpath.split('tables'))+key+'.csv' # https://github.com/glamod/common_data_model/tree/master/table_definitions/ + ..._.dat f=urllib.request.urlopen(url) col_names=pd.read_csv(f, delimiter='\t',quoting=3,nrows=0,comment='#') f=urllib.request.urlopen(url) tdict={col: str for col in col_names} cdm_tabdef[key]=pd.read_csv(f,delimiter='\t',quoting=3,dtype=tdict,na_filter=False,comment='#') """ Selecting the list of tables. 'station_configuration_codes','observations_table','header_table' are not implemented in the CDM GitHub""" cdmtablelist=['id_scheme', 'crs', 'station_type', 'observed_variable', 'station_configuration_codes','units'] cdm_tab=dict() # dictionary where each key is the name of the cdm table, and the value is read from the .dat file for key in cdmtablelist: f=urllib.request.urlopen(cdmpath+key+'.dat') col_names=pd.read_csv(f,delimiter='\t',quoting=3,nrows=0) f=urllib.request.urlopen(cdmpath+key+'.dat') tdict={col: str for col in col_names} cdm_tab[key]=pd.read_csv(f,delimiter='\t',quoting=3,dtype=tdict,na_filter=False) """ Adding the tables that currently only have the definitions but not the implementation in the CDM, OR need extensions """ cdm_tabdef['header_table'] = pd.read_csv(tpath+'/table_definitions/header_table.csv',delimiter='\t',quoting=3,comment='#') #cdm_tabdef['observations_table'] = pd.read_csv(tpath+'/table_definitions/observations_table.csv',delimiter='\t',quoting=3,comment='#') id_scheme={ cdm_tabdef['id_scheme'].element_name.values[0]:[0,1,2,3,4,5,6], cdm_tabdef['id_scheme'].element_name.values[1]:['WMO Identifier','Volunteer Observing Ships network code', 'WBAN Identifier','ICAO call sign','CHUAN Identifier', 'WIGOS Identifier','Specially constructed Identifier']} cdm_tab['id_scheme']=pd.DataFrame(id_scheme) cdm_tab['crs']=pd.DataFrame({'crs':[0],'description':['wgs84']}) """ Here we add missing entries, e.g. in the z_coordinate_type for the pressure levels in Pascal (the available CDM table in the glamod GitHub rep. 
contains onle the altitude in [meter] """ cdm_tab['station_type']=pd.DataFrame({'type':[0,1],'description':['Radiosonde','Pilot']}) cdm_tab['z_coordinate_type']=pd.DataFrame({'type':[0,1],'description':['height (m) above sea level','pressure (Pa)']}) # only the m above sea level is available currently in the GitHub cdm table, added pressure """ Make dictionary of variables and attributes for the observations table """ dic_obstab_attributes = {} for index, row in cdm_tabdef['observations_table'].iterrows(): dic_obstab_attributes[row['element_name'] ] = {} dic_obstab_attributes[row['element_name'] ]['description'] = row.description dic_obstab_attributes[row['element_name'] ]['external_table'] = row.external_table #dic_obs['date_time'] = ['units', 'seconds since 1900-01-01 00:00:00' ] if not os.path.isfile('dic_obstab_attributes.npy'): np.save( 'dic_obstab_attributes' , dic_obstab_attributes ) """ for tab in ['observations_table' , 'header_table', 'sensor_configuration']: #for tab in cdm_tabdef: df = cdm_tabdef[tab] variable_type[tab] = {} for index,row in df.iterrows(): if 'kind' in df.columns: variable_type[tab][row.element_name ] = kinds[row.kind] else: rt=row.type if row.type=='timestamp': rt='timestamp with timezone' variable_type[tab][row.element_name ] = kinds[rt] variable_type['observations_table']['date_time'] = np.int64 """ return cdm_tabdef, cdm_tab, tdict , dic_obstab_attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_datasets_csv(url):\n # dataset = pd.read_csv(url, sep='\\t')\n dataset = pd.read_csv(url, sep=\",\")\n dataset.columns = dataset.columns.str.replace(\" \", \"_\")\n return dataset", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data", "def fetch(url: str, cache: str) -> pd.DataFrame:\n r = requests.get(url)\n r.raise_for_status()\n datestamp = date.today().strftime('%Y%m%d')\n name = url.split('/')[-1].replace('.csv','')\n os.makedirs(cache, exist_ok=True)\n filename = os.path.join(cache, f\"{datestamp}_{name}.csv\")\n with open(filename, \"w\") as f:\n f.write(r.text)\n return pd.read_csv(filename)", "def create_df_from_remote_csv(url):\n if url is None:\n return None\n response = requests.get(url)\n if response.status_code == 200:\n if response.headers['content-type'] == \"text/csv\":\n response.encoding = 'utf-8'\n data = pd.read_csv(io.StringIO(response.text))\n return data\n else:\n print('Error. '\n 'The file is encoded using unsupported content-type {}'\n .format(response.headers['content-type']))\n else:\n print('Error. '\n 'The file could not be downloaded. Returned HTTP status code: {}'\n .format(response.status_code))\n\n return None", "def parse_tables_from_url(url, md_file):\n\n r = requests.get(url)\n parse_tables_from_html(r.text, md_file)", "def _download_to_df(url, table_name, year, month):\n # Insert the table_name, year and month into the url.\n url = url.format(table=table_name, year=year, month=str(month).zfill(2))\n # Download the file.\n r = requests.get(url)\n if r.status_code != 200:\n raise _MissingData((\"\"\"Requested data for table: {}, year: {}, month: {} \n not downloaded. Please check your internet connection. 
Also check\n http://nemweb.com.au/#mms-data-model, to see if your requested\n data is uploaded.\"\"\").format(table_name, year, month))\n # Convert the contents of the response into a zipfile object.\n zf = zipfile.ZipFile(io.BytesIO(r.content))\n # Get the name of the file inside the zip object, assuming only one file is zipped inside.\n file_name = zf.namelist()[0]\n # Read the file into a DataFrame.\n data = pd.read_csv(zf.open(file_name), skiprows=1)\n # Discard last row of DataFrame\n data = data[:-1]\n return data", "def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url)", "def get_dfs(npages=927):\n print(\"loading data\")\n try:\n os.makedirs('./data')\n except FileExistsError:\n pass\n\n def fp(pagenum): return './data/%s.csv' % pagenum\n\n dfs = (c(\n pagenum,\n get_page,\n parse_html_table(pagenum, fp(pagenum)),\n ) if not exists(fp(pagenum)) else pd.read_csv(fp(pagenum))\n for pagenum in range(1, npages)\n )\n\n df = pd.concat(dfs)\n return df", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def parse_page(url):\n page = requests.get(url)\n csv = re.subn(\"<br />\", \"\", page.content)[0]\n #print csv\n csvf = StringIO(csv)\n #print csvf.readline()\n df = pd.read_csv(csvf, sep=',', header=0, skiprows=0, parse_dates=True, na_values=[\"N/A\",'-9999'])\n\n if 'TemperatureF' in df.columns:\n df['TemperatureC'] = FtoC(df.TemperatureF)\n #print df\n summary = df.describe()\n return df, summary", "def _get_data(*, from_web: bool) -> pd.DataFrame:\n\n df = read_in_data.SaveFormats.CSV.read(from_web=from_web)\n return df", "def get_df(config_summary_url):\n return pd.read_csv(urlretrieve(config_summary_url)[0])", "def download_csv(url: str, checksum: str) -> pd.DataFrame:\n return pd.read_csv(io.StringIO(download(url, checksum).decode(\"utf-8\")))", "def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df", "def query_WDSC():\n url = \"http://www.astro.gsu.edu/wds/Webtextfiles/wdsnewframe.html\"\n df = pd.read_csv(url)\n return df", "def load_data(a_url, path, column_headers):\n print(\"Try loading the data set from a local file if it exists.\")\n try:\n data_set = pd.read_csv(path)\n except FileNotFoundError:\n print(\"File not found, loading from Internet and saving to disk...\")\n data_set = pd.read_csv(a_url, header=None)\n data_set.columns = column_headers\n data_set.to_csv(path, sep=\",\", index=False)\n\n return data_set", "def webscrap_links(source_list):\n # Read URLs into list\n urls = []\n with 
open(source_list, \"r\") as file:\n urls = [line for line in file]\n\n # Create Pandas DataFrame to store and save CSV data\n dl_files = pd.DataFrame(columns=[\"filename\", \"url\"])\n\n # Retrieve source HTML per URL and process\n for url in urls:\n source = str(urlopen(url).read())\n\n # Raise an error if there are two tables\n assert source.count(\"<tbody>\") == 1\n\n # Select subset of source\n source = source[int(source.find(\"<tbody>\")):int(source.find(\"</tbody>\"))]\n\n # Split table per item (discard first bit)\n source = source.split(\"<tr>\")[1:]\n\n # For each entry, strip and extract download url and name\n prefix = \"https://filer.net\"\n for item in source:\n # Extract download url\n dl_url = item[item.find(\"\\\"\")+1:]\n dl_url = dl_url[:dl_url.find(\"\\\"\")]\n dl_url = prefix + dl_url\n\n # Extract download file name\n name = item[item.find(\"\\\">\")+2:]\n name = name[:name.find(\"</a>\")]\n\n # Add link to Dat\n dl_files = dl_files.append({\"filename\": name, \"url\": dl_url}, ignore_index=True)\n\n # Save DataFrame to CSV file\n dl_files.to_csv(\"../data/file_links.csv\", index=False)\n \n return", "def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)", "def _fetch_data(url: str, d: datetime) -> pd.DataFrame:\n return pd.read_json(url)", "def fetch_csv_from_url(url):\n\t\n\t#cache avoidance.\n\twith requests_cache.disabled():\n\t\tr = requests.get(url)\n\t\tif r.status_code == 200:\n\t\t\treturn r.iter_lines()", "def fetch_dataset(data_root_dir):\n pattern = \"winemag_dataset_*.csv\"\n\n file_list = glob.glob(os.path.join(data_root_dir, pattern))\n\n df_list = [pd.read_csv(fname) for fname in file_list]\n\n full_df = pd.concat(df_list)\n\n # give unique row names to all\n full_df.index = range(full_df.shape[0])\n\n print(\"Dataset fetched.\")\n return full_df", "def scrape_group_df(url):\n res = req.get(url, headers={'user-agent': ua.random})\n # xpath not working for some reason... 
: /html/body/table[3]/tbody/tr[5]/td/table\n # tree = html.fromstring(res.content)\n # table = tree.xpath('//table')\n soup = bs(res.content, 'lxml')\n tables = soup.findAll('table')\n # 7th table for now\n data_table = tables[6]\n rows = data_table.findAll('tr')\n # labels = rows[0].text.split('\\n')[1:-1] # first and last are empty\n labels = [t.text for t in rows[0].findAll('td')]\n datadict = {}\n for r in rows[1:]:\n data = [t.text for t in r.findAll('td')]\n link = r.findAll('td')[1].find('a').attrs['href']\n datadict.setdefault('link', []).append('https://finviz.com/' + link)\n for l, d in zip(labels, data):\n datadict.setdefault(l, []).append(d)\n\n df = pd.DataFrame(datadict)\n\n abbrev_cols = ['Avg Volume', 'Market Cap', 'Volume']\n pct_cols = ['Change',\n 'Dividend',\n 'EPS next 5Y',\n 'EPS past 5Y',\n 'Float Short',\n 'Perf Half',\n 'Perf Month',\n 'Perf Quart',\n 'Perf Week',\n 'Perf YTD',\n 'Perf Year',\n 'Sales past 5Y']\n numeric_cols = ['Fwd P/E',\n 'P/B',\n 'P/C',\n 'P/E',\n 'P/FCF',\n 'P/S',\n 'PEG',\n 'Recom',\n 'Rel Volume']\n df[abbrev_cols] = df[abbrev_cols].applymap(clean_abbreviations)\n df[pct_cols] = df[pct_cols].applymap(clean_pcts)\n df[numeric_cols] = df[numeric_cols].astype('float')\n df['Stocks'] = df['Stocks'].astype('int')\n df.drop('No.', inplace=True, axis=1)\n\n return df", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res", "def get_data(data_basename: str = f'{data_folder}/data.csv') -> pd.DataFrame:\n data_path = 
file_path_relative(data_basename)\n if exists(data_path):\n logger.info(f'reading data from {data_path}')\n moon_data = pd.read_csv(data_path)\n return moon_data\n\n res = requests.get(data_url)\n soup = BeautifulSoup(res.content, features='html.parser')\n\n # get second table from wikipedia\n moon_table = soup.findAll('table', {'class': 'wikitable'})[1]\n # convert to dataframe\n moon_df = pd.read_html(str(moon_table))\n moon_df = pd.DataFrame(moon_df[0])\n\n # sanitize column names\n moon_df.columns = [_sanitize_column_name(\n col) for col in moon_df.columns.values.tolist()]\n\n # sanitize orbital period\n moon_df[orbital_period_column] = moon_df[orbital_period_column].str.replace(\n brackets_remove_regex, '').str.replace('−', '-').str.strip()\n moon_df[orbital_period_column] = pd.to_numeric(\n moon_df[orbital_period_column])\n # days to seconds\n moon_df[orbital_period_column] *= (24 * 60 * 60)\n\n # sanitize semi-major axis\n moon_df[semimajor_axis_column] = moon_df[semimajor_axis_column].str.replace(\n brackets_remove_regex, '').str.strip()\n moon_df[semimajor_axis_column] = pd.to_numeric(\n moon_df[semimajor_axis_column])\n # km to m\n moon_df[semimajor_axis_column] *= 1000\n\n # sanitize mass and sort by it\n mass_column_key: str = 'mass'\n moon_df[mass_column_key] = moon_df[mass_column_key].str.replace(\n '≈', '').str.strip()\n moon_df[mass_column_key] = pd.to_numeric(moon_df[mass_column_key])\n # to kg\n moon_df[mass_column_key] *= 1e16\n moon_df = moon_df.sort_values(by=[mass_column_key], ascending=False)\n\n moon_df.to_csv(data_path, index=False)\n return moon_df", "def load_records(dir):\n\n\t# I saved all the WoS full records for 'machine learning'\n\tfiles =os.listdir(dir)\n\tdf =pd.concat([pd.read_table(df, sep='\\t',index_col = False) for df in [dir+f for f in files]])\n\tdf = df.drop_duplicates()\n\n\t#fix index\n\tindex = range(0, df.shape[0])\n\tdf.index = index\n\n\t#to get all cited refs\n\tcited_refs = [set(re.split(pattern='; ', string=str(ref).lower().lstrip().rstrip())) for ref in df.CR]\n\n\t# add as column to dataframe\n\tdf['cited_refs'] = cited_refs\n\n\t# normalise authors\n\tdf.au = [str(au).lower().lstrip().rstrip() for au in df.AF]\n\n\treturn df", "def download_global_csv(output_dir: str):\n for filename, url_path in CSVS_TO_READ:\n url = urljoin(GITHUB_BASE_URL, url_path)\n path = os.path.join(output_dir, filename)\n df = pd.read_csv(url)\n df.to_csv(path)", "def get_news():\n # empty dataframe\n df = pd.DataFrame() \n # read each url in list\n for url in inshorts_urls(): \n # add each dataframe of cards to df\n df = pd.concat([df, get_article(url)])\n # return all urls' cards\n return df", "def import_tables(file, pages):\n tables = camelot.read_pdf(\n file, pages=pages,\n flavor='stream',\n )\n return tables" ]
[ "0.71734565", "0.67320985", "0.6669976", "0.6588941", "0.65831953", "0.64777106", "0.64169997", "0.63222474", "0.63173807", "0.6298204", "0.62980694", "0.62220365", "0.6213207", "0.6168551", "0.61399347", "0.61185586", "0.60854256", "0.6081547", "0.6046216", "0.6012291", "0.59838545", "0.59662586", "0.59540415", "0.5914416", "0.59019446", "0.58975416", "0.58781654", "0.58464724", "0.5798998", "0.5790562" ]
0.7079996
1
Return a list of csv files, as found in the url on the cdm GitHub
def csvListFromUrls(url=''): urlpath = urlopen(url) string = urlpath.read().decode('utf-8') split = string.split(' ') csv_files_list = [m.replace('"','') for m in [n.split('title="')[1] for n in split if '.csv' in n and "title" in n] ] return csv_files_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_csv_files():\n # See README.txt Ref#2.\n return [filename for filename in glob.glob(\"*.csv\")]", "def clowder_dataset_filelist(session, url, dataset):\n try:\n ret = session.get(posixpath.join(url, \"api/datasets\", dataset, \"listFiles\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret", "def download_global_csv(output_dir: str):\n for filename, url_path in CSVS_TO_READ:\n url = urljoin(GITHUB_BASE_URL, url_path)\n path = os.path.join(output_dir, filename)\n df = pd.read_csv(url)\n df.to_csv(path)", "def fetch_csv_from_url(url):\n\t\n\t#cache avoidance.\n\twith requests_cache.disabled():\n\t\tr = requests.get(url)\n\t\tif r.status_code == 200:\n\t\t\treturn r.iter_lines()", "def get_csv(self):\n all_csvs = [each for each in listdir(self.cur_dir) if each.endswith('.csv')]\n return all_csvs", "def get_allbud_urls(filename, columns):\n import pandas as pd\n import os\n path = os.path.join(os.getcwd(), filename)\n\n\n data = pd.read_csv(path)\n\n return data[columns].tolist()", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def list_file(csv_directory):\n list_of_files = [os.path.join(dirpath, file_name)\n for dirpath, dirnames, files in os.walk(csv_directory)\n for file_name in fnmatch.filter(files, '*.csv')]\n return list_of_files", "def download_datasets_csv(url):\n # dataset = pd.read_csv(url, sep='\\t')\n dataset = pd.read_csv(url, sep=\",\")\n dataset.columns = dataset.columns.str.replace(\" \", \"_\")\n return dataset", "def list_files(path=None):\n if path == None:\n return glob.glob('Data/*.csv')\n else:\n return glob.glob(path+'*.csv')", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def get_csv():\n with requests.Session() as s:\n download = s.get(CSV_URL)\n decoded_content = download.content.decode('utf-8')\n cr = csv.reader(decoded_content.splitlines(), delimiter=',')\n my_list = list(cr)\n return [row[2] for row in my_list[1:]]", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def get_url_data(csv_file_path: str):\n with open(csv_file_path, \"r\", encoding=\"latin-1\") as url_records:\n for url_records in csv.reader(url_records):\n yield url_records", "def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data", "def get_csv_rows(self):\n log.info(\"Downloading CSV data.\")\n link = self._driver.find_element_by_css_selector(self.CsvLinkSelector)\n href = link.get_attribute(\"href\")\n\n # In the latest version of requests, I think it's possible\n # to \"with\" a response object in order to guarantee cleanup,\n # but that wasn't working with the version we currently use.\n # Hence, the try/finally idiom.\n response = requests.get(href, stream=True)\n try:\n # Assumption: The file is utf-8 encoded\n resp_reader 
= codecs.iterdecode(response.iter_lines(), \"utf-8\")\n csv_reader = csv.reader(resp_reader)\n for row in csv_reader:\n yield row\n finally:\n if response is not None:\n response.close()", "def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()", "def data_import_links(self):\n dirpath = os.path.join(config[\"src_dir\"], config[\"data_subdir\"])\n assert os.path.exists(dirpath), f\"- data subdirectory {dirpath} was not found\"\n data = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))\n and not f.startswith('.') and f.endswith('.csv') or f.endswith('.txt')]\n data = filter(lambda f: any([re.search(f, cell.source) for cell in self.content.cells]), data)\n return [(os.path.join(config[\"data_subdir\"], f), f\"{config['github_pages_url']}/data/{f}\") for f in data]", "def download(csvpath, asset_manager_id, data_id_type, data_id_list):\n interface = interface_direct_csvpath(csvpath)\n logging.config.dictConfig(DEFAULT_LOGGING)\n logger = logging.getLogger(__name__)\n objs = []\n for data_id in data_id_list:\n Dict = dict()\n Dict[data_id_type] = data_id\n objs.append(interface.retrieve(asset_manager_id=asset_manager_id, **Dict))\n return objs", "def list_apiscout_csv(self, destination):\n res = self.__make_api_call('list/apiscout/csv', raw=True)\n with open(destination, \"wb\") as csvfile:\n csvfile.write(res)", "def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response", "def get_csv(url):\n try:\n csv_data = requests.get(url)\n csv = csv_data.text\n csv.strip()\n rows = csv.split('\\n')\n # Delete first row to remove headers\n del rows[0]\n data_list = []\n for row in rows:\n if row:\n columns = row.split(',')\n # Delete the following columns: Open,High,Low,Close\n del columns[1:5]\n columns[1] = float(columns[1])\n columns[2] = float(columns[2])\n data_list.append(columns)\n return data_list\n\n except Exception as e:\n print(f\"Error when trying to read {url}.\\n Cannot return anything\\n{e}\")\n return None", "def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)", "def webscrap_links(source_list):\n # Read URLs into list\n urls = []\n with open(source_list, \"r\") as file:\n urls = [line for line in file]\n\n # Create Pandas DataFrame to store and save CSV data\n dl_files = pd.DataFrame(columns=[\"filename\", \"url\"])\n\n # Retrieve source HTML per URL and process\n for url in urls:\n source = str(urlopen(url).read())\n\n # Raise an error if there 
are two tables\n assert source.count(\"<tbody>\") == 1\n\n # Select subset of source\n source = source[int(source.find(\"<tbody>\")):int(source.find(\"</tbody>\"))]\n\n # Split table per item (discard first bit)\n source = source.split(\"<tr>\")[1:]\n\n # For each entry, strip and extract download url and name\n prefix = \"https://filer.net\"\n for item in source:\n # Extract download url\n dl_url = item[item.find(\"\\\"\")+1:]\n dl_url = dl_url[:dl_url.find(\"\\\"\")]\n dl_url = prefix + dl_url\n\n # Extract download file name\n name = item[item.find(\"\\\">\")+2:]\n name = name[:name.find(\"</a>\")]\n\n # Add link to Dat\n dl_files = dl_files.append({\"filename\": name, \"url\": dl_url}, ignore_index=True)\n\n # Save DataFrame to CSV file\n dl_files.to_csv(\"../data/file_links.csv\", index=False)\n \n return", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def update_csv():\n return os.listdir('./data')", "def parse_file_list(self, file_path=None, file_name_id='Producer Granule ID', url_id='Online Access URLs'):\n\n # read in and maintain the raw csv file as df\n df = pd.read_csv(file_path)\n\n # record the number of files\n self.file_num = df.__len__()\n\n # initiate the data frame\n self.file_list = pd.DataFrame()\n self.file_list['download_dir'] = np.NaN\n self.file_list['file_name'] = df[file_name_id]\n self.file_list['online_url'] = df[url_id]\n self.file_list['status'] = 0\n self.file_list['year'] = 0\n self.file_list['day'] = 0\n self.file_list = self.file_list.reset_index(drop=True)\n\n # clean up the variables for a file list downloaded from Reverb\n # extract http urls from the file list\n print(\"Extracting http urls from the file list...\")\n self.file_list['online_url'] = self.file_list['online_url'].str.rstrip(\"\\'\").str.split(',').str[1]\n self.file_list['year'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 7]\n self.file_list['day'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 8]\n self.file_list['download_dir'] = self.download_dir + self.file_list['year'] + '/' + self.file_list['day'] + '/'", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def download_examples(request):\n\n file_required = request.GET.get('token',None)\n path = ''\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns 
the Path your .py file is in\n if file_required == 'reports':\n path = os.path.join(workpath, './static/examples/report.csv')\n\n elif file_required == 'concepts':\n path = os.path.join(workpath, './static/examples/concept.csv')\n\n elif file_required == 'labels':\n path = os.path.join(workpath, './static/examples/labels.csv')\n\n elif file_required == 'pubmed':\n path = os.path.join(workpath, './static/examples/pubmed.csv')\n\n content = open(path,'r')\n return HttpResponse(content, content_type='text/csv')", "def get_urls_and_paths():\n # Array to store tuples in (url, path) format.\n urls_and_paths = []\n\n for file_name in os.listdir('tickers'):\n # Sanity check. Only use text files.\n if file_name.endswith('.txt'):\n # Create a folder for each group (each txt file is a group)\n group = os.path.splitext(file_name)[0]\n\n # Create the folder for storing stock price data.\n os.makedirs('data/' + group)\n\n # Open the file.\n input_file = open('tickers/' + file_name)\n\n # For each line (stock), create the GET URL and store the save location.\n for line in input_file.read().splitlines():\n urls_and_paths.append((\n 'https:/www.wsj.com/market-data/quotes/' + line + '/historical-prices/download?num_rows=100000000000000&range_days=100000000000000&startDate=01/01/1970&endDate=01/01/2040',\n 'data/' + group + '/' + line.split('/')[-1] + '.csv'\n ))\n\n return urls_and_paths" ]
[ "0.71335447", "0.6901352", "0.6822499", "0.67910224", "0.6789945", "0.6654213", "0.66047746", "0.65839416", "0.6547043", "0.64927316", "0.6466528", "0.64494634", "0.6361783", "0.63590306", "0.6329721", "0.63262", "0.63022965", "0.6283138", "0.6280837", "0.62568825", "0.6256324", "0.6209753", "0.620768", "0.6130061", "0.61292255", "0.6095596", "0.6056445", "0.604111", "0.6036323", "0.6034377" ]
0.74914557
0
Replace wrong characters from the station configuration tables
def clean_station_configuration(cdm_tab):
    subs = {'o': [240, 242, 243, 244, 245, 246, 248], 'O': [210, 211, 212, 213, 214, 216],
            'a': [224, 225, 226, 227, 228, 229, 230], 'A': [192, 193, 194, 195, 196, 197, 198],
            'u': [249, 250, 251, 252, 253], 'U': [217, 218, 219, 220],
            'i': [236, 237, 238, 239], 'I': [204, 205, 206, 207, 304],
            'S': [350], 'n': [241], 'c': [231], 'C': [199], 'e': [232, 233, 234, 235], 'E': [200, 201, 202, 203]}
    for k in cdm_tab['station_configuration'].columns:
        if type(cdm_tab['station_configuration'][k][0]) is str:
            try:
                cdm_tab['station_configuration'][k].values[:] = cdm_tab['station_configuration'][k].values[:].astype('S')
            except:
                for l in range(cdm_tab['station_configuration'][k].values.shape[0]):
                    try:
                        cdm_tab['station_configuration'][k].values[l] = numpy.string_(cdm_tab['station_configuration'][k].values[l])
                    except:
                        for m in range(len(cdm_tab['station_configuration'][k].values[l])):
                            mychar = cdm_tab['station_configuration'][k].values[l][m]
                            if ord(mychar) > 128:
                                for n, v in subs.items():
                                    if ord(mychar) in v:
                                        cdm_tab['station_configuration'][k].values[l] = n.join(cdm_tab['station_configuration'][k].values[l].split(mychar))
                        cdm_tab['station_configuration'][k].values[l] = numpy.string_((cdm_tab['station_configuration'][k].values[l]).encode('utf-8'))
                cdm_tab['station_configuration'][k] = numpy.string_(cdm_tab['station_configuration'][k])
    print('Cleaned station_configuration')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _strip_invalid_characters(self: object) -> None:\n for current_invalid_character in Episode._invalid_characters:\n self.episode_broadcast = self.episode_broadcast.replace(current_invalid_character, \" \").strip()\n self.episode_inspectors = self.episode_inspectors.replace(current_invalid_character, \" \").strip()\n self.episode_name = self.episode_name.replace(current_invalid_character, \" \").strip()\n self.episode_sequence = self.episode_sequence.replace(current_invalid_character, \"-\").strip()", "def cleaner(self, w_old):\n w_new = re.sub('[\\(\\)]', '', w_old)\n w_new = re.sub('[^А-Яа-яЁё ]', 'ъ', w_new)\n w_new = re.sub(' ', ' ', w_new)\n return w_new", "def strip_other_charcter():\n pass", "def old_strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", \"LVFUSES\", \"SHAPE_Area\",\r\n \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\", \"DECOMMISSIONINGREASON\"]\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', '^CROSSINGID$'],\r\n ['DistributionMain$', '^MOUNTINGTYPE$'],\r\n ['DistributionMain$', '^MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', '^SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', '^EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', '^APPLICATION$', 'N'],\r\n ['SurfaceStructure$', '^ENTRY$', 'N'],\r\n ['SurfaceStructure$', '^FACILITYID$', 'N'],\r\n ['SurfaceStructure$', '^MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', '^MATERIAL$', 'N'],\r\n ['SurfaceStructure$', '^MODEL$', 'N'],\r\n ['SurfaceStructure$', '^STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYCOUNT$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', '^ENCLOSURE$'],\r\n ['SurfaceStructure$', '^ENCLOSURETYPE$'],\r\n ['SurfaceStructure$', '^ENCLOSUREMANUFACTURER$']]\r\n for tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. 
For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def replace_characters(self, translation_table):\n def char2ord(c):\n return ord(c) if isinstance(c, str) else c\n\n translation_table = {char2ord(c): char2ord(r) for c, r in translation_table.items()}\n\n new_docs = {}\n for dl, dt in self.docs.items():\n new_docs[dl] = dt.translate(translation_table)\n self.docs = new_docs\n\n return self", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", \"LVFUSES\", \"SHAPE_Area\",\r\n \"None\", \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\",\r\n \"DECOMMISSIONINGREASON\", 'LOTS YET TO ADD']\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tabs = ['LocationAttributes', 'CustomerConnections', 'TBD']\r\n data = data[~data['TABLE'].isin(bad_tabs)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', 'CROSSINGID$'],\r\n ['DistributionMain$', 'MOUNTINGTYPE$'],\r\n ['DistributionMain$', 'MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', 'SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', 'EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', 'APPLICATION$', 'N'],\r\n ['SurfaceStructure$', 'ENTRY$', 'N'],\r\n ['SurfaceStructure$', 'FACILITYID$', 'N'],\r\n ['SurfaceStructure$', 'MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'MATERIAL$', 'N'],\r\n ['SurfaceStructure$', 'MODEL$', 'N'],\r\n ['SurfaceStructure$', 'STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYCOUNT$', 'N'],\r\n ['PillarPoint$', 'DATEMANUFACTURED$', 'TBC'],\r\n ['PillarPoint$', 'FACILITYID$', 'TBC'],\r\n ['PillarPoint$', 'FEEDERID$', 'TBC'],\r\n ['PillarPoint$', 'NUMBEROFUSEDCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'SUBTYPECD$', 'N'],\r\n ['PillarPoint$', 'TOTALNUMBEROFCIRCUITS$', 'TBC'],\r\n ['PillarPoint$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['SurfaceStructure$', 'ASSETFUNCTION$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$', 'N'],\r\n ['SurfaceStructure$', 'GLOBALID$', 'N'],\r\n ['SurfaceStructure$', 'STREETNAME$', 'N'],\r\n ['SurfaceStructure$', 'STREETNO$', 'N'],\r\n ['SurfaceStructure$', 'SUBURB$', 'N'],\r\n ['SurfaceStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['SurfaceStructure$', 'TOWN$', 'N'],\r\n ['Switch$', 'FACILITYID$', 'N'],\r\n ['Switch$', 'FEEDERID$', 'N'],\r\n ['Switch$', 'FEEDERID2$', 'N'],\r\n ['Switch$', 'GEONETFEEDERCODE$', 'N'],\r\n ['Switch$', 'GLOBALID$', 'N'],\r\n ['Switch$', 'GROUNDEDINDICATOR$', 'N'],\r\n ['Switch$', 'INSTALLATIONDATE$', 'N'],\r\n ['Switch$', 'MOUNTING$', 'N'],\r\n ['Switch$', 
'NORMALPOSITION$', 'N'],\r\n ['Switch$', 'NUMPHASES$', 'N'],\r\n ['Switch$', 'OPERATINGVOLTAGE$', 'N'],\r\n ['Switch$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['Switch$', 'REFERENCE$', 'N'],\r\n ['Switch$', 'REMOTECONTROLLED$', 'N'],\r\n ['Switch$', 'REMOTEINDICATION$', 'N'],\r\n ['Switch$', 'RETICULATION$', 'N'],\r\n ['Switch$', 'SITEID$', 'N'],\r\n ['Switch$', 'STREETNAME$', 'N'],\r\n ['Switch$', 'STREETNO$', 'N'],\r\n ['Switch$', 'SUBTYPECD$', 'N'],\r\n ['Switch$', 'SUBURB$', 'N'],\r\n ['Switch$', 'SYMBOLROTATION$', 'N'],\r\n ['Switch$', 'TOWN$', 'N'],\r\n ['Switch$', 'WORKORDERID$', 'N'],\r\n ['SWITCHUNIT$', 'ARCQUENCHING$', 'N'],\r\n ['SWITCHUNIT$', 'C_INTJDEID$', 'N'],\r\n ['SWITCHUNIT$', 'COMMENTS$', 'N'],\r\n ['SWITCHUNIT$', 'DATEMANUFACTURED$', 'N'],\r\n ['SWITCHUNIT$', 'DATEPURCHASED$', 'N'],\r\n ['SWITCHUNIT$', 'INSTALLATIONDATE$', 'N'],\r\n ['SWITCHUNIT$', 'INSULATIONMEDIUM$', 'N'],\r\n ['SWITCHUNIT$', 'LOADBREAKINGCAPACITY$', 'N'],\r\n ['SWITCHUNIT$', 'MANUFACTURER$', 'N'],\r\n ['SWITCHUNIT$', 'MODEL$', 'N'],\r\n ['SWITCHUNIT$', 'NORMALCURRENTRATING$', 'N'],\r\n ['SWITCHUNIT$', 'NUMPHASES$', 'N'],\r\n ['SWITCHUNIT$', 'OWNER$', 'N'],\r\n ['SWITCHUNIT$', 'REFERENCE$', 'N'],\r\n ['SWITCHUNIT$', 'SERIALNUMBER$', 'N'],\r\n ['SWITCHUNIT$', 'VISUALEARTHINDICATOR$', 'N'],\r\n ['SWITCHUNIT$', 'VOLTAGERATING$', 'N'],\r\n ['SWITCHUNIT$', 'WORKORDERID$', 'N'],\r\n ['UndergroundStructure$', 'C_INTJDEID$', 'N'],\r\n ['UndergroundStructure$', 'COMMENTS$', 'N'],\r\n ['UndergroundStructure$', 'FACILITYID$', 'N'],\r\n ['UndergroundStructure$', 'FEEDERID$', 'N'],\r\n ['UndergroundStructure$', 'GLOBALID$', 'N'],\r\n ['UndergroundStructure$', 'HIGHESTVOLTAGE$', 'N'],\r\n ['UndergroundStructure$', 'INSTALLATIONDATE$', 'N'],\r\n ['UndergroundStructure$', 'OUTOFORDERINDICATOR$', 'N'],\r\n ['UndergroundStructure$', 'OWNER$', 'N'],\r\n ['UndergroundStructure$', 'REFERENCE$', 'N'],\r\n ['UndergroundStructure$', 'STREETNAME$', 'N'],\r\n ['UndergroundStructure$', 'STREETNO$', 'N'],\r\n ['UndergroundStructure$', 'SUBURB$', 'N'],\r\n ['UndergroundStructure$', 'SYMBOLROTATION$', 'N'],\r\n ['UndergroundStructure$', 'TOWN$', 'N'],\r\n ['UndergroundStructure$', 'WORKORDERID$', 'N'],\r\n ['Fuse$', 'INSTALLATIONDATE$', 'N'],\r\n ['Ground$', 'BELOWGROUNDCONNECTION$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE2$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'COOLINGTYPE3$', 'TBD'],\r\n ['POWERTRANSFORMERUNIT$', 'CTBURDENVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTCLASS$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTQUANTITY$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'CTRATIO$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCE3$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'IMPEDANCEZ0$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA2$', 'N'],\r\n ['POWERTRANSFORMERUNIT$', 'RATEDMVA3$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MANUFACTURER$', 'N'],\r\n ['AUXILLARYEQUIPMENT$', 'MODEL$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'BATTERYTYPE$', 'N'],\r\n ['SupportStructure$', 'FUNCTION_$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'GENERATORFUELTYPE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'HOURSOFSUPPLY$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'N'],\r\n ['COMMSPOWERSUPPLY$', 'PARALELLCOUNT$', 'TBD'],\r\n ['COMMSPOWERSUPPLY$', 'SYSTEMVOLTAGE$', 'TBD'],\r\n ['SurfaceStructure$', 'TRUENZMGPOS$', 'N'],\r\n ['SupportStructure$', 'ABSOLUTE$', 'N'],\r\n ['DISTTRANSFUSEUNIT$', 'VOLTAGERATING$', 'N'],\r\n 
['DISTTRANSFUSEUNIT$', 'WORKORDERID$', 'N'],\r\n ['SupportStructure$', 'FEEDERID$', 'TBC'],\r\n ['SupportStructure$', 'SHAPE$', ' N'],\r\n ['SupportStructure$', 'SUBTYPECD$', 'TBD'],\r\n ['SupportStructure$', 'TREATMENTTYPE$', 'N'],\r\n ['SupportStructure$', 'TRUENZMG$', 'N'],\r\n ['SupportStructure$', 'TYPEOFTOP$', 'N'],\r\n ['SupportStructure$', 'USAGETYPE$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', 'ENCLOSURE$'],\r\n ['SurfaceStructure$', 'ENCLOSUREMANUFACTURER$'],\r\n ['SurfaceStructure$', 'ENCLOSURETYPE$'],\r\n ['Fuse$', 'ACCURACY$'],\r\n ['Fuse$', 'ANCILLARYROLE$'],\r\n ['Fuse$', 'ASSETFUNCTION$'],\r\n ['Fuse$', 'C_INTJDEID$'],\r\n ['Fuse$', 'COMMENTS$'],\r\n ['Fuse$', 'CREATIONUSER$'],\r\n ['Fuse$', 'DATECREATED$'],\r\n ['Fuse$', 'DATEMODIFIED$'],\r\n ['Fuse$', 'DEVICETYPE$'],\r\n ['Fuse$', 'ELECTRICTRACEWEIGHT$'],\r\n ['Fuse$', 'ENABLED$'],\r\n ['Fuse$', 'FACILITYID$'],\r\n ['Fuse$', 'FEEDERID$'],\r\n ['Fuse$', 'FEEDERID2$'],\r\n ['Fuse$', 'FEEDERINFO$'],\r\n ['Fuse$', 'GEONETFEEDERCODE$'],\r\n ['Fuse$', 'GEONETFEEDERID$'],\r\n ['Fuse$', 'GEONETSUBSTATION$'],\r\n ['Fuse$', 'GLOBALID$'],\r\n ['Fuse$', 'INSTALLEDBY$'],\r\n ['Fuse$', 'LABELTEXT$'],\r\n ['Fuse$', 'LASTUSER$'],\r\n ['Fuse$', 'MANUFACTURER$'],\r\n ['Fuse$', 'MAXCONTINUOUSCURRENT$'],\r\n ['Fuse$', 'MAXINTERRUPTINGCURRENT$'],\r\n ['Fuse$', 'MAXOPERATINGVOLTAGE$'],\r\n ['Fuse$', 'MOUNTING$'],\r\n ['Fuse$', 'NOMINALVOLTAGE$'],\r\n ['Fuse$', 'NORMALPOSITION$'],\r\n ['Fuse$', 'NUMPHASES$'],\r\n ['Fuse$', 'OBJECTID$'],\r\n ['Fuse$', 'OPERATINGVOLTAGE$'],\r\n ['Fuse$', 'OUTOFORDERINDICATOR$'],\r\n ['Fuse$', 'OWNER$'],\r\n ['Fuse$', 'PARENTID$'],\r\n ['Fuse$', 'PHASEDESIGNATION$'],\r\n ['Fuse$', 'PREMISE$'],\r\n ['Fuse$', 'PRESENTPOSITION$'],\r\n ['Fuse$', 'RDB_UFID$'],\r\n ['Fuse$', 'REFERENCE$'],\r\n ['Fuse$', 'REMOTECONTROLLED$'],\r\n ['Fuse$', 'REMOTEINDICATION$'],\r\n ['Fuse$', 'RETICULATION$'],\r\n ['Fuse$', 'SCADACONTROLMECHANISM$'],\r\n ['Fuse$', 'SCADACONTROLTYPE$'],\r\n ['Fuse$', 'SCADAPTID$'],\r\n ['Fuse$', 'SHAPE$'],\r\n ['Fuse$', 'SITEID$'],\r\n ['Fuse$', 'STREETNAME$'],\r\n ['Fuse$', 'STREETNO$'],\r\n ['Fuse$', 'SUBTYPECD$'],\r\n ['Fuse$', 'SUBURB$'],\r\n ['Fuse$', 'SYMBOLROTATION$'],\r\n ['Fuse$', 'TIMESTAMP$'],\r\n ['Fuse$', 'TOWN$'],\r\n ['Fuse$', 'TYPE$'],\r\n ['Fuse$', 'WORKORDERID$'],\r\n ['Fuse$', 'ZONE$']]\r\n for tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data", "def canon_station_name(s, line):\n s = s.strip()\n s = re.sub('^Heathrow$', 'Heathrow Terminals 1, 2, 3', s)\n s = re.sub('^Olympia$', 'Kensington (Olympia)', s)\n s = re.sub('^Warwick Ave$', 'Warwick Avenue', s)\n s = re.sub('^Camden$', 'Camden Town', s)\n s = re.sub('^Central$', 'Finchley Central', s) # They say \"Between Central and East Finchley\"\n s = re.sub('\\s*Platform \\d$', '', s)\n s = s + ' Station'\n s = s.replace('(Bakerloo)', 'Bakerloo').replace('Earls', 'Earl\\'s') \\\n .replace(' fast ', ' ') \\\n .replace('\\xe2\\x80\\x99', \"'\") \\\n .replace('St ', 'St. 
') \\\n .replace('Elephant and Castle', 'Elephant &amp; Castle') \\\n .replace('Lambeth Station', 'Lambeth North Station') \\\n .replace('Chalfont Station', 'Chalfont &amp; Latimer Station') \\\n .replace('West Brompon', 'West Brompton') \\\n .replace('Picadilly Circus', 'Piccadilly Circus') \\\n .replace('High Barent', 'High Barnet') \\\n .replace('Bartnet', 'Barnet') \\\n .replace('Faringdon', 'Farringdon') \\\n .replace('Turnham Greens', 'Turnham Green') \\\n .replace('Ruilsip', 'Ruislip') \\\n .replace('Dagemham', 'Dagenham') \\\n .replace('Edgware Road (H &amp; C)', 'Edgware Road Circle') \\\n .replace('Hammersmith (Circle and H&amp;C)', 'Hammersmith') \\\n .replace('Shepherds Bush (Central Line)', \"Shepherd's Bush\") \\\n .replace('Terminals 123', 'Terminals 1, 2, 3').replace('Terminal 1,2,3', 'Terminals 1, 2, 3') \\\n .replace('Woodford Junction', 'Woodford') \\\n .replace(\"King's Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace(\"Kings Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace('Central Finchley', 'Finchley Central').replace('District and Picc', 'D &amp; P') \\\n .replace('South Fields', 'Southfields') \\\n .replace('Regents Park', \"Regent's Park\") \\\n .replace('Bromley-by-Bow', \"Bromley-By-Bow\") \\\n .replace('Brent Oak', 'Burnt Oak') \\\n .replace('St. Johns Wood', \"St. John's Wood\") \\\n .replace('Totteridge and Whetstone', 'Totteridge &amp; Whetstone') \\\n .replace('Newbury Park Loop', 'Newbury Park') \\\n .replace('Harrow-on-the-Hill', 'Harrow on the Hill')\n if s == 'Edgware Road Station' and line == 'B':\n s = 'Edgware Road Bakerloo Station'\n if s == 'Edgware Road Station' and line != 'B':\n s = 'Edgware Road Circle Station'\n return s", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def merge_canton_dict():\n final = open('data/cantonese/final.dat', 'w')\n\n with open('data/dict.dat', 'r') as mandarin_dict:\n for line in mandarin_dict.readlines():\n char = line.split(' ')[0]\n if len(char) > 1:\n\n skip = False # In case it is already found\n\n # Do we need this one translated? 
Check if it already exists\n with open('data/cantonese/dict.dat', 'r') as cantonese_dict:\n for cantonese_line in cantonese_dict.readlines():\n split = cantonese_line.split('\\t')\n if char == split[0]:\n skip = True\n continue\n\n if skip:\n continue\n\n tmp = [] # Store the cantonese info for each character\n\n for c in char:\n if re.match('[\\u4E00-\\u9FCC]', c):\n with open('data/cantonese/dict.dat', 'r') as cantonese_dict:\n for cantonese_line in cantonese_dict.readlines():\n split = cantonese_line.split('\\t')\n if c == split[0]:\n # Found the character as an exact match, now we want to store the cantonese pinyin\n # But some single characters have multiple pronunciations\n regex_result = re.search('\\[(.+?)\\]', cantonese_line)\n pinyin = regex_result.group(1)\n tmp.append(pinyin.split()[0]) # As I don't speak Cantonese, assume the first one\n break # The first one found is of higher quality\n\n if tmp:\n translation = (re.search('\\](.+)', line).group(1)[2:-1])\n translation = translation.split('/CL:', 1)[0] # Don't bother with the CL (measurewords)\n\n # Add only if all characters were translatable\n if len(tmp) == len(char):\n final.write('{0}\\t[{1}]{2}\\n'.format(char, ' '.join(tmp), translation))\n\n final.close()", "def SpecialCodes(self):\n if sre.search(r\"[^aeiouy]e\\b\", self.wd): # nonsyllabic final e after C\n if ((not self.isPlural or self.wd[-2] not in SIBILANTS) and\n (not self.isPast or self.wd[-2] not in 'dt')):\n self.wd = self.wd[:-1] + encode(self.wd[-1])\n if not sre.search(r\"[aeiouy]\", self.wd): # any vowel left??\n self.wd = self.wd[:-1] + 'e' # undo the encoding\n self.wd = self.CiVcomb.sub(handleCiV, self.wd)\n self.wd = self.CCpair.sub(handleCC, self.wd)\n self.wd = self.VyVcomb.sub(handleVyV, self.wd)", "def _clean_non_alphanumeric_chars(self):\n\n for i,variable in enumerate(self.model_description.modelVariables):\n clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)\n if clean_name != variable.name:\n log = \"Sim variable '{}' has been renamed to '{}' \".format(variable.name, clean_name)\n log += \"to comply with Bonsai naming requirements.\"\n print(log)\n self.model_description.modelVariables[i].name = clean_name\n\n return", "def _apply_character_maskings(self):\n for permutation in self.permutations:\n for char_symbol in self.characters.keys():\n for i in permutation.find_all(\"character-link\", ref=char_symbol): \n i.string.replace_with(self.characters[char_symbol])\n\n self.plain_text = \" \".join([permuation.description.text for permuation in self.permutations])\n self.reapply_plain_text_editing()", "def reset_diacritics_params(self):\n self._dia = self._DIA_NEUTRAL", "def test_special_chars(self):\r\n # afaik, location.check_list prevents $ in all fields\r\n org = 'foo.org.edu'\r\n course = 'bar.course-4'\r\n name = 'baz.run_4-3'\r\n location = Location(org, course, name, 'course', name)\r\n prob_locator = loc_mapper().translate_location(\r\n location,\r\n add_entry_if_missing=True\r\n )\r\n reverted_location = loc_mapper().translate_locator_to_location(prob_locator)\r\n self.assertEqual(location, reverted_location)", "def fix_compname_configs(ibs):\n # ibs.MANUAL_CONFIG_SUFFIX = '_MANUAL_' #+ ut.get_computer_name()\n # ibs.MANUAL_CONFIGID = ibs.add_config(ibs.MANUAL_CONFIG_SUFFIX)\n # We need to fix the manual config suffix to not use computer names anymore\n\n configid_list = ibs.get_valid_configids()\n cfgsuffix_list = ibs.get_config_suffixes(configid_list)\n\n ibs.MANUAL_CONFIG_SUFFIX = 'MANUAL_CONFIG'\n ibs.MANUAL_CONFIGID = 
ibs.add_config(ibs.MANUAL_CONFIG_SUFFIX)\n\n for rowid, suffix in filter(\n lambda tup: tup[1].startswith('_MANUAL_'), zip(configid_list, cfgsuffix_list)\n ):\n logger.info('EVALUATING: {!r}, {!r}'.format(rowid, suffix))\n # Fix the tables with bad config_rowids\n ibs.db.executeone(\n \"\"\"\n UPDATE {AL_RELATION_TABLE}\n SET config_rowid=?\n WHERE config_rowid=?\n \"\"\".format(\n **const.__dict__\n ),\n params=(ibs.MANUAL_CONFIGID, rowid),\n )\n\n # Delete the bad config_suffixes\n ibs.db.executeone(\n \"\"\"\n DELETE\n FROM {CONFIG_TABLE}\n WHERE config_rowid=?\n \"\"\".format(\n **const.__dict__\n ),\n params=(rowid,),\n )", "def sanitize_diagnoses(df):\n df = df.str.replace(\"\\W\", \"\") # \"\\W\" regex represents ANY non-alphanumeric character\n# assert (df.str.contains(\"\\W\")).any(), \"At least one diagnosis has a non-alphanumeric character in it\"\n return df", "def standardize_text(df: pd.DataFrame,\r\n text_field: str,\r\n output_field: str) -> pd.DataFrame:\r\n\r\n # df[output_field] = df[text_field].apply(\r\n # lambda column: emoji.get_emoji_regexp().sub(u'', column)\r\n # )\r\n\r\n df[output_field] = df[text_field].str.replace(\"'m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"’m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"´m\", ' am')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"’ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"´ve\", ' have')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"’d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"´d\", ' would')\r\n\r\n df[output_field] = df[output_field].str.replace(\"n't\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n’t\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n´t\", ' not')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"’ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"´ll\", ' will')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'s\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"’\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"´s\", ' is')\r\n\r\n df[output_field] = df[output_field].str.replace('/', ' ')\r\n df[output_field] = df[output_field].str.replace('\\.{2,}', '.')\r\n df[output_field] = df[output_field].str.replace('!{2,}', '!')\r\n df[output_field] = df[output_field].str.replace('\\?{2,}', '?')\r\n df[output_field] = df[output_field].str.replace('€+', '')\r\n df[output_field] = df[output_field].str.replace('[0-9$&~\\\\()[\\]{}<>%\\'\"“”‘’,;…+\\-_=*]+', '')\r\n df[output_field] = df[output_field].str.replace(r'http\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'http', '')\r\n df[output_field] = df[output_field].str.replace(r'@\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'@', 'at')\r\n df[output_field] = df[output_field].str.lower()\r\n df[output_field] = df[output_field].astype(str)\r\n\r\n return df", "def utify_chars(babylex_df):\n babylex_df['root'] = babylex_df['root'].str.replace(\"T\", \"ṭ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"c\", \"š\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"S\", \"ṣ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"x\", \"'\")\n\n babylex_df['infinitive'] = 
babylex_df['infinitive'].str.replace(\"T\", \"ṭ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"c\", \"š\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"S\", \"ṣ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"X\", \"'\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"aa\", \"ā\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ee\", \"ē\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ii\", \"ī\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"uu\", \"ū\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"A\", \"â\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"E\", \"ê\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"I\", \"î\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"U\", \"û\")\n\n return babylex_df", "def fix_output(text: str) -> str:\n\n text = text.replace(\" n't\", \"n't\")\n return text", "def fix_show(show):\n if isinstance(show, list):\n show = [i.lower() for i in show]\n elif isinstance(show, STRINGTYPE):\n show = show.lower()\n show = [show]\n\n # this little 'n' business is a hack: when ngramming,\n # n shows have their n stripped, so nw should be nw \n # so we know we're ngramming and so it's not empty.\n for index, val in enumerate(show):\n if val == 'n' or val == 'nw':\n show[index] = 'nw'\n elif val == 'b' or val == 'bw':\n show[index] = 'bw'\n elif val.endswith('pl'):\n show[index] = val.replace('pl', 'x')\n else:\n if len(val) == 2 and val.endswith('w'):\n show[index] = val[0]\n return show", "def dealWithHouses(house):\r\n house = house.split(\"लिंग\")[0]\r\n ophouse = transliterate(house,sanscript.DEVANAGARI,sanscript.ITRANS) \r\n for i in mapping:\r\n ophouse = ophouse.replace(i[0],i[1])\r\n ophouse = saneHouse(ophouse.strip())\r\n return ophouse", "def dummy_junction12():\n return \"junction:chr1:176-224:+\"", "def replace_special_characters_in_list(self, full_list):\n return [n.replace(':','%3A') for n in full_list]", "def __normalize_string(self, string):\n\n if self._dia & self._DIA_PRE93:\n string = string.replace(u\"Â\", u\"Î\")\n string = string.replace(u\"ROMÎNĂ\", u\"ROMÂNĂ\")\n elif self._dia & self._DIA_POST93:\n string = string.replace(u\"Î\", u\"Â\")\n string = string.replace(u\"Â \", u\"Î\")\n\n if self._dia & self._DIA_CEDILLA:\n string = string.replace(u\"Ș\", u\"Ş\")\n string = string.replace(u\"Ț\", u\"Ţ\")\n elif self._dia & self._DIA_COMMA:\n string = string.replace(u\"Ş\", u\"Ș\")\n string = string.replace(u\"Ţ\", u\"Ț\")\n\n if self._dia & self._DIA_NONE:\n string = string.replace(u\"Î\", u\"I\")\n string = string.replace(u\"Â\", u\"A\")\n string = string.replace(u\"Ă\", u\"A\")\n string = string.replace(u\"Ș\", u\"S\")\n string = string.replace(u\"Ț\", u\"T\")\n\n return string", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n 
return text", "def fix_map_exceptions(stations, addresses, lines):\n for i in range(0, len(stations)):\n station = stations[i]\n address = addresses[i]\n curlines = lines[i]\n\n if station == \"Wtc - Cortlandt\" or station == \"Park Place Station\" or station == \"World Trade Center\":\n stations[i] = \"World Trade Center\"\n addresses[i] = \"79 Church St\"\n lines[i] = \"1,2,3,A,C,E,N,Q,R,W\"\n if station == \"51 St\" or station == \"Lexington Av/53 St\":\n stations[i] = \"Lexington Av/53 St\"\n addresses[i] = \"201 East 53rd St\"\n lines[i] = \"4,6,6X,E,M\"\n if station == \"Lexington Av/63 St\" or station == \"Lexington Av / 59 St\":\n stations[i] = \"Lexington Av / 59 St\"\n addresses[i] = \"743 Lexington Ave\"\n lines[i] = \"4,5,6,F,N,Q,R\"\n if station == \"Broadway-Lafayette St\" or station == \"Bleecker St\":\n stations[i] = \"Bleecker St\"\n addresses[i] = \"338 Lafayette Street\"\n lines[i] = \"4,6,6X,B,D,F,M\"\n if station == \"E 180th\":\n lines[i] = \"2,5\"\n if station == \"61 St\":\n stations[i] = \"New Utrecht Av\"\n addresses[i] = \"1462 62nd St\"\n lines[i] = \"D,N,W\"\n if station == \"Canal St\" and address == \"257 Canal Street\":\n lines[i] = \"N,Q,R,J,Z,4,6\"\n if station == \"East 174 Street Station Subway\":\n lines[i] = \"2,5\"\n if station == \"Jay St - Metrotech\":\n lines[i] = \"A,C,F,N,Q,R\"\n if station == \"Court St\":\n lines[i] = \"N,Q,R\"\n if station == \"Rector St\" and address == \"33 Trinity Place\":\n lines[i] = \"N,Q,R\"\n if station == \"City Hall\":\n lines[i] = \"N,Q,R\"\n if station == \"Whitehall St\":\n lines[i] = \"N,Q,R,W\"\n if station == \"45 St\":\n lines[i] == \"N,R\"\n\n\n return stations, addresses, lines", "def unescape_special_areas(data: str):\n return re.sub(r\"[\\ue000-\\ue0ff]\", _restore_from_private_code_plane, data)" ]
[ "0.57187146", "0.5708669", "0.55323374", "0.5492229", "0.5476717", "0.54078436", "0.53837335", "0.53668267", "0.5350255", "0.5330315", "0.5287106", "0.5197462", "0.5163849", "0.51432276", "0.5143008", "0.51317203", "0.51091266", "0.5106305", "0.5104198", "0.5097012", "0.50901425", "0.50891036", "0.5085364", "0.5067187", "0.5060854", "0.50306726", "0.5027136", "0.5018613", "0.50051546", "0.499412" ]
0.7234293
0
Return the 2D center position of `pos + shift`
def _center(pos: ArrayLike, shift: ArrayLike) -> Tuple[float, float]:
    x = np.concatenate((pos[0], pos[0] + shift[0]))
    y = np.concatenate((pos[1], pos[1] + shift[1]))
    return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2", "def center(self):\n return self.pos + self.axis / 2.0", "def crop_center(im, ps):\n if not type(ps) == int:\n raise TypeError('INPUT ps must be a scalar')\n center = [s/2 for s in im.shape[:2]]\n el = [ps / 2, ps / 2] if ps % 2 == 0 else [ps / 2, ps/2 + 1] # edge length\n return(im[center[0] - el[0] : center[0] + el[1], center[1] - el[0] : center[1] + el[1], :])", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def centerx(self):\n return self.left + self.width / 2", "def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def center(box):\n x_center = box[:, 0] + (box[:, 2] - box[:, 0]) // 2\n y_center = box[:, 1] + (box[:, 3] - box[:, 1]) // 2\n return torch.stack((x_center, y_center)).t().to(box.device)", "def reconstruct_seq_centered(seq, nucleosome_pos):\n\n # equivalence\n d_nucleotide = {0: 1, # 'A',\n 1: 0, # 'C',\n 2: 1, # 'T',\n 3: 0, # 'G'\n }\n\n seqd = np.vectorize(d_nucleotide.get)(seq)\n array_nuc = []\n\n # select only the nucleosome positions\n for pos in nucleosome_pos:\n array_nuc.append(seqd[pos - 58:pos + 59])\n\n # do the stack\n center_nucleosome = np.sum(array_nuc, axis=0) / len(array_nuc)\n\n return center_nucleosome", "def get_center_tile(self):\n mid_x = int(len(self.map)/2)\n mid_y = int(len(self.map[0])/2)\n return self.map[mid_x][mid_y]", "def translateToFirstCutoffCell(pos):\r\n x = pos[0]\r\n y = pos[1]\r\n z = pos[2]\r\n \r\n while x >= Na:\r\n x = x - Na\r\n \r\n while y >= Nb:\r\n y = y - Nb\r\n \r\n while z >= Nc:\r\n z = z - Nc\r\n \r\n return (x,y,z)", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)", "def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. 
- self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j", "def center(self):\n return (self.upper_right + self.lower_left) * 0.5", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def get_pos(self) -> tuple:\n return self.rect.center", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def center(self):\n\n return (\n self.x() + (self.width() / 2),\n self.y() + (self.height() / 2)\n )", "def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def getcellcenter(self,cellx,celly):\n xpos = self.xmargin + cellx*CELLSIZE + CELLSIZE/2\n ypos = self.ymargin + celly*CELLSIZE + CELLSIZE/2\n return (xpos,ypos)", "def get_center(self):\n x = round(self.x_pos)\n y = round(self.y_pos)\n return [int(x),int(y)]", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index('left_hip')]\n right_hip = landmarks[self._landmark_names.index('right_hip')]\n center = (left_hip + right_hip) * 0.5\n return center", "def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])", "def init_at_shifted_center(\n cls,\n vertex_data,\n shifts,\n lattice_constants,\n aligner_edge=None\n ):\n\n positions = []\n for vertex, shift in zip(vertex_data, shifts):\n total_shift = 0\n for dim_shift, constant in zip(shift, lattice_constants):\n total_shift += dim_shift * constant\n positions.append(vertex.position + total_shift)\n\n position = np.divide(\n np.sum(positions, axis=0),\n len(positions)\n )\n return cls(*position)", "def center(self):\n return (self.centerx, self.centery)" ]
[ "0.87730724", "0.6642049", "0.65281355", "0.64186084", "0.6198713", "0.6174678", "0.6117257", "0.6109757", "0.60876495", "0.6070367", "0.60687387", "0.6055401", "0.6047456", "0.60136634", "0.6009561", "0.59890777", "0.59838825", "0.5973223", "0.59308547", "0.5927332", "0.5923841", "0.5897815", "0.5893101", "0.588824", "0.58824396", "0.58420944", "0.5834306", "0.5826064", "0.580123", "0.5783794" ]
0.83657295
1
Plot the sites, hoppings and periodic boundaries of the lead
def plot(self, lead_length: int = 6, ax: Optional[plt.Axes] = None, **kwargs) -> None:
    if ax is None:
        ax = plt.gca()
    pos = self.system.positions
    sub = self.system.sublattices
    inner_hoppings = self.system.hoppings.tocoo()
    boundary = self.system.boundaries[0]
    outer_hoppings = boundary.hoppings.tocoo()

    props = structure_plot_properties(**kwargs)
    props['site'].setdefault('radius', self.system.lattice.site_radius_for_plot())

    blend_gradient = np.linspace(0.5, 0.1, lead_length)
    for i, blend in enumerate(blend_gradient):
        offset = i * boundary.shift
        plot_sites(pos, sub, offset=offset, blend=blend, **props['site'], ax=ax)
        plot_hoppings(pos, inner_hoppings, offset=offset, blend=blend, **props['hopping'], ax=ax)
        plot_hoppings(pos, outer_hoppings, offset=offset - boundary.shift, blend=blend,
                      boundary=(1, boundary.shift), **props['boundary'], ax=ax)

    label_pos = _center(pos, lead_length * boundary.shift * 1.5)
    pltutils.annotate_box("lead {}".format(self.index), label_pos, bbox=dict(alpha=0.7), ax=ax)

    decorate_structure_plot(**props, ax=ax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_bond_lattice(lattice, worm, observables):\n # create bond grid for plotting\n line_range = np.linspace(0, lattice.L, lattice.L+1)\n x_grid, y_grid = np.meshgrid(line_range, line_range)\n\n\n # initialize figure.\n fig = plt.figure(figsize=(9, 9))\n ax = plt.axes(xlim=(0, lattice.L), ylim=(0, lattice.L))\n ax.set_xlabel(r'$T = %.2f,\\;\\langle H \\rangle = %.3f$' \n % (observables.T_range[-1], observables.mean_energy[0, -1]),\n fontsize=16, position=(0.5,-0.085))\n plt.subplots_adjust(bottom=0.1, top=0.96, right=0.96, left=0.04)\n # create grid (gray lines).\n plt.plot(x_grid, y_grid, c='#dddddd', lw=1)\n plt.plot(y_grid, x_grid, c='#dddddd', lw=1)\n ax.set_title(r'$\\rm{\\bf High\\ Temperature\\ Domain\\!\\ }$',\n fontsize=14, loc=('center'))\n # convert boolean bond data to numeric arrays for plotting.\n colors = ['aquamarine', 'midnightblue', 'skyblue', 'blueviolet', 'cadetblue', 'cornflowerblue', 'coral', 'firebrick', 'purple']\n #colors = ['azure']*8\n\n # plot bond lines.\n cm = plt.get_cmap('jet')\n #ax.set_color_cycle([cm(1.*i/(worm.q-1)) for i in range(worm.q-1)])\n for i in range(1, 2):\n xh = x_grid[lattice.bonds[0]==i].flatten()\n yh = y_grid[lattice.bonds[0]==i].flatten()\n xv = x_grid[lattice.bonds[1]==i].flatten()\n yv = y_grid[lattice.bonds[1]==i].flatten()\n h_bonds = np.hstack((np.vstack((xh, xh+1)), np.vstack((xv, xv))))\n v_bonds = np.hstack((np.vstack((yh, yh)), np.vstack((yv, yv+1))))\n plt.plot(h_bonds, v_bonds, 'r', lw=3)\n\n # plot worm head and tail.\n plt.plot(worm.tail[0], worm.tail[1], 'bs', ms=10)\n plt.plot(worm.head[0], worm.head[1], 'g>', ms=15)\n # disable clipping to show periodic bonds.\n for o in fig.findobj():\n o.set_clip_on(False)", "def plot(sys, num_lead_cells=2, unit='nn',\n site_symbol=None, site_size=None,\n site_color=None, site_edgecolor=None, site_lw=None,\n hop_color=None, hop_lw=None,\n lead_site_symbol=None, lead_site_size=None, lead_color=None,\n lead_site_edgecolor=None, lead_site_lw=None,\n lead_hop_lw=None, pos_transform=None,\n cmap='gray', colorbar=True, file=None,\n show=True, dpi=None, fig_size=None, ax=None):\n if not mpl_available:\n raise RuntimeError(\"matplotlib was not found, but is required \"\n \"for plot()\")\n\n syst = sys # for naming consistency inside function bodies\n # Generate data.\n sites, lead_sites_slcs = sys_leads_sites(syst, num_lead_cells)\n n_syst_sites = sum(i[1] is None for i in sites)\n sites_pos = sys_leads_pos(syst, sites)\n hops, lead_hops_slcs = sys_leads_hoppings(syst, num_lead_cells)\n n_syst_hops = sum(i[1] is None for i in hops)\n end_pos, start_pos = sys_leads_hopping_pos(syst, hops)\n\n # Choose plot type.\n def resize_to_dim(array):\n if array.shape[1] != dim:\n ar = np.zeros((len(array), dim), dtype=float)\n ar[:, : min(dim, array.shape[1])] = array[\n :, : min(dim, array.shape[1])]\n return ar\n else:\n return array\n\n loc = locals()\n\n def check_length(name):\n value = loc[name]\n if name in ('site_size', 'site_lw') and isinstance(value, tuple):\n raise TypeError('{0} may not be a tuple, use list or '\n 'array instead.'.format(name))\n if isinstance(value, (str, tuple)):\n return\n try:\n if len(value) != n_syst_sites:\n raise ValueError('Length of {0} is not equal to number of '\n 'system sites.'.format(name))\n except TypeError:\n pass\n\n for name in ['site_symbol', 'site_size', 'site_color', 'site_edgecolor',\n 'site_lw']:\n check_length(name)\n\n # Apply transformations to the data\n if pos_transform is not None:\n sites_pos = np.apply_along_axis(pos_transform, 1, 
sites_pos)\n end_pos = np.apply_along_axis(pos_transform, 1, end_pos)\n start_pos = np.apply_along_axis(pos_transform, 1, start_pos)\n\n dim = 3 if (sites_pos.shape[1] == 3) else 2\n if dim == 3 and not has3d:\n raise RuntimeError(\"Installed matplotlib does not support 3d plotting\")\n sites_pos = resize_to_dim(sites_pos)\n end_pos = resize_to_dim(end_pos)\n start_pos = resize_to_dim(start_pos)\n\n # Determine the reference length.\n if unit == 'pt':\n reflen = None\n elif unit == 'nn':\n if n_syst_hops:\n # If hoppings are present use their lengths to determine the\n # minimal one.\n distances = end_pos - start_pos\n else:\n # If no hoppings are present, use for the same purpose distances\n # from ten randomly selected points to the remaining points in the\n # system.\n points = _sample_array(sites_pos, 10).T\n distances = (sites_pos.T.reshape(1, -1, dim) -\n points.reshape(-1, 1, dim)).reshape(-1, dim)\n distances = np.sort(np.sum(distances**2, axis=1))\n # Then check if distances are present that are way shorter than the\n # longest one. Then take first distance longer than these short\n # ones. This heuristic will fail for too large systems, or systems with\n # hoppings that vary by orders and orders of magnitude, but for sane\n # cases it will work.\n long_dist_coord = np.searchsorted(distances, 1e-16 * distances[-1])\n reflen = sqrt(distances[long_dist_coord])\n\n else:\n # The last allowed value is float-compatible.\n try:\n reflen = float(unit)\n except:\n raise ValueError('Invalid value of unit argument.')\n\n # make all specs proper: either constant or lists/np.arrays:\n def make_proper_site_spec(spec, fancy_indexing=False):\n if callable(spec):\n spec = [spec(i[0]) for i in sites if i[1] is None]\n if (fancy_indexing and isarray(spec)\n and not isinstance(spec, np.ndarray)):\n try:\n spec = np.asarray(spec)\n except:\n spec = np.asarray(spec, dtype='object')\n return spec\n\n def make_proper_hop_spec(spec, fancy_indexing=False):\n if callable(spec):\n spec = [spec(*i[0]) for i in hops if i[1] is None]\n if (fancy_indexing and isarray(spec)\n and not isinstance(spec, np.ndarray)):\n try:\n spec = np.asarray(spec)\n except:\n spec = np.asarray(spec, dtype='object')\n return spec\n\n site_symbol = make_proper_site_spec(site_symbol)\n if site_symbol is None: site_symbol = defaults['site_symbol'][dim]\n # separate different symbols (not done in 3D, the separation\n # would mess up sorting)\n if (isarray(site_symbol) and dim != 3 and\n (len(site_symbol) != 3 or site_symbol[0] not in ('p', 'P'))):\n symbol_dict = defaultdict(list)\n for i, symbol in enumerate(site_symbol):\n symbol_dict[symbol].append(i)\n symbol_slcs = []\n for symbol, indx in symbol_dict.items():\n symbol_slcs.append((symbol, np.array(indx)))\n fancy_indexing = True\n else:\n symbol_slcs = [(site_symbol, slice(n_syst_sites))]\n fancy_indexing = False\n\n site_size = make_proper_site_spec(site_size, fancy_indexing)\n site_color = make_proper_site_spec(site_color, fancy_indexing)\n site_edgecolor = make_proper_site_spec(site_edgecolor, fancy_indexing)\n site_lw = make_proper_site_spec(site_lw, fancy_indexing)\n\n hop_color = make_proper_hop_spec(hop_color)\n hop_lw = make_proper_hop_spec(hop_lw)\n\n # Choose defaults depending on dimension, if None was given\n if site_size is None: site_size = defaults['site_size'][dim]\n if site_color is None: site_color = defaults['site_color'][dim]\n if site_edgecolor is None:\n site_edgecolor = defaults['site_edgecolor'][dim]\n if site_lw is None: site_lw = 
defaults['site_lw'][dim]\n\n if hop_color is None: hop_color = defaults['hop_color'][dim]\n if hop_lw is None: hop_lw = defaults['hop_lw'][dim]\n\n # if symbols are split up into different collections,\n # the colormapping will fail without normalization\n norm = None\n if len(symbol_slcs) > 1:\n try:\n if site_color.ndim == 1 and len(site_color) == n_syst_sites:\n site_color = np.asarray(site_color, dtype=float)\n norm = matplotlib.colors.Normalize(site_color.min(),\n site_color.max())\n except:\n pass\n\n # take spec also for lead, if it's not a list/array, default, otherwise\n if lead_site_symbol is None:\n lead_site_symbol = (site_symbol if not isarray(site_symbol)\n else defaults['site_symbol'][dim])\n if lead_site_size is None:\n lead_site_size = (site_size if not isarray(site_size)\n else defaults['site_size'][dim])\n if lead_color is None:\n lead_color = defaults['lead_color'][dim]\n lead_color = matplotlib.colors.colorConverter.to_rgba(lead_color)\n\n if lead_site_edgecolor is None:\n lead_site_edgecolor = (site_edgecolor if not isarray(site_edgecolor)\n else defaults['site_edgecolor'][dim])\n if lead_site_lw is None:\n lead_site_lw = (site_lw if not isarray(site_lw)\n else defaults['site_lw'][dim])\n if lead_hop_lw is None:\n lead_hop_lw = (hop_lw if not isarray(hop_lw)\n else defaults['hop_lw'][dim])\n\n hop_cmap = None\n if not isinstance(cmap, str):\n try:\n cmap, hop_cmap = cmap\n except TypeError:\n pass\n\n # make a new figure unless axes specified\n if not ax:\n fig = _make_figure(dpi, fig_size, use_pyplot=(file is None))\n if dim == 2:\n ax = fig.add_subplot(1, 1, 1, aspect='equal')\n ax.set_xmargin(0.05)\n ax.set_ymargin(0.05)\n ax.set_ylim(-10,10)\n ax.set_aspect('equal',anchor='SW')\n else:\n warnings.filterwarnings('ignore', message=r'.*rotation.*')\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n warnings.resetwarnings()\n ax.ylim([-10,10])\n else:\n fig = None\n\n # plot system sites and hoppings\n for symbol, slc in symbol_slcs:\n size = site_size[slc] if isarray(site_size) else site_size\n col = site_color[slc] if isarray(site_color) else site_color\n edgecol = (site_edgecolor[slc] if isarray(site_edgecolor) else\n site_edgecolor)\n lw = site_lw[slc] if isarray(site_lw) else site_lw\n\n symbol_coll = symbols(ax, sites_pos[slc], size=size,\n reflen=reflen, symbol=symbol,\n facecolor=col, edgecolor=edgecol,\n linewidth=lw, cmap=cmap, norm=norm, zorder=2)\n\n end, start = end_pos[: n_syst_hops], start_pos[: n_syst_hops]\n line_coll = lines(ax, end, start, reflen, hop_color, linewidths=hop_lw,\n zorder=1, cmap=hop_cmap)\n\n # plot lead sites and hoppings\n norm = matplotlib.colors.Normalize(-0.5, num_lead_cells - 0.5)\n cmap_from_list = matplotlib.colors.LinearSegmentedColormap.from_list\n lead_cmap = cmap_from_list(None, [lead_color, (1, 1, 1, lead_color[3])])\n\n for sites_slc, hops_slc in zip(lead_sites_slcs, lead_hops_slcs):\n lead_site_colors = np.array([i[2] for i in sites[sites_slc]],\n dtype=float)\n\n # Note: the previous version of the code had in addition this\n # line in the 3D case:\n # lead_site_colors = 1 / np.sqrt(1. 
+ lead_site_colors)\n symbols(ax, sites_pos[sites_slc], size=lead_site_size, reflen=reflen,\n symbol=lead_site_symbol, facecolor=lead_site_colors,\n edgecolor=lead_site_edgecolor, linewidth=lead_site_lw,\n cmap=lead_cmap, zorder=2, norm=norm)\n\n lead_hop_colors = np.array([i[2] for i in hops[hops_slc]], dtype=float)\n\n # Note: the previous version of the code had in addition this\n # line in the 3D case:\n # lead_hop_colors = 1 / np.sqrt(1. + lead_hop_colors)\n end, start = end_pos[hops_slc], start_pos[hops_slc]\n lines(ax, end, start, reflen, lead_hop_colors, linewidths=lead_hop_lw,\n cmap=lead_cmap, norm=norm, zorder=1)\n\n min_ = np.min(sites_pos, 0)\n max_ = np.max(sites_pos, 0)\n m = (min_ + max_) / 2\n if dim == 2:\n w = np.max([(max_ - min_) / 2, (reflen, reflen)], axis=0)\n ax.update_datalim((m - w, m + w))\n ax.autoscale_view(tight=True)\n else:\n # make axis limits the same in all directions\n # (3D only works decently for equal aspect ratio. Since\n # this doesn't work out of the box in mplot3d, this is a\n # workaround)\n w = np.max(max_ - min_) / 2\n ax.auto_scale_xyz(*[(i - w, i + w) for i in m], had_data=True)\n\n # add separate colorbars for symbols and hoppings if ncessary\n if symbol_coll.get_array() is not None and colorbar and fig is not None:\n fig.colorbar(symbol_coll)\n if line_coll.get_array() is not None and colorbar and fig is not None:\n fig.colorbar(line_coll)\n\n _maybe_output_fig(fig, file=file, show=show)\n\n return fig", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot_lead(lead_smap, index, lead_length=6, **kwargs):\n pos = lead_smap.positions\n sub = lead_smap.sublattices\n inner_hoppings = lead_smap.hoppings.tocoo()\n boundary = lead_smap.boundaries[0]\n outer_hoppings = boundary.hoppings.tocoo()\n\n props = structure_plot_properties(**kwargs)\n\n blend_gradient = np.linspace(0.5, 0.1, lead_length)\n for i, blend in enumerate(blend_gradient):\n offset = i * boundary.shift\n plot_sites(pos, sub, offset=offset, blend=blend, **props['site'])\n plot_hoppings(pos, inner_hoppings, offset=offset, blend=blend, **props['hopping'])\n plot_hoppings(pos, outer_hoppings, offset=offset - boundary.shift, blend=blend,\n boundary=(1, boundary.shift), **props['boundary'])\n\n label_pos = _center(pos, lead_length * boundary.shift * 1.5)\n pltutils.annotate_box(\"lead {}\".format(index), label_pos, bbox=dict(alpha=0.7))\n\n _decorate_structure_plot(**props)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot_leads_s(self, leads=None, start_time=0, end_time=5):\n\n if leads is None:\n print(\"No lead number or list provided, defaulting to plot all...\")\n leads = self.lead_spec\n elif isinstance(leads, list):\n _lead = leads\n leads = np.zeros_like(self.lead_spec)\n for l in _lead:\n leads[l] = 1\n elif isinstance(leads, int):\n _lead = leads\n leads = np.zeros_like(self.lead_spec)\n leads[_lead] = 1\n else:\n print('Error: plot_leads() argument must be int, a list of ints, or empty for all leads')\n return\n\n # Try to import matplotlib\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print(\"matplotlib not installed\")\n return\n\n # Check to make sure ecg data is loaded\n if not self.ecg_data_loaded:\n 
print(\"ECG data not loaded yet...\")\n return\n\n # Loop through given leads to plot\n for index, lead in enumerate(leads):\n if lead != 1:\n pass\n else:\n print(\"Plotting lead: \" + str(index))\n # Resolution[0] = 10000nv = 0.01mv\n y = self.ecg_lead_voltages[index] * (self.resolution[index] / 1000000.0)\n x = np.linspace(0, self.samples_per_lead / self.sampling_rate,\n num=self.samples_per_lead)\n y.sort()\n plt.plot(x, y, label='lead ' + str(index))\n plt.title('ECG')\n plt.xlabel('s')\n plt.ylabel('mV')\n plt.xlim(0, 3600 * 24)\n plt.ylim(-1, 1)\n plt.legend()\n plt.show()\n\n return", "def plot_priorsamps(meta):\n priorsamps = np.array(meta.priordist.sample_ps(len(meta.colors))[0])\n f = plt.figure(figsize=(5,10))\n sps_log = f.add_subplot(2,1,1)\n sps_lin = f.add_subplot(2,1,2)\n sps_log.set_title(meta.name)\n f.subplots_adjust(hspace=0, wspace=0)\n sps_log.set_ylabel(r'$\\ln[p(z|\\vec{\\theta})]$')\n sps_lin.set_xlabel(r'$z$')\n sps_lin.set_ylabel(r'$p(\\vec{\\theta})$')\n sps_log.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n sps_lin.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n plotstep(sps_log,meta.binends,meta.logintPz,l=r'Log Interim Prior $\\ln[p(z|\\vec{\\theta}^{0})$]')\n plotstep(sps_lin,meta.binends,meta.intPz,l=r'Interim Prior $p(z|\\vec{\\theta}^{0})$')\n for c in lrange(meta.colors):\n plotstep(sps_log,meta.binends,priorsamps[c]-np.log(meta.ngals),c=meta.colors[c])\n plotstep(sps_lin,meta.binends,np.exp(priorsamps[c]-np.log(meta.ngals)),c=meta.colors[c])\n sps_log.legend(loc='upper right',fontsize='x-small')\n sps_lin.legend(loc='upper right',fontsize='x-small')\n f.savefig(os.path.join(meta.topdir, 'priorsamps.pdf'),bbox_inches='tight', pad_inches = 0)\n return", "def plot_fig44a_wlc_inhomo_test7():\n\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n #paramters from calculation, all in nm\n a = 5 #looping radius\n DEL = 15.3 #spacing between nucleosomes\n MINUS1NUC = -83.3 #distance between -1 nuc and TSS\n PLUS1NUC = 0\n NNUC = 50 #number of nucleosomes on each side of TSS\n LPB = 0.34 #nm per basepair (as used in calculation)\n\n Llinks = np.concatenate(([245], np.tile(45, 49)))\n Rlinks = np.tile(45, 49)\n #plot as negative distance from TSS in bp\n unwrap=0\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap)\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap)\n x = np.concatenate((ldna_Llinks[::-1], [0], ldna_Rlinks))\n y = np.loadtxt('csvs/Sarah/pnucsite_test7.txt')\n ax.plot(x, y, '-o', markersize=3, color=teal_flucts)\n plt.xlabel(r'Distance from TSS (bp)')\n plt.ylabel(r'$P_\\mathrm{loop}(a=5\\mathrm{nm}; L)\\;\\;\\;(\\mathrm{nm}^{-3})$')\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.yscale('log')\n #plt.ylim([10**-3, 10**-0.4])\n plt.xlim([-10000, 10000])\n plt.savefig(f'plots/thesis/fig44a_wlc-inhomo-looping.pdf', bbox_inches='tight')", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot_ewald_peak_distances(self, ewaldsphere, filename='output.png', plot_region=[None, None, None, None], plot_buffers=[0.16, 0.035, 0.16, 0.03], label_peaks=False, blanked_figure=False, peaks_present=None, max_hkl=10, thresh=0.01):\n \n\n # Plot styling\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['axes.labelsize'] = 30\n plt.rcParams['xtick.labelsize'] = 
'xx-large'\n plt.rcParams['ytick.labelsize'] = 'xx-large'\n\n #plt.rcParams['axes.labelsize'] = 35\n #plt.rcParams['xtick.labelsize'] = 28\n #plt.rcParams['ytick.labelsize'] = 28\n\n\n fig = plt.figure(figsize=(7,7))\n #fig.subplots_adjust(left=0.17, bottom=0.15, right=0.97, top=0.94, wspace=0.2, hspace=0.2)\n #ax = plt.subplot(111)\n left_buf, right_buf, bottom_buf, top_buf = plot_buffers\n fig_width = 1.0-right_buf-left_buf\n fig_height = 1.0-top_buf-bottom_buf\n ax = fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height], aspect='equal' )\n\n \n\n if True:\n # Symmetry-peaks\n\n self.apply_rotation_z( -120.0 )\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0.7,0.7,0), linewidth=1.5 ) # Yellow peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0.7,0.7,0), linewidth=linewidth, alpha=alpha ) # Yellow peaks\n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n plt.text( x, y, s, size=12, color='0.6', horizontalalignment='left', verticalalignment='bottom' )\n\n\n self.apply_rotation_z( +240.0 )\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0,0.7,0.7), linewidth=1.5 ) # Blue-green peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0,0.7,0.7), linewidth=linewidth, alpha=alpha ) # Blue-green peaks\n \n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n plt.text( x, y, s, size=12, color='0.6', horizontalalignment='left', verticalalignment='bottom' )\n\n self.apply_rotation_z( -120.0 )\n\n \n \n # Regular peaks\n peaks_x, peaks_y, names, distances = self.peak_list(ewaldsphere, peaks_present=peaks_present, plot_region=plot_region, max_hkl=max_hkl, thresh=thresh)\n \n #plt.scatter( peaks_x, peaks_y, s=80, facecolor='none', edgecolor=(0,1,0), linewidth=1.5 ) # Green peaks\n for x, y, d in zip(peaks_x, peaks_y, distances):\n size, linewidth, alpha = self.d_to_marker(d, thresh=thresh)\n plt.scatter( x, y, s=size, facecolor='none', edgecolor=(0,1,0), linewidth=linewidth, alpha=alpha ) # Green peaks\n \n if label_peaks:\n for x, y, s in zip( peaks_x, peaks_y, names ):\n if blanked_figure:\n plt.text( x, y, s, size=12, color='1.0', horizontalalignment='left', verticalalignment='bottom' )\n else:\n plt.text( x, y, s, size=12, color='0.0', horizontalalignment='left', verticalalignment='bottom' )\n \n \n \n # Axis scaling\n xi, xf, yi, yf = ax.axis()\n if plot_region[0] != None: xi = plot_region[0]\n if plot_region[1] != None: xf = plot_region[1]\n if plot_region[2] != None: yi = plot_region[2]\n if plot_region[3] != None: yf = plot_region[3]\n if plot_region[0]==None and plot_region[1]==None and plot_region[2]==None and plot_region[3]==None:\n xf = max( xi, xf, yi, yf )\n yf = xf\n xi = -xf\n yi = -yf\n \n \n # Show central meridian of Ewald sphere\n qxys, qzs = ewaldsphere.central_meridian_arc()\n plt.plot( qxys, qzs, '-', color='0.5', linewidth=0.5 )\n plt.plot( -1*qxys, qzs, '-', color='0.5', linewidth=0.5 )\n \n \n \n ax.axis( [xi, xf, yi, yf] )\n \n if 
blanked_figure:\n plt.xticks( [] )\n plt.yticks( [] )\n else:\n plt.xlabel( r'$q_{xy} \\, (\\mathrm{\\AA^{-1}})$', size=30 )\n plt.ylabel( r'$q_{z} \\, (\\mathrm{\\AA^{-1}})$', size=30 )\n \n \n plt.savefig( filename, transparent=blanked_figure ) \n plt.close()", "def plot_fig43b_spreading_yeast():\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n y = np.loadtxt('csvs/ratio_lp50a10ad5hp0.0067379_yeast.txt')\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n x = np.concatenate((ldna_Llinks[::-1], ldna_Rlinks))\n ax.plot(x, y, color='k')\n ax.set_xlabel(r'Distance from TSS (bp)')\n ax.set_ylabel('Relative enrichment')\n #plot inset using Crabtree data\n axins = inset_axes(ax, width=\"40%\", height=\"40%\", \n bbox_to_anchor=(.1, .1, .8, .8),\n bbox_transform=ax.transAxes, loc=2)\n xcrabtree = np.array([-10256, -3077, -2241, -1485, -739, -309, -169, 489, 1746, 3087, 4400, 5300])\n REday0 = np.array([0.27, 0.13, 0.46, 0.12, 0.17, 0.33, 0.33, 0.31, 0.32, 0.27, 0.21, 0.33])\n REday5 = np.array([0.19, 0.40, 0.89, 1.55, 0.97, 1.25, 2.25, 3.57, 3.03, 2.09, 1.12, 0.14])\n ycrabtree = REday5/np.mean(REday0)\n axins.plot(xcrabtree, ycrabtree)\n axins.set_xlim([-10000, 10000])\n ax.set_xlim([-10000, 10000])\n #axins.set_ylabel('Relative enrichment', fontsize=8)\n #axins.set_xlabel('Distance from TSS (bp)', fontsize=8)\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.savefig(f'plots/thesis/fig43b_spreading-TSS-yeast.pdf', bbox_inches='tight')", "def plot_directed(glomnums):\n odor_corrs_means = []\n odor_corrs_SDs = []\n air_corrs_means = []\n air_corrs_SDs = []\n corrs_deltafrate = []\n fig = figure()\n for gni,glomnum in enumerate(glomnums):\n print \"Computing phasic and deltafrate correlations for # of gloms =\",glomnum\n ## Set graph=True below to plot neg corr-ed responses too.\n corr_deltafrate, odor_corrs, air_corrs, overall_odor_mean, overall_air_mean = \\\n plot_decorrs_special([glomnum],graph=True)\n ax = fig.add_subplot(len(glomnums),1,gni+1)\n #hist(air_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n # color='b',linewidth=2,label='air %2.1f'%overall_air_mean+'Hz')\n hist(odor_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n color='r',linewidth=2,label='odor %2.1f'%overall_odor_mean+'Hz')\n ax.set_xticks([])\n #ax.set_xticklabels(['0.75','1.25'])\n ## just to scale up the ticks fontsize.\n axes_labels(ax,'','',adjustpos=False,fontsize=34)\n\n corrs_deltafrate.append(corr_deltafrate)\n ## mean and SD of phasic correlations of odor and air\n odor_corrs_means.append(mean(odor_corrs))\n odor_corrs_SDs.append(std(odor_corrs))\n air_corrs_means.append(mean(air_corrs))\n air_corrs_SDs.append(std(air_corrs))\n\n ax.set_yticks([])\n #biglegend(legendlocation='upper left')\n if gni == len(glomnums)-1:\n ax.set_xticks([-1.0,0.0,1.0])\n ax.set_xticklabels(['-1','0','1'])\n axes_labels(ax,'phase 
correlation','',adjustpos=False,fontsize=30)\n plt.tight_layout()\n\n ## mean phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #plot(glomnums,air_corrs_means,color='b',linewidth=2,label='air')\n plot(glomnums,odor_corrs_means,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation mean',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='lower left')\n plt.tight_layout()\n ## spread of phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #errorbar(glomnums,air_corrs_SDs,color='b',linewidth=2,label='air')\n errorbar(glomnums,odor_corrs_SDs,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation spread',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='upper left')\n plt.tight_layout()\n ## delta frate corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n plot(glomnums,corrs_deltafrate,color='b',linewidth=2)\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','$\\Delta$frate correlation',\\\n adjustpos=False,fontsize=30)\n tight_layout()", "def plot_ecg_lead_derivative(self, leads=None, start_time=0, end_time=5):\n if leads is None:\n print(\"No lead number or list provided, defaulting to plot all...\")\n leads = self.lead_spec\n elif isinstance(leads, list):\n _lead = leads\n leads = np.zeros_like(self.lead_spec)\n for l in _lead:\n leads[l] = 1\n elif isinstance(leads, int):\n _lead = leads\n leads = np.zeros_like(self.lead_spec)\n leads[_lead] = 1\n else:\n print('Error: plot_leads() argument must be int, a list of ints, or empty for all leads')\n return\n\n # Try to import matplotlib\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print(\"matplotlib not installed\")\n return\n\n # Check to make sure ecg data is loaded\n if not self.ecg_data_loaded:\n print(\"ECG data not loaded yet...\")\n return\n\n # Loop through given leads to plot\n for index, lead in enumerate(leads):\n if lead != 1:\n pass\n else:\n print(\"Plotting lead: \" + str(index))\n # Resolution[0] = 10000nv = 0.01mv\n y = self.ecg_lead_derivatives[index][:5000] * (self.resolution[index] / 1000000.0)\n\n plt.plot(x=y, y=self.ecg_time_data[1:5001], label='lead ' + str(index))\n plt.title('ECG')\n plt.xlabel('s')\n plt.ylabel('mV')\n plt.xlim(start_time, end_time)\n plt.ylim(-1, 1)\n plt.legend()\n plt.show()\n\n return", "def plot(self, aVals, bVals):\n with self.pt as sp:\n # Top subplot: The range of interest\n X = np.linspace(self.xMin, self.xMax, self.N)\n self.subplot(sp, X, aVals, bVals)\n # Bottom subplot: Positive X surrounding the range of\n # interest\n X = np.linspace(0, 2*self.xMax, self.N)\n sp.add_axvline(self.xMin)\n sp.add_axvline(self.xMax)\n self.subplot(sp, X, aVals, bVals, semilog=True)\n self.pt.show()", "def control_plot(data: (List[int], List[float], pd.Series, np.array),\n upper_control_limit: (int, float), lower_control_limit: (int, float),\n highlight_beyond_limits: bool = True, highlight_zone_a: bool = True,\n highlight_zone_b: bool = True, highlight_zone_c: bool = True,\n highlight_trend: bool = True, highlight_mixture: bool = True,\n highlight_stratification: bool = True, highlight_overcontrol: bool = True,\n ax: Axis = None):\n\n data = 
coerce(data)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.plot(data)\n ax.set_title('Zone Control Chart')\n\n spec_range = (upper_control_limit - lower_control_limit) / 2\n spec_center = lower_control_limit + spec_range\n zone_c_upper_limit = spec_center + spec_range / 3\n zone_c_lower_limit = spec_center - spec_range / 3\n zone_b_upper_limit = spec_center + 2 * spec_range / 3\n zone_b_lower_limit = spec_center - 2 * spec_range / 3\n zone_a_upper_limit = spec_center + spec_range\n zone_a_lower_limit = spec_center - spec_range\n\n ax.axhline(spec_center, linestyle='--', color='red', alpha=0.6)\n ax.axhline(zone_c_upper_limit, linestyle='--', color='red', alpha=0.5)\n ax.axhline(zone_c_lower_limit, linestyle='--', color='red', alpha=0.5)\n ax.axhline(zone_b_upper_limit, linestyle='--', color='red', alpha=0.3)\n ax.axhline(zone_b_lower_limit, linestyle='--', color='red', alpha=0.3)\n ax.axhline(zone_a_upper_limit, linestyle='--', color='red', alpha=0.2)\n ax.axhline(zone_a_lower_limit, linestyle='--', color='red', alpha=0.2)\n\n left, right = ax.get_xlim()\n right_plus = (right - left) * 0.01 + right\n\n ax.text(right_plus, upper_control_limit, s='UCL', va='center')\n ax.text(right_plus, lower_control_limit, s='LCL', va='center')\n\n ax.text(right_plus, (spec_center + zone_c_upper_limit) / 2, s='Zone C', va='center')\n ax.text(right_plus, (spec_center + zone_c_lower_limit) / 2, s='Zone C', va='center')\n ax.text(right_plus, (zone_b_upper_limit + zone_c_upper_limit) / 2, s='Zone B', va='center')\n ax.text(right_plus, (zone_b_lower_limit + zone_c_lower_limit) / 2, s='Zone B', va='center')\n ax.text(right_plus, (zone_a_upper_limit + zone_b_upper_limit) / 2, s='Zone A', va='center')\n ax.text(right_plus, (zone_a_lower_limit + zone_b_lower_limit) / 2, s='Zone A', va='center')\n\n plot_params = {'alpha': 0.3, 'zorder': -10, 'markersize': 14}\n\n if highlight_beyond_limits:\n beyond_limits_violations = control_beyond_limits(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(beyond_limits_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(beyond_limits_violations, 'o', color='red', label='beyond limits', **plot_params)\n\n if highlight_zone_a:\n zone_a_violations = control_zone_a(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_a_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_a_violations, 'o', color='orange', label='zone a violations', **plot_params)\n\n if highlight_zone_b:\n zone_b_violations = control_zone_b(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_b_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_b_violations, 'o', color='blue', label='zone b violations', **plot_params)\n\n if highlight_zone_c:\n zone_c_violations = control_zone_c(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_c_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_c_violations, 'o', color='green', label='zone c violations', **plot_params)\n\n if highlight_trend:\n zone_trend_violations = control_zone_trend(data=data)\n if len(zone_trend_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_trend_violations, 'o', color='purple', label='trend violations', **plot_params)\n\n if 
highlight_mixture:\n zone_mixture_violations = control_zone_mixture(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_mixture_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_mixture_violations, 'o', color='brown', label='mixture violations', **plot_params)\n\n if highlight_stratification:\n zone_stratification_violations = control_zone_stratification(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_stratification_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_stratification_violations, 'o', color='orange', label='stratification violations',\n **plot_params)\n\n if highlight_overcontrol:\n zone_overcontrol_violations = control_zone_overcontrol(data=data,\n upper_control_limit=upper_control_limit,\n lower_control_limit=lower_control_limit)\n if len(zone_overcontrol_violations):\n plot_params['zorder'] -= 1\n plot_params['markersize'] -= 1\n ax.plot(zone_overcontrol_violations, 'o', color='blue', label='overcontrol violations',\n **plot_params)\n\n ax.legend()", "def draw_lattice(ax, lat, bondcolor='k', colormap='BlueBlackRed', lw=1, climv=0.1):\n if (lat.BL < 0).any():\n if lat.PVxydict is None:\n raise RuntimeError('PVxydict must be supplied to draw_lattice when periodic BCs exist!')\n else:\n PVx, PVy = PVxydict2PVxPVy(lat.PVxydict, lat.NL, lat.KL)\n # get indices of periodic bonds\n perINDS = np.unique(np.where(lat.BL < 0)[0])\n perBL = np.abs(lat.BL[perINDS])\n # # Check\n # print 'perBL = ', perBL\n # plt.plot(xy[:,0], xy[:,1],'b.')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.05, xy[i,1],str(i))\n # plt.show()\n normINDS = np.setdiff1d(np.arange(len(lat.BL)), perINDS)\n BLtmp = lat.BL[normINDS]\n lines = [zip(lat.xy[BLtmp[i, :], 0], lat.xy[BLtmp[i, :], 1]) for i in range(len(BLtmp))]\n\n xy_add = np.zeros((4, 2))\n\n # Add periodic bond lines to image\n for row in perBL:\n # print 'NL[row[0]] = ', NL[row[0]]\n colA = np.argwhere(lat.NL[row[0]] == row[1])[0][0]\n colB = np.argwhere(lat.NL[row[1]] == row[0])[0][0]\n xy_add[0] = lat.xy[row[0]]\n xy_add[1] = lat.xy[row[1]] + np.array([PVx[row[0], colA], PVy[row[0], colA]])\n xy_add[2] = lat.xy[row[1]]\n xy_add[3] = lat.xy[row[0]] + np.array([PVx[row[1], colB], PVy[row[1], colB]])\n # print 'first line : ', zip(xy_add[0:2,0], xy_add[0:2,1])\n # print 'second line : ', zip(xy_add[2:4,0], xy_add[2:4,1])\n lines += zip(xy_add[0:2, 0], xy_add[0:2, 1]), zip(xy_add[2:4, 0], xy_add[2:4, 1])\n\n # CHECK\n # line_segments = LineCollection(lines, # Make a sequence of x,y pairs\n # linewidths = lw, #could iterate over list\n # linestyles = 'solid',\n # cmap='seismic',\n # norm=plt.Normalize(vmin=-climv,vmax=climv))\n # ax.add_collection(line_segments)\n # for i in range(len(xy)):\n # ax.text(xy[i,0] + 0.05, xy[i,1],str(i))\n # plt.pause(.01)\n else:\n if np.shape(BL)[0] > 1:\n lines = [zip(lat.xy[lat.BL[i, :], 0], lat.xy[lat.BL[i, :], 1]) for i in range(np.shape(lat.BL)[0])]\n else:\n lines = [zip(lat.xy[lat.BL[i][0]], lat.xy[lat.BL[i][1]]) for i in range(np.shape(lat.BL)[0])]\n\n if bondcolor is None:\n line_segments = LineCollection(lines, # Make a sequence of x,y pairs\n linewidths=lw, # could iterate over list\n linestyles='solid',\n cmap=colormap,\n norm=plt.Normalize(vmin=-climv, vmax=climv))\n line_segments.set_array(bs)\n else:\n line_segments = LineCollection(lines, linewidths=lw, linestyles='solid', colors=bondcolor)\n\n 
ax.add_collection(line_segments)\n return ax", "def plot_fig43a_looping_TSS(a=10, loglog=True):\n\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n #convert a to basepairs\n a_in_bp = a / ncg.dna_params['lpb']\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n rvals = np.linspace(0.0, 1.0, 1000)\n integral_R = np.load('../deepti/csvs/Bprops/0unwraps/heterogenous/Sarah/Rlinks_1to50nucs/kinkedWLC_greens_Rlinks_50nucs_1000rvals.npy')\n integral_L = np.load('../deepti/csvs/Bprops/0unwraps/heterogenous/Sarah/Llinks_1to50nucs/kinkedWLC_greens_Llinks_50nucs_1000rvals.npy')\n Prob_a_Rlinks_kinked = wlc.prob_R_in_radius_a_given_L(a_in_bp, integral_R, rvals, Rlinks, unwrap)\n Prob_a_Llinks_kinked = wlc.prob_R_in_radius_a_given_L(a_in_bp, integral_L, rvals, Llinks, unwrap)\n #plot in units of nm^(-3)\n ax.plot(ldna_Rlinks, Prob_a_Rlinks_kinked/(ncg.dna_params['lpb']**3), '-o', markersize=3, color=teal_flucts)\n ax.plot(ldna_Llinks, Prob_a_Llinks_kinked/(ncg.dna_params['lpb']**3), '-o', markersize=3, color=teal_flucts)\n plt.xlabel(r'Distance from TSS (bp)')\n plt.ylabel(r'$P_\\mathrm{loop}(a=10\\mathrm{nm}; L)\\;\\;\\;(\\mathrm{nm}^{-3})$')\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.yscale('log')\n plt.ylim([10**-3, 10**-0.4])\n plt.xlim([-10000, 10000])\n plt.savefig(f'plots/thesis/fig43a_looping-TSS-yeast.pdf', bbox_inches='tight')", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def plot_examples(cms):\r\n data = 
amp_axis\r\n\r\n fig, axs = plt.subplots(1, 2, figsize=(30, 8)) #create two plots\r\n for [ax, cmap] in zip(axs, cms):\r\n psm = ax.pcolormesh(time_axis, tof_axis, data, cmap=cmap, rasterized=True, vmin = 250) #specify axis and minimum amplitude value to show on the graph\r\n fig.colorbar(psm, ax=ax, label = 'Amplitude') #define the legend of the amplitude data\r\n \r\n ax.set_ylabel('Time of Flight [\\u03bcs]') #set label for y axis\r\n ax.set_xlabel('Time [min]') #set label for x axis\r\n \r\n ax.hlines(8.744, 0, stop_time, colors = 'white') #create two white lines for the safe operating range for ToF\r\n ax.hlines(9.555, 0, stop_time, colors = 'white') \r\n \r\n plt.show()", "def bodePlot(H_s,w_range=(0,8),points=800):\n \n w = logspace(*w_range,points)\n h_s = lambdify(s,H_s,'numpy')\n H_jw = h_s(1j*w)\n \n # find mag and phase\n mag = 20*np.log10(np.abs(H_jw))\n phase = angle(H_jw,deg = True)\n \n eqn = Eq(H,simplify(H_s))\n display(eqn)\n \n fig,axes = plt.subplots(1,2,figsize=(18,6))\n ax1,ax2 = axes[0],axes[1]\n \n # mag plot\n ax1.set_xscale('log')\n ax1.set_ylabel('Magntiude in dB')\n ax1.set_xlabel('$\\omega$ in rad/s')\n ax1.plot(w,mag)\n ax1.grid()\n ax1.set_title(\"Magnitude of $H(j \\omega)$\")\n \n # phase plot\n ax2.set_ylabel('Phase in degrees')\n ax2.set_xlabel('$\\omega$ in rad/s')\n ax2.set_xscale('log')\n ax2.plot(w,phase)\n ax2.grid()\n ax2.set_title(\"Phase of $H(j \\omega)$\")\n \n plt.show()", "def plotbond(self):\n if self.dimension==1 or self.dimension==2:\n for bond in self.bonds:\n x = [bond.coordinate1[0],bond.coordinate2[0]]\n y = [bond.coordinate1[1],bond.coordinate2[1]]\n plt.plot(x,y,c=bond.color)\n elif self.dimension==3:\n for bond in self.bonds:\n x = [bond.coordinate1[0],bond.coordinate2[0]]\n y = [bond.coordinate1[1],bond.coordinate2[1]]\n z = [bond.coordinate1[2],bond.coordinate2[2]]\n\n plt.plot(x,y,z,c=bond.color)", "def plot_restriction_sites(self,cut_pos,colour='black',direction='up',add_text='',\n\t\t\ttemporary=None,delete_temporary=1,underline_unique=1):\n\t\tenzymes=cut_pos.keys() #enzyme names, for string label of each site\n\n\t\t#\n\t\t# Control vars\n\t\t#\n\t\tif not getattr(self,'new_seq_win_objs',None):\n\t\t\tself.new_seq_win_objs={}\n\t\t#\n\t\tstep=0\n\t\tcolour_cycle=[colour]#,'green','magenta','cyan','blue']\n\t\t\n\t\t# Keep track of all the sites we plot\n\t\t\n\t\tif not getattr(self,'donepos',None):\n\t\t\tself.donepos={}\n\t\tif not getattr(self,'temp_sites',None):\n\t\t\tself.temp_sites={}\n\t\t\tself.temp_objs={}\n\t\t\n\t\t# Delete all temporary sites from last time\n\t\t\n\t\tif delete_temporary:\n\t\t\tfor obj in self.temp_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.temp_objs={}\n\t\t\t\n\t\t\t# Remove the influence of the tempsites on self.donepos\t\t\t\n\t\t\tfor site in self.temp_sites.keys():\n\t\t\t\tself.donepos[site]=self.donepos[site]-self.temp_sites[site]\n\t\t\t#\n\t\t\tself.temp_sites={}\n\t\t\n\t\t# Plot up or down?\n\t\t\n\t\tif direction=='up':\n\t\t\toperator=-1\n\t\t\tbase_shift=0\n\t\t\ty_org_shift=0\n\t\telse:\n\t\t\t# Down\n\t\t\toperator=1\n\t\t\tbase_shift=60\n\t\t\ty_org_shift=10\n\t\t#\n\t\t#List for storing restr. 
label objects\n\t\t#\n\t\tif not getattr(self,'sites',None):\n\t\t\tself.sites=list()\n\t\tif not getattr(self,'tempsites',None):\n\t\t\tself.tempsites=list()\n\n\t\ti=0\n\t\t#\n\t\t# Main Loop\n\t\t#\n\t\tfor enzyme in enzymes:\n\t\t\tpositions=cut_pos[enzyme]\n\t\t\tfor site in positions:\n\t\t\t\t#print site\n\t\t\t\tfor subsite in site:\n\t\t\t\t\t#print subsite\n\t\t\t\t\tx,y=self.get_base_pos_on_screen(subsite+1)\n\t\t\t\t\tyorg=y # Store org y position as end-point for line\n\t\t\t\t\tyorg=yorg+y_org_shift\n\t\t\t\t\t#Apply the base shift for drawing upside or downside\n\t\t\t\t\ty=y+base_shift\n\n\t\t\t\t\t# If a comparison sequence is loaded raise more\n\t\t\t\t\tif self.show_comp_sequence.get()==1:\n\t\t\t\t\t\tif direction=='up':\n\t\t\t\t\t\t\tif self.maxseqlevel==0:\n\t\t\t\t\t\t\t\ty=(y-15)/self.y_scale\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ty=y-(self.maxseqlevel*15)/self.y_scale\n\t\t\t\t\t\t\tif self.primer_displayed==1 and self.maxseqlevel>0:\n\t\t\t\t\t\t\t\ty=(y-15)/self.y_scale\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ty=(y-15)*self.y_scale\n\n\t\t\t\t\tmaxlevel=self.canvas_height-50\n\t\t\t\t\t#determine if more than one cut at this site\n\t\t\t\t\tl=15\n\t\t\t\t\tlevel=1\n\t\t\t\t\tif self.donepos.has_key(subsite):\n\t\t\t\t\t if self.donepos[subsite]>1:\n\t\t\t\t\t\t\ty=y+l*operator\n\t\t\t\t\t\t\tlevel=level+1\n\n\t\t\t\t\t#iterate over donepos to see if nearby sites, and if so shift up\n\t\t\t\t\tfor donesite in self.donepos.keys():\n\t\t\t\t\t\t\tif abs(donesite-subsite)<15:\n\t\t\t\t\t\t\t\ty=y+l*operator\n\t\t\t\t\t\t\t\tlevel=level+1\n\t\t\t\t\t#\n\t\t\t\t\t# Should we only show unique sites?\n\t\t\t\t\t#\n\t\t\t\t\tif self.show_only_unique_sites.get()==1 and len(positions)>1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#\n\t\t\t\t\t# Maximum number of sites (other than the unique site button)\n\t\t\t\t\t#\n\t\t\t\t\tif len(positions)<=self.max_num_sites.get() or self.max_num_sites.get()==100:\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# Select colour\n\t\t\t\t\t\t#\n\t\t\t\t\t\tcol=colour_cycle[step]\n\t\t\t\t\t\tstep=step+1\n\t\t\t\t\t\tif step==len(colour_cycle):\n\t\t\t\t\t\t\tstep=0\n\n\t\t\t\t\t\t# Underline unique sites\n\t\t\t\t\t\tfont = self.restr_font.get() + \" 10\"\n\t\t\t\t\t\tuniquetag='nu'\n\t\t\t\t\t\tif underline_unique:\n\t\t\t\t\t\t\tif len(positions)==1:\n\t\t\t\t\t\t\t\tuniquetag='u'\n\t\t\t\t\t\t\t\tfont=font+' underline'\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Draw site/enzyme label here\n\t\t\t\t\t\t\n\t\t\t\t\t\tobj=self.seqframe.create_text(x,(y-45)/self.y_scale,text=enzyme+add_text,\n\t\t\t\t\t\t\t\t\t\tactivefill='red',font=font,anchor='sw',fill=col,\n\t\t\t\t\t\t\t\t\t\ttags=('textlabel',uniquetag,level,enzyme+add_text,subsite))\n\n\t\t\t\t\t\tif temporary:\n\t\t\t\t\t\t\tself.temp_objs[obj]=1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.new_seq_win_objs[obj]=1\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Keep track of where we've plotted everything\t\t\t\t\t\t\n\t\t\t\t\t\tif not self.donepos.has_key(subsite):\n\t\t\t\t\t\t\tself.donepos[subsite]=0\n\t\t\t\t\t\tself.donepos[subsite]=self.donepos[subsite]+1\n\t\t\t\t\t\t#\n\t\t\t\t\t\tif temporary:\n\t\t\t\t\t\t\tif not self.temp_sites.has_key(subsite):\n\t\t\t\t\t\t\t\tself.temp_sites[subsite]=0\n\t\t\t\t\t\t\tself.temp_sites[subsite]=self.temp_sites[subsite]+1\n\n\t\t\t\t\t\t#add site text item to list of those currently on canvas\n\t\t\t\t\t\t#if not temporary:\n\t\t\t\t\t\t#\tself.sites.append(obj)\n\n\n\t\t#try to tidy up labels on canvas - testing\n\t\tself.tidy_restriction_labels(direction,temporary,underline_unique)\n\t\t\n\t\t# Restriction 
details?\t\t\n\t\tself.restriction_details()\n\t\treturn", "def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n else:\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(8, 4))\n\n ax.plot(x, 'b', lw=1)\n if ind.size:\n label = 'valley' if valley else 'peak'\n label = label + 's' if ind.size > 1 else label\n ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,\n label='%d %s' % (ind.size, label))\n ax.legend(loc='best', framealpha=.5, numpoints=1)\n ax.set_xlim(-.02*x.size, x.size*1.02-1)\n ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()\n yrange = ymax - ymin if ymax > ymin else 1\n ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)\n ax.set_xlabel('Data #', fontsize=14)\n ax.set_ylabel('Amplitude', fontsize=14)\n mode = 'Valley detection' if valley else 'Peak detection'\n #ax.set_title(\"Deuxième détection\")\n ax.set_title(\"%s (mph=%s, mpd=%d, threshold=%s, edge='%s')\"\n % (mode, str(mph), mpd, str(threshold), edge))\n # plt.grid()\n plt.show()", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def plot_flights_n_boundary(df_flights, x_column, y_column, hue_column,\n color=None, alpha=1,\n color_column=None, color_map=None,\n df_lower_boundary=None, df_upper_boundary=None,\n xlims=None, ylims=None,\n title=None, xlabel=None, ylabel=None,\n lower_boundary_label=None,\n upper_boundary_label=None,\n highlight_flight=None,\n highlight_flights=None,\n label_highlighted_flights=False,\n include_legend=True):\n df_flights = df_flights.copy()\n fig, ax = plt.subplots()\n if xlims is None:\n x_min = round(min(df_flights[x_column]), -1)\n x_min_margin = round(0.1 * (round(min(df_flights[x_column]), -1)) -\n ceil(max(df_flights[x_column])), -2)\n x_max = ceil(max(df_flights[x_column]))\n x_max_margin = round(0.1 * (ceil(max(df_flights[x_column])) -\n round(min(df_flights[x_column]), -1)), -2)\n xlims = (x_min + x_min_margin, x_max + x_max_margin)\n else:\n xlims = sorted(xlims)\n df_flights = df_flights[\n df_flights[x_column] >= xlims[0]][\n df_flights[x_column] <= xlims[1]]\n if ylims is None:\n ylims = (\n round(min(df_flights[y_column]), -1),\n ceil(max(df_flights[y_column]))\n )\n else:\n ylims = sorted(ylims)\n df_flights = df_flights[\n 
df_flights[y_column] >= ylims[0]][\n df_flights[y_column] <= ylims[1]\n ]\n if xlabel is None:\n xlabel = x_column\n if ylabel is None:\n ylabel = y_column\n # if title is None:\n # title = y_column+' vs '+x_column\n if lower_boundary_label is None:\n lower_boundary_label = 'Lower boundary'\n if upper_boundary_label is None:\n upper_boundary_label = 'Upper boundary'\n\n if color is None:\n if color_map is None:\n color = None\n else:\n color = [color_map[i] for i in color_column]\n ax.set_xlim(*xlims)\n ax.set_ylim(*ylims)\n array_collection = [\n np.transpose(\n np.column_stack(\n df_flights[df_flights[hue_column] == flight_id]\n [[x_column, y_column]].to_numpy())\n ) for flight_id in df_flights[hue_column].unique()\n ]\n line_segments = LineCollection(array_collection,\n linewidths=(0.5, 1, 1.5, 2),\n linestyles='solid',\n alpha=alpha,\n color=color,\n label='Flights')\n line_segments.set_array(np.arange(len(array_collection)))\n ax.add_collection(line_segments)\n\n if (df_lower_boundary is not None) & (df_upper_boundary is not None):\n _ = ax.plot(df_lower_boundary[x_column],\n df_lower_boundary[y_column],\n color='black',\n linewidth=2.5,\n label=lower_boundary_label)\n _ = ax.plot(df_upper_boundary[x_column],\n df_upper_boundary[y_column],\n color='black',\n linewidth=2.5,\n label=upper_boundary_label)\n # ax.fill_betweenx(df_lower_boundary[y_column],\n # df_lower_boundary[x_column],\n # df_upper_boundary[x_column],\n # color=\"black\",\n # alpha=0.2)\n ax.fill_between(df_lower_boundary[x_column],\n df_lower_boundary[y_column],\n df_upper_boundary[y_column],\n color=\"black\",\n alpha=0.2)\n\n if highlight_flight is not None:\n _ = ax.plot(df_flights[df_flights[hue_column] ==\n highlight_flight][x_column],\n df_flights[df_flights[hue_column] ==\n highlight_flight][y_column],\n color='red',\n linewidth=1.25,\n label=highlight_flight)\n\n if highlight_flights is not None:\n for flight in highlight_flights:\n _ = ax.plot(df_flights[df_flights[hue_column] ==\n flight][x_column],\n df_flights[df_flights[hue_column] ==\n flight][y_column],\n color='red',\n linewidth=1.25,\n label=flight if label_highlighted_flights else \"_nolegend_\")\n\n if include_legend:\n ax.legend()\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n\n plt.show()\n\n return fig, ax", "def plot_planned_trajectory(ax, xs, ys, headings, steers, physical_params, interval = 20):\n ax.plot(xs, ys, color=\"r\")\n for i in range(len(steers)):\n # ellipse = Ellipse(xy = (x, y), width = x_length, height = y_length, angle = np.rad2deg(heading), alpha = 0.4, ec = \"k\", fc = fc)\n # ax.add_patch(ellipse)\n if i % interval == 0:\n plot_vehicle(ax, xs[i], ys[i], headings[i], steers[i], 0.7, 0.7, physical_params.wheel_length, physical_params.wheel_width)\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n ax.axis('equal')", "def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()" ]
[ "0.6571925", "0.6494316", "0.6387066", "0.6355867", "0.6060526", "0.60148257", "0.59904087", "0.5880127", "0.5846436", "0.5815426", "0.5802742", "0.574608", "0.5697785", "0.56814563", "0.56812817", "0.5646751", "0.5629472", "0.5623733", "0.5613848", "0.5612298", "0.56044704", "0.56009567", "0.55846334", "0.557411", "0.5562398", "0.5559633", "0.55519634", "0.5542147", "0.55391467", "0.5531426" ]
0.65872544
0
Plot the band structure of an infinite lead
def plot_bands(self, start: float = -pi, end: float = pi, step: float = 0.05, **kwargs) -> None:
    bands = self.calc_bands(start, end, step)
    bands.plot(**kwargs)
    plt.title("lead {}".format(self.index))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def band_plot(N=400,a=1.0):\n foot_step=2*np.pi/N\n x=np.arange(0.0,2*np.pi/a,foot_step)\n y=band_energy(x)\n plt.plot(x,y)", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def display_band_structure_1d(num_atoms, h_poly, cycles = 1, phase_offset = 0):\n x = []\n y = [[] for i in range(num_atoms)]\n n = 100*cycles\n for k in range(-n/2, n/2):\n # for k in range(0, n):\n alpha = 2*math.pi*k/n+phase_offset\n phase = numpy.exp(alpha*1j)\n #h_minus, h_zero, h_plus = compute_hamiltonian(num_atoms, atoms, bonds)\n #h = h_minus*phase.conjugate()+h_zero+h_plus*phase\n h = eval_hamiltonian(num_atoms, h_poly, (phase, 1))\n\n e, v = eigensystem(h)\n #print k,h,e\n\n x.append(alpha)\n for i in range(num_atoms):\n y[i].append(e[i])\n\n for i in range(num_atoms):\n # matplotlib.pyplot.plot(x, y[i])\n for cycle in range(0, cycles):\n matplotlib.pyplot.plot(x[0:100], y[i][100*cycle:100*(cycle+1)])\n # matplotlib.pyplot.show()", "def _plot_band(self, ax, x_coord):\n ax.plot(\n [x_coord - self.band_width / 2.0, x_coord + self.band_width / 2.0],\n [-self.migration_distance, -self.migration_distance],\n lw=self.band_thickness,\n c=self.band_color,\n )", "def plotband(_band, _period):\n # Frequency = 1 / Period\n _freq = 1 / _period\n _xfit, _lobf = calclobf(_band, _period)\n # Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit\n plt.style.use('seaborn-whitegrid')\n plt.errorbar((_band[:, 0] * _freq) % 1, _band[:, 1], _band[:, 2], fmt='.', color='gray',\n ecolor='lightgray', capsize=0, zorder=0)\n # Plot the graph of the line of best fit\n plt.plot(_xfit, _lobf, '-k', lw=2, zorder=2)\n # Set x-axis limits to 1 period\n plt.xlim(0, 1)\n # Set graph and axis titles\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitude\")\n plt.title(\"Folded light curve\")\n # Invert y-axis as convention\n plt.gca().invert_yaxis()\n # Display to screen\n plt.show()", "def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt=\"pdf\"):\n\n 
eigenval_lines = open(\"EIGENVAL\").readlines()\n kpoints_lines = open(\"KPOINTS\").readlines()\n\n # IBZ k-points used for SCF but not useful for plotting bands.\n ibz_kpoints = [k for k in kpoints_lines[3:] if int(k.split()[3]) != 0]\n # Lines containing hig-symmetry k-points (e.g. Gamma)\n vertex_lines = [k for k in kpoints_lines[3:] if len(k.split()) == 5]\n n_bands = int(eigenval_lines[5].split()[2])\n with open(\"OUTCAR\", \"r\") as outcar:\n for line in outcar:\n if \"E-fermi\" in line:\n efermi = float(line.split()[2])\n break\n spin_polarized = False\n if len(eigenval_lines[8].split()) == 5:\n spin_polarized = True\n\n bs_kpoints = []\n vertices = []\n bands = [[[], []] for x in range(n_bands)]\n\n i = 7 + len(ibz_kpoints)*(n_bands+2)\n while i < len(eigenval_lines):\n kpt_coords = [float(x) for x in eigenval_lines[i].split()[:3]]\n for kpt in vertex_lines:\n ref_coords = [float(x) for x in kpt.split()[:3]]\n if euclidean(kpt_coords, ref_coords) < 0.0001:\n kpt_coords.append(kpt.split()[-1])\n vertices.append(kpt_coords)\n break\n bs_kpoints.append(kpt_coords)\n for j in range(n_bands):\n i += 1\n split_line = eigenval_lines[i].split()\n bands[j][0].append(float(split_line[1]) - efermi)\n if spin_polarized:\n bands[j][1].append(float(split_line[2]) - efermi)\n i += 2\n\n path_lengths, kpt_distances = [], [0]\n discontinuity = False\n for i in range(1, len(vertices)):\n if discontinuity:\n path_lengths.append(0)\n else:\n path_lengths.append(euclidean(vertices[i][:3],vertices[i-1][:3]))\n\n if i < len(vertices)-1 and vertices[i][3] != vertices[i-1][3] and\\\n vertices[i][3] != vertices[i+1][3] and not discontinuity:\n discontinuity = True\n else:\n discontinuity = False\n\n n_kpt_divs = len(bs_kpoints) / float(len(path_lengths))\n\n x, j = 0, 0\n for i in range(1, len(bs_kpoints)):\n if len(bs_kpoints[i]) == 4 and len(bs_kpoints[i-1]) == 4 and \\\n bs_kpoints[i][3] != bs_kpoints[i-1][3]:\n x += 0\n else:\n x += euclidean(bs_kpoints[i][:3], bs_kpoints[i-1][:3])\n kpt_distances.append(x)\n\n ax = plt.figure(figsize=(11, 8.5)).gca()\n font = FontProperties()\n font.set_size(24)\n font.set_family(\"serif\")\n large_font = font.copy()\n large_font.set_size(32)\n\n for b in bands:\n ax.plot(kpt_distances, b[0], 'b-')\n if spin_polarized:\n ax.plot(kpt_distances, b[1], 'r--')\n if draw_fermi:\n ax.plot([min(kpt_distances), max(kpt_distances)], [0, 0], 'k-')\n ax.set_xlim(min(kpt_distances), max(kpt_distances))\n ax.set_xticks([])\n\n d = 0\n ax.text(d, ylim[0]*1.05, r\"$\\mathrm{%s}$\" % vertices[0][-1],\n fontproperties=font, verticalalignment=\"top\",\n horizontalalignment=\"center\")\n for i in range(len(path_lengths)):\n d += path_lengths[i]\n if i < len(path_lengths)-1 and path_lengths[i+1] == 0 and\\\n vertices[i+1][-1] != vertices[i+2][-1]:\n label = \"{}|{}\".format(vertices[i+1][-1], vertices[i+2][-1])\n else:\n label = vertices[i+1][-1]\n if path_lengths[i] != 0:\n ax.text(d, ylim[0]*1.05, r\"$\\mathrm{%s}$\" % label,\n fontproperties=font, verticalalignment=\"top\",\n horizontalalignment=\"center\")\n ax.plot([d, d], [ylim[0], ylim[1]], 'k--')\n\n ax.set_ylim(ylim)\n ax.set_ylabel(r\"$\\mathrm{E - E_F (eV)}$\", fontproperties=large_font)\n ax.set_yticklabels([int(t) for t in ax.get_yticks()], fontproperties=font)\n plt.savefig(\"band_structure.{}\".format(fmt))", "def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):\n # Set pyplot style to be consisten within the program\n plt.style.use('seaborn-whitegrid')\n # Frequency = 1 / Period\n _freq = 1 / _period\n\n # 
Create single dataset from all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n # Iterate through each band and plot to screen\n i = 0\n while i < 5:\n # Array to set colours for each band\n _colours = ['-b', '-g', '-r', '-c', '-m']\n # Array to set strings for graph legend\n _legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']\n # Determine the line of best fit for each band\n _xfit, _lobf = calclobf(_bands[i], _period)\n # Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit\n plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])\n i += 1\n\n # Set x-axis limit to a single period\n plt.xlim(0, 1)\n # Set graph and axis titles\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitude\")\n plt.title(\"Folded light curve\")\n # Show the legend\n plt.legend()\n # Invert y-axis as convention\n plt.gca().invert_yaxis()\n # Save to current folder\n plt.savefig('curve.png')\n # Display to screen\n plt.show()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def plot_bond_lattice(lattice, worm, observables):\n # create bond grid for plotting\n line_range = np.linspace(0, lattice.L, lattice.L+1)\n x_grid, y_grid = np.meshgrid(line_range, line_range)\n\n\n # initialize figure.\n fig = plt.figure(figsize=(9, 9))\n ax = plt.axes(xlim=(0, lattice.L), ylim=(0, lattice.L))\n ax.set_xlabel(r'$T = %.2f,\\;\\langle H \\rangle = %.3f$' \n % (observables.T_range[-1], observables.mean_energy[0, -1]),\n fontsize=16, position=(0.5,-0.085))\n plt.subplots_adjust(bottom=0.1, top=0.96, right=0.96, left=0.04)\n # create grid (gray lines).\n plt.plot(x_grid, y_grid, c='#dddddd', lw=1)\n plt.plot(y_grid, x_grid, c='#dddddd', lw=1)\n ax.set_title(r'$\\rm{\\bf High\\ Temperature\\ Domain\\!\\ }$',\n fontsize=14, loc=('center'))\n # convert boolean bond data to numeric arrays for plotting.\n colors = ['aquamarine', 'midnightblue', 'skyblue', 'blueviolet', 'cadetblue', 'cornflowerblue', 'coral', 'firebrick', 'purple']\n #colors = ['azure']*8\n\n # plot bond lines.\n cm = plt.get_cmap('jet')\n #ax.set_color_cycle([cm(1.*i/(worm.q-1)) for i in range(worm.q-1)])\n for i in range(1, 2):\n xh = x_grid[lattice.bonds[0]==i].flatten()\n yh = y_grid[lattice.bonds[0]==i].flatten()\n xv = x_grid[lattice.bonds[1]==i].flatten()\n yv = y_grid[lattice.bonds[1]==i].flatten()\n h_bonds = np.hstack((np.vstack((xh, xh+1)), np.vstack((xv, xv))))\n v_bonds = np.hstack((np.vstack((yh, yh)), np.vstack((yv, yv+1))))\n plt.plot(h_bonds, v_bonds, 'r', lw=3)\n\n # plot worm head and tail.\n plt.plot(worm.tail[0], worm.tail[1], 'bs', ms=10)\n plt.plot(worm.head[0], worm.head[1], 'g>', ms=15)\n # disable clipping to show periodic bonds.\n for o in fig.findobj():\n o.set_clip_on(False)", "def plot_basins(f, Df, zeros, domain, res=1000, iters=15):\n raise NotImplementedError(\"Problem 7 Incomplete\")", "def plot_band( # pylint: disable=too-many-statements,too-many-locals,too-many-branches\n band: ty.Union[dict, orm.BandsData],\n ref_zero: float = 0,\n ax=None,\n):\n from matplotlib import rc\n\n if ref_zero is None:\n ref_zero = 0\n\n # Uncomment to change default font\n # rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n rc(\n \"font\",\n **{\n \"family\": \"serif\",\n \"serif\": [\n \"Computer Modern\",\n \"CMU Serif\",\n \"Times New Roman\",\n \"DejaVu Serif\",\n ],\n },\n )\n # To use proper font for, e.g., Gamma if usetex is set to False\n rc(\"mathtext\", fontset=\"cm\")\n\n rc(\"text\", usetex=True)\n # Deprecated\n # 
https://github.com/matplotlib/matplotlib/blob/main/doc/api/prev_api_changes/api_changes_3.3.0/deprecations.rst#textlatexpreview-rcparam\n # plt.rcParams.update({'text.latex.preview': True})\n\n print_comment = False\n\n all_data = get_band_dict(band)\n\n if not all_data.get(\"use_latex\", False):\n rc(\"text\", usetex=False)\n\n # x = all_data['x']\n # bands = all_data['bands']\n paths = all_data[\"paths\"]\n tick_pos = all_data[\"tick_pos\"]\n tick_labels = all_data[\"tick_labels\"]\n\n # Option for bands (all, or those of type 1 if there are two spins)\n further_plot_options1 = {}\n further_plot_options1[\"color\"] = all_data.get(\"bands_color\", \"k\")\n further_plot_options1[\"linewidth\"] = all_data.get(\"bands_linewidth\", 0.5)\n further_plot_options1[\"linestyle\"] = all_data.get(\"bands_linestyle\", None)\n further_plot_options1[\"marker\"] = all_data.get(\"bands_marker\", None)\n further_plot_options1[\"markersize\"] = all_data.get(\"bands_markersize\", None)\n further_plot_options1[\"markeredgecolor\"] = all_data.get(\n \"bands_markeredgecolor\", None\n )\n further_plot_options1[\"markeredgewidth\"] = all_data.get(\n \"bands_markeredgewidth\", None\n )\n further_plot_options1[\"markerfacecolor\"] = all_data.get(\n \"bands_markerfacecolor\", None\n )\n\n # Options for second-type of bands if present (e.g. spin up vs. spin down)\n further_plot_options2 = {}\n further_plot_options2[\"color\"] = all_data.get(\"bands_color2\", \"r\")\n # Use the values of further_plot_options1 by default\n further_plot_options2[\"linewidth\"] = all_data.get(\n \"bands_linewidth2\", further_plot_options1[\"linewidth\"]\n )\n further_plot_options2[\"linestyle\"] = all_data.get(\n \"bands_linestyle2\", further_plot_options1[\"linestyle\"]\n )\n further_plot_options2[\"marker\"] = all_data.get(\n \"bands_marker2\", further_plot_options1[\"marker\"]\n )\n further_plot_options2[\"markersize\"] = all_data.get(\n \"bands_markersize2\", further_plot_options1[\"markersize\"]\n )\n further_plot_options2[\"markeredgecolor\"] = all_data.get(\n \"bands_markeredgecolor2\", further_plot_options1[\"markeredgecolor\"]\n )\n further_plot_options2[\"markeredgewidth\"] = all_data.get(\n \"bands_markeredgewidth2\", further_plot_options1[\"markeredgewidth\"]\n )\n further_plot_options2[\"markerfacecolor\"] = all_data.get(\n \"bands_markerfacecolor2\", further_plot_options1[\"markerfacecolor\"]\n )\n\n if ax is None:\n fig = plt.figure()\n p = fig.add_subplot(1, 1, 1) # pylint: disable=invalid-name\n else:\n p = ax # pylint: disable=invalid-name\n\n first_band_1 = True\n first_band_2 = True\n\n for path in paths:\n if path[\"length\"] <= 1:\n # Avoid printing empty lines\n continue\n x = path[\"x\"]\n # for band in bands:\n # pylint: disable=redefined-argument-from-local\n for band, band_type in zip(path[\"values\"], all_data[\"band_type_idx\"]):\n # For now we support only two colors\n if band_type % 2 == 0:\n further_plot_options = further_plot_options1\n else:\n further_plot_options = further_plot_options2\n\n # Put the legend text only once\n label = None\n if first_band_1 and band_type % 2 == 0:\n first_band_1 = False\n label = all_data.get(\"legend_text\", None)\n elif first_band_2 and band_type % 2 == 1:\n first_band_2 = False\n label = all_data.get(\"legend_text2\", None)\n\n p.plot(x, [_ - ref_zero for _ in band], label=label, **further_plot_options)\n\n p.set_xticks(tick_pos)\n p.set_xticklabels(tick_labels)\n p.set_xlim([all_data[\"x_min_lim\"], all_data[\"x_max_lim\"]])\n p.set_ylim([all_data[\"y_min_lim\"] - 
ref_zero, all_data[\"y_max_lim\"] - ref_zero])\n p.xaxis.grid(True, which=\"major\", color=\"#888888\", linestyle=\"-\", linewidth=0.5)\n\n if all_data.get(\"plot_zero_axis\", False):\n p.axhline(\n 0.0,\n color=all_data.get(\"zero_axis_color\", \"#888888\"),\n linestyle=all_data.get(\"zero_axis_linestyle\", \"--\"),\n linewidth=all_data.get(\"zero_axis_linewidth\", 0.5),\n )\n if all_data[\"title\"]:\n p.set_title(all_data[\"title\"])\n if all_data[\"legend_text\"]:\n p.legend(loc=\"best\")\n p.set_ylabel(all_data[\"yaxis_label\"])\n\n try:\n if print_comment:\n print(all_data[\"comment\"])\n except KeyError:\n pass\n\n if ax is None:\n plt.show()", "def f4():\n n = 4\n v = np.arange(n)**0.75 * 0.2\n e = (np.arange(n)+1)**0.7 * 1e-1\n\n n = 12\n v = np.arange(n)\n e = np.array([0.1]*n) * 10e-0\n\n print(Sumb(v,e))\n\n f = plt.figure()\n a = f.add_subplot(111)\n\n dx = 0.0001\n x = np.arange(-1,v[-1]+1,dx)\n y = x.copy()\n y[:] = 0.\n for i in range(n):\n yx = lg(x,v[i],e[i])\n a.plot(x,np.exp(yx),label='{:d}'.format(i))\n y += yx\n y = np.exp((y - np.max(y))/n**2)\n y /= np.sum(y) * dx \n a.plot(x,y,label='sum')\n s = np.argsort(y)[::-1]\n ys = np.cumsum(y[s]) * dx\n yi = np.argwhere(ys > 0.682689492137)[0][0]\n print('mean = {:2f}'.format(x[s[0]]))\n print('sigma = {:2f}'.format(yi*dx/2))\n xy = np.ndarray((yi+2,2))\n i0,i1 = min(s[:yi]), max(s[:yi])\n xy[:yi,0] = x[i0:i1+1]\n xy[:yi,1] = y[i0:i1+1]\n xy[yi:,1] = 0\n xy[yi:,0] = x[[i1,i0]]\n a.add_patch(Polygon(xy,fill=True,color='green',ec='none',alpha=0.25))\n \n leg = plt.legend()\n plt.draw()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / 
(3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()", "def plot_basins(f, Df, zeros, domain, res=1000, iters=15):\r\n #initialize grid\r\n r = np.linspace(domain[0],domain[1],res)\r\n i = np.linspace(domain[2],domain[3],res)\r\n real, imag = np.meshgrid(r,i)\r\n X = real + 1j*imag\r\n for t in range(iters):\r\n #run Newton's method iters times\r\n Xk = X\r\n X = Xk - f(Xk)/Df(Xk)\r\n Y = np.zeros((res,res))\r\n #for each entry, find the closest zero and store in Y\r\n for l in range(res):\r\n for k in range(res):\r\n Y[l,k] = np.argmin(abs(zeros - X[l,k]))\r\n #plot the results\r\n plt.pcolormesh(real, imag, Y, cmap=\"brg\")\r\n plt.title('Zeros Basins')\r\n plt.show()", "def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = 
fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def plot_bands(calc_layer_stack_output, layers):\r\n points = calc_layer_stack_output['points']\r\n Evac = calc_layer_stack_output['Evac']\r\n num_points = len(points)\r\n \r\n # Note: layer_list is NOT the same as layers = [layer0, layer1, ...],\r\n # layer_list is [layer0, layer0, ... layer1, layer1, ... ], i.e. the\r\n # layer of each successive point.\r\n layer_list = [where_am_I(layers, pt)['current_layer']\r\n for pt in points]\r\n matl_list = [layer.matl for layer in layer_list]\r\n chi_list = [matl.chi for matl in matl_list]\r\n EG_list = [matl.EG for matl in matl_list]\r\n CB_list = [Evac[i] - chi_list[i] for i in range(num_points)]\r\n VB_list = [CB_list[i] - EG_list[i] for i in range(num_points)]\r\n EF_list = [0 for i in range(num_points)]\r\n \r\n# dx = points[1] - points[0]\r\n# CB_pad = np.pad(CB_list, 1, \"edge\")\r\n# elec_list = [(CB_pad[i+1] - CB_pad[i-1])/(2*dx) for i in range(1, len(CB_pad)-1)]\r\n \r\n plt.figure()\r\n \r\n plt.plot(points,CB_list,'k-', #conduction band: solid black line\r\n points,VB_list,'k-', #valence band: solid black line\r\n points,EF_list,'r--') #fermi level: dashed red line\r\n \r\n # Draw vertical lines at the boundaries of layers\r\n for i in range(len(layers)-1):\r\n plt.axvline(sum(layer.lz for layer in layers[0:i+1]), \r\n color = 'k', linewidth = 1, linestyle = '--')\r\n \r\n # The title of the graph describes the stack\r\n # for example \"1.3e18 n-Si / 4.5e16 p-Si / 3.2e17 n-Si\"\r\n layer_name_string_list = ['{:.1e}'.format(layer.dope) + ' '\r\n + layer.n_or_p + '-' + layer.matl.name\r\n for layer in layers]\r\n# plt.title(' / '.join(layer_name_string_list))\r\n plt.xlabel('Position (nm)')\r\n plt.ylabel('Electron energy (eV)')\r\n plt.xlim(0, sum(layer.lz for layer in layers))", "def waterfall( array, ax = None, offset = None, border = 0, labels = True, bins = None, show = True, **kwargs ):\n\n if array.ndim is 2:\n if offset is None:\n offset = np.max( np.average( array, axis = 0 ) )\n\n fig = plt.figure( figsize = ( 6, 6 ) )\n bgcolor = 'w'\n ax = fig.add_subplot( 111, facecolor = bgcolor, **kwargs )\n color = 'k'\n\n if bins is None:\n bins = np.arange( array.shape[1] )\n\n\n x_min = 0\n x_max = len( bins ) - 1\n y_min = 0 - offset\n y_max = ( 1 + len( array ) ) * offset\n x_low = x_min - ( x_max - x_min ) * border\n x_high = ( x_max - x_min ) * border + x_max\n y_low = y_min - ( y_max - y_min ) * border\n y_high = ( y_max - y_min ) * border + y_max\n\n\n for i in np.arange( len( array ) ):\n ax.plot( array[i][bins] + offset * i, color )\n\n ax.set_xlim( x_low, x_high )\n 
ax.set_ylim( y_low, y_high )\n\n if not labels:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n if show:\n plt.show()\n else:\n plt.close()\n else:\n raise DimensionError( \"Invalid dimensions. Required: 2. (Actual: {})\".format( array.ndim ) )\n\n return ax", "def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()", "def lcfig( z=7 ):\n\n for modelname in ['z15G','z25G','z40G'] :\n\n # initialize a supernova model :\n snmodel = sncosmo.Model(source=modelname)\n\n # Fix the redshift for this instantiation of the model\n # (NOTE: this does not apply any cosmological dimming. It only\n # shifts the wavelengths)\n snmodel.set( z = z )\n\n # generate the H band light curve\n tobs = np.arange( 0, 1000, 10 )\n M160 = snmodel.bandmag( 'f160w', 'ab', tobs ) # Absolute Magnitude\n m160 = M160 + cosmo.distmod( z ).value # apparent magnitude\n\n pl.plot( tobs, m160 )\n ax = pl.gca()\n ax.invert_yaxis()\n ax.set_xlabel('Time (observer-frame days)')\n ax.set_ylabel('Apparent Magnitude in F160W')\n ax.set_xlim( 0, 1000 )\n ax.set_ylim( 36, 28 )\n\n pl.draw()", "def n27_and_sidebands():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=26 through n=29\n folder = os.path.join(\"..\", \"..\", \"2018-09-06\")\n fname = \"1_dye_fscan.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n ax.axhline(0, color='grey')\n data.plot(x='fpoly', y='sig', label=\"MW Off\", c='k', ax=ax)\n # sidebands\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"1_freq_dye.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['asig'] = data['sig'] - 0.3\n ax.axhline(-0.3, color='grey')\n data.plot(x='fpoly', y='asig', label=\"MW On\", c='k', ax=ax)\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(\"Frequency (GHz from Limit)\")\n ax.set_xticks([-4863, -4511, -4195, -3908])\n ax.text(-4400, -0.15, \"MW On\")\n ax.text(-4400, 0.3, \"MW Off\")\n # save\n fig.tight_layout()\n fig.savefig(\"n27_and_sidebands.pdf\")\n return", "def plot_bs(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=1.0, subtract = 0):\n cm = matplotlib.cm.jet\n\n if (subtract !=0):\n geqbispectra = subtract\n else:\n geqbispectra = np.zeros(np.shape(self.eqbispectra))\n\n if (density):\n \"\"\" also read the local overdensity value and plot line colors according to\n the density value, + = red, - = blue; adjust alpha accordingly\n \"\"\"\n if len(self.ds)<self.Nsubs:\n print (\"no density data\")\n return 0\n\n ads=np.abs(self.ds)\n meands=np.mean(self.ds)\n mads=np.max(ads)\n normds=np.array([ads[i]/mads for i in range(len(ads))])\n self.normds=normds\n\n cNorm = colors.Normalize(min(self.ds), vmax=max(self.ds))\n scalarMap = cmap.ScalarMappable(norm=cNorm, cmap=cm)\n scalarMap.set_array([])\n\n fig, ax = self.plt.subplots()\n\n for sub in range(self.Nsubs):\n #print sub\n if not(density):\n lplot=ax.plot(self.klist, self.fNLeq[sub])\n else:\n colorVal = scalarMap.to_rgba(self.ds[sub])\n lplot = ax.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=colorVal, alpha=normds[sub], linewidth=lw)\n \"\"\"\n if self.ds[sub]>meands:\n self.plt.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n else:\n self.plt.plot(self.klist[1:-1], 
self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n \"\"\"\n\n ax.set_xlabel(r\"$k {\\rm (h/Mpc)}$\")\n ax.set_ylabel(r\"${\\rm Q}(k)$\")\n ax.set_xscale('log')\n cbar = fig.colorbar(scalarMap, format='%.0e')\n #self.plt.yscale('log')\n if (show):\n self.plt.show()", "def one_period_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n\n NeA = object.NeA\n latA = object.latA\n times = object.secondsA\n mlt = object.mltA\n ind1 = 2606 #lat inds\n ind2 = 13940 #lat inds\n \n ind1 = 3197 #mlat inds\n ind2 = 14390 #mlat inds\n \n T = ind2 - ind1\n ind1 += int(T/2)\n ind2 += int(T/2)\n\n latA = latA[ind1:ind2]\n NeA = NeA[ind1:ind2]\n # NeA = object.meanie(NeA, 5)\n times = times[ind1:ind2]\n mlt = mlt[ind1:ind2]\n mlt = hour_round(mlt)\n\n lats = np.zeros_like(latA)\n lats[0] = latA[0]\n for i in range(len(latA)-1):\n dlat = latA[i+1] - latA[i]\n if dlat < 0:\n lats[i+1] = lats[i] - dlat\n else:\n lats[i+1] = lats[i] + dlat\n\n lats += 90\n\n xticks = np.array([-90, -70, -30, 30, 70, 110, 150, 210, 250, 270]) + 90\n gridticks = np.array([-90, -70, -30, 30, 70, 77, 103, 110, 150, 210, 250, 270]) + 90\n # plt.plot(lats, NeA, \".\", markersize = 1)\n # plt.plot([0, 0], [0, np.max(NeA)], \"k\")\n # plt.plot([30, 30], [0, np.max(NeA)], \"k\")\n # plt.plot([60, 60], [0, np.max(NeA)], \"k\")\n # plt.plot([120, 120],[0, np.max(NeA)], \"k\")\n # plt.plot([150, 150], [0, np.max(NeA)], \"k\")\n # plt.plot([167, 167], [0, np.max(NeA)], \"k\")\n # plt.plot([193, 193], [0, np.max(NeA)], \"k\")\n # plt.plot([210, 210], [0, np.max(NeA)], \"k\")\n # plt.plot([240, 244], [0, np.max(NeA)], \"k\")\n # plt.plot([300, 300], [0, np.max(NeA)], \"k\")\n # plt.plot([330, 330], [0, np.max(NeA)], \"k\")\n # plt.plot([360, 360], [0, np.max(NeA)], \"k\")\n # plt.xticks(xticks)\n # plt.xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n # plt.ylabel(\"Electron density [cm$^{-1}$]\")\n # plt.title(\"One SWARM satellite period\")\n # plt.grid(\"on\", axis = \"x\", xdata = gridticks)\n #adding letters\n x = (gridticks[:-1] + gridticks[1:])/2 - 3\n y = np.zeros_like(x) - np.max(NeA)/40\n s = [\"S\", \"B\", \"A\", \"B\", \"C\", \"D\", \"C\", \"B\", \"A\", \"B\", \"S\"]\n # for i in range(len(x)):\n # plt.text(x[i], y[i], s[i], fontsize = 10)\n # plt.savefig(\"Figures/swarm_period.pdf\")\n # plt.show()\n\n # plt.plot(times, latA)\n # plt.plot(times, mlt)\n # plt.show()\n print(lats[0])\n print(lats[-1])\n \n fig, ax = plt.subplots()\n ax.plot(lats, NeA, \".\", markersize = 1)\n ax.set_xticks(xticks, minor=False)\n ax.set_xticks([167, 193], minor=True)\n ax.xaxis.grid(True, which = \"major\")\n ax.xaxis.grid(True, which = \"minor\")\n for i in range(len(x)):\n ax.text(x[i], y[i], s[i], fontsize = 10)\n ax.set_xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n ax.set_ylabel(\"Electron density [cm$^{-1}$]\")\n ax.set_title(\"One Swarm satellite period\")\n # plt.savefig(\"Figures/swarm_period.pdf\")\n plt.show()\n plt.plot(mlt, NeA)\n plt.show()\n plt.plot(mlt, lats)\n plt.show()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot_wav(decomp):\n \n plt.figure(figsize=(10,10))\n gs = GridSpec(4, 4)\n \n ax = plt.subplot(gs[0, 0])\n plt.imshow(decomp[0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1,0])\n plt.imshow(decomp[1][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = 
plt.subplot(gs[0, 1])\n plt.imshow(decomp[1][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[1, 1])\n plt.imshow(decomp[1][2])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,:2])\n plt.imshow(decomp[2][0])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[:2,2:])\n plt.imshow(decomp[2][1])\n plt.xticks([])\n plt.yticks([])\n \n ax = plt.subplot(gs[2:,2:])\n plt.imshow(decomp[2][2])\n plt.xticks([])\n plt.yticks([])\n \n plt.tight_layout()\n \n return", "def plot_graph(self) -> None:", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot(self):\n\t\tself.plotOfSpect()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()" ]
[ "0.66511804", "0.64478564", "0.61882246", "0.60987866", "0.6028533", "0.59789073", "0.5939781", "0.5918501", "0.5852052", "0.58147776", "0.5804578", "0.5693301", "0.5680562", "0.5675579", "0.567436", "0.56429476", "0.5581434", "0.5549642", "0.5546036", "0.5541444", "0.5512552", "0.5498148", "0.5496131", "0.54945576", "0.54717225", "0.5471289", "0.54601693", "0.5460004", "0.5459873", "0.5453975" ]
0.67208236
0
Save response and redirect to next question or thanks when done
def handle_answer():
    choice = request.form['answer']
    text = request.form.get("text", "")

    # add this response to the list in the session
    responses = session[RESPONSES_KEY]
    responses.append({"choice": choice, "text": text})

    # add this response to the session
    session[RESPONSES_KEY] = responses

    survey_code = session[CURRENT_SURVEY_KEY]
    survey = surveys[survey_code]

    if (len(responses) == len(survey.questions)):
        # survey is complete
        return redirect("/complete")
    else:
        return redirect(f"/questions/{len(responses)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_question():\n # responses variable will go on to store all of the user's answers to the questions\n session[ANSWERS_KEY] = []\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def handle_answer():\n \n extracted_answer = request.form.get('answers')\n responses.append(extracted_answer)\n\n length = len(responses)\n\n return redirect(f\"/questions/{length}\")", "def store_answer():\n #breakpoint()\n answer = request.form['answer']\n response = session[ANSWERS_KEY]\n response.append(answer)\n session[ANSWERS_KEY] = response\n return redirect(f\"/questions/{len(session[ANSWERS_KEY])}\")", "def add_answer():\n\n ans = request.form['answer']\n responses.append(ans)\n\n responses = session['responses']\n responses.append(ans)\n session['responses'] = responses\n \n # redirect...\n q_num = len(responses)\n \n if q_num >= len(satisfaction_survey.questions):\n return redirect(\"/finished\")\n\n else:\n return redirect(f\"/questions/{q_num}\")", "def redirectSubmit():\n postRequest = request.json or request.form or request.args\n print postRequest\n\n rawText = str(postRequest.items()[0][1])\n collist = key_words_filter(rawText)\n if len(collist) != 0:\n dna.db.fileter_cato(collist,0)\n if dna.currentquestion.qid == -1:\n print \"error got\"\n SESSION_INFO.result = dna.currentList\n q = Question()\n q.qid = \"-1\"\n SESSION_INFO.question = q\n SESSION_INFO.answerlist = dna.answerList\n\n\n\n\n return render_template('question.html', session_info=json.dumps(SESSION_INFO.toJson()))", "def start_survey():\n session[RESPONSES_KEY] = []\n\n return redirect(\"/questions/0\")", "def say_thanks():\n survey_id = session[CURRENT_SURVEY_KEY]\n survey = surveys[survey_id]\n responses = session[RESPONSES_KEY]\n\n html = render_template(\"thanks.html\", survey=survey, responses=responses)\n\n # Set cookie noting this survey is done so they can't retake it\n response = make_response(html)\n response.set_cookie(f\"completed_{survey_id}\", \"yes\", max_age=60)\n return response", "def start_survey():\n\n session[RESPONSES_KEY] = []\n\n return redirect(\"/questions/0\")", "async def respond(self, ctx, index, *, response):\n try:\n config = self.bot.db['questions'][str(ctx.guild.id)][str(ctx.channel.id)]\n except KeyError:\n return\n if not response:\n await hf.safe_send(ctx, \"You need to type something for your response.\")\n return\n if len(response.split()) == 1:\n try:\n msg = await ctx.channel.fetch_message(int(response))\n await ctx.message.add_reaction('⤴')\n ctx.message = msg\n ctx.author = msg.author\n response = msg.content\n except (discord.NotFound, ValueError):\n pass\n if index not in config['questions']:\n await hf.safe_send(ctx, \"Invalid question index. Make sure you're typing this command in the channel \"\n \"the question was originally made in.\")\n return\n\n try:\n log_channel = ctx.guild.get_channel(config['log_channel'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original log channel can't be found (type `;q setup`)\")\n return\n try:\n log_message = await log_channel.fetch_message(config['questions'][index]['log_message'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original question log message could not be found. 
Type `;q a <index>` to \"\n \"close the question and clear it.\")\n return\n\n emb: discord.Embed = log_message.embeds[0]\n value_text = f\"⁣⁣⁣\\n[Jump URL]({ctx.message.jump_url})\"\n emb.add_field(name=f\"Response by {ctx.author.name}#{ctx.author.discriminator}\",\n value=value_text.replace('⁣⁣⁣', response[:1024-len(value_text)]))\n await log_message.edit(embed=emb)\n config['questions'][index].setdefault('responses', []).append(ctx.message.jump_url)\n await self._delete_log(ctx)\n await self._post_log(ctx)\n await ctx.message.add_reaction('✅')", "def game_post():\n game = Game.current_game\n question = game.current_question\n \n answer_keys = request.form[\"answer_keys\"].split('++')\n print(answer_keys)\n question['user_response'] = answer_keys\n \n return redirect(url_for(\"show_answer\"))", "def finish_survey():\n return render_template(\"thanks.html\")", "def question_page(q_num): \n if q_num == len(responses) and q_num < len(satisfaction_survey.questions):\n title=satisfaction_survey.title\n question = satisfaction_survey.questions[q_num].question\n choices = satisfaction_survey.questions[q_num].choices\n return render_template(\"question.html\", survey_title=title, question=question, choices=choices, q_num=q_num)\n\n elif q_num != len(responses) and len(responses) < len(satisfaction_survey.questions):\n flash(\"Please answer the questions in order\")\n return redirect(f\"/questions/{len(responses)}\")\n \n elif q_num != len(responses) and len(responses) == len(satisfaction_survey.questions):\n flash(\"If you want to edit your response, please contact us\")\n return redirect(\"/finished\")\n \n else:\n \"\"\" default?\"\"\"\n return render_template(\"thanks.html\")", "def next_question(self):\n self.user_answers = []\n self.curent_question = choice(self.to_ask)", "def answer_question(request, control_question_pk, step):\n\n control_question_obj = ControlQuestion.objects.get(pk=control_question_pk)\n\n control_question_obj.answered_on = timezone.now()\n control_question_obj.answered_by = request.user\n control_question_obj.answer_correct = True\n\n # Save changes\n control_question_obj.save()\n\n return http.HttpResponseRedirect(request.META.get(\n 'HTTP_REFERER', '/') + '#setup_step_' + step)", "def complete():\n survey_id = session[current_survey_key]\n survey = surveys[survey_id]\n responses = session[RESPONSES_KEY]\n\n html= render_template(\"completion.html\", survey=survey, responses=responses)\n\n response = make_response(html)\n response.set_cookie(f\"completed_{survey_id}\", \"yes\", max_age=60)\n return response", "def process_open_ended_question_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tprevious_message = message.previous_message\n\t\twhile hasattr(previous_message, \"previous_message\") and previous_message.previous_message != None:\n\t\t\tprevious_message = previous_message.previous_message\n\n\t\tfor feedback in previous_message.feedbacks.all():\n\t\t\tfeedback.note=response\n\t\t\tfeedback.datetime_responded=now\n\t\t\tfeedback.save()\n\n\t\ttemplate = 'messages/response_open_ended_question.txt'\n\t\tcontent = render_to_string(template)\n\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\treturn HttpResponse(content=content, content_type='text/plain')", "def fetch_response(self):\n if self.stored_answer:\n return \"Thanks for your answer. Your answer has been saved. 
\"\\\n \"I will get back to you when the destined asker, rates your response. \"\\\n \"Keep your fingers crossed. Hopefully the asker will give you good ratings, \"\\\n \"and your karma points will boost up.\"\\\n \"Meanwhile, you can ask another question, or post answer for requested question.\"\n else:\n self.stored_answer = True\n return \"Sorry, you did not enter the Answer in the required format. \"\\\n \"Eg - \\\"[Answer][qid:<placeholder for question_number>] <Placeholder for Answer>\\\". Try again\"", "def question_page():\n question_start_time = session.get('start_time')\n question_last_answer = session.get('last_answer')\n counter = session.get('counter')\n if not counter:\n session['counter'] = 0\n if session['counter'] < 5:\n if session['counter'] > 0:\n check_answer(question_start_time, question_last_answer, counter)\n session['counter'] += 1\n question = _get_question()\n session['start_time'] = time()\n session['last_answer'] = question.pop()\n answers = {}\n letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n for k, a in enumerate(question):\n if k > 0:\n answers[letters[k-1]] = a\n # answers = zip(letters, question[1:-1]\n return render_template('question.html', question=question[0], answers=answers, counter=session['counter']),\n else:\n check_answer(question_start_time, question_last_answer, counter)\n return redirect(url_for('result_page'))", "def _post_question(self, question):\n self.messages_sent.append(question)\n if self.user_input:\n return input(question)\n else:\n ans = self.lines.pop(0)\n print(question + ans)\n return ans", "def post_answer(request):\n if request.method == 'GET':\n response_data = list(range(10))\n # geodata = response.json()\n return Response(\n data=response_data\n )\n # snippets = Snippet.objects.all()\n # serializer = SnippetSerializer(snippets, many=True)\n # return Response(serializer.data)\n\n elif request.method == 'POST':\n data = request.data\n print(type(data))\n userResponse.append(data)\n if int(data['questionCode']) in userQuestions:\n user_question = userQuestions[int(data['questionCode'])]\n print(user_question)\n\n # get response and movie list\n updatedMovieList = imdb.get_imdb_movies()\n robotMessage = assistant.ask_assistant(user_question)\n responseData = {\"nextQuestionString\": robotMessage,\"nextQuestionCode\": int(data['questionCode'])+1,\"updatedMovieList\" : updatedMovieList}\n return Response(\n data=responseData\n )", "def process_refill_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/refill_questionnaire_responses/' + \\\n\t\t\t Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Too 
expensive\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Concerned about side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Other\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def _post_question(self, question):\n self.messages_sent.append(question)\n return input(question)", "def action_next(self, cr, uid, ids, context=None):\n survey_obj = self.pool.get('survey')\n search_obj = self.pool.get('ir.ui.view')\n if context is None: context = {}\n\n this = self.browse(cr, uid, ids, context=context)[0]\n survey_id = this.survey_id.id\n context.update({'survey_id': survey_id, 'sur_name_id': this.id})\n cr.execute('select count(id) from survey_history where user_id=%s\\\n and survey_id=%s' % (uid,survey_id))\n\n res = cr.fetchone()[0]\n sur_rec = survey_obj.browse(cr,uid,survey_id,context=context)\n if sur_rec.response_user and res >= sur_rec.response_user:\n raise osv.except_osv(_('Warning!'),_(\"You cannot give response for this survey more than %s times.\") % (sur_rec.response_user))\n\n if sur_rec.max_response_limit and sur_rec.max_response_limit <= sur_rec.tot_start_survey:\n raise osv.except_osv(_('Warning!'),_(\"You cannot give more responses. 
Please contact the author of this survey for further assistance.\"))\n\n search_id = search_obj.search(cr,uid,[('model','=','survey.question.wiz'),('name','=','Survey Search')])\n return {\n 'view_type': 'form',\n \"view_mode\": 'form',\n 'res_model': 'survey.question.wiz',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'search_view_id': search_id[0],\n 'context': context\n }", "def browser_bot_stuff(self, response: HTMLResponse):\n if self.participant.is_browser_bot:\n browser_bots.set_attributes(\n participant_code=self.participant.code,\n request_path=self.request.url.path,\n html=response.body.decode('utf-8'),\n )\n has_next_submission = browser_bots.enqueue_next_post_data(\n participant_code=self.participant.code\n )\n if has_next_submission:\n # this doesn't work because we also would need to do this on OutOfRange page.\n # sometimes the player submits the last page, especially during development.\n # if self._index_in_pages == self.participant._max_page_index:\n auto_submit_js = '''\n <script>\n var form = document.querySelector('#form');\n form.submit();\n // browser-bot-auto-submit\n form.on('submit', function (e) {\n e.preventDefault();\n });\n </script>\n '''\n extra_content = auto_submit_js.encode('utf8')\n response.body += extra_content\n response.headers['Content-Length'] = str(\n int(response.headers['Content-Length']) + len(extra_content)\n )\n else:\n browser_bots.send_completion_message(\n session_code=self.participant._session_code,\n participant_code=self.participant.code,\n )", "def biostar_question_redirect( self, trans, payload={} ):\n return self.biostar_redirect( trans, payload=payload, biostar_action='new' )", "def check_responses(self, button):\n with self.out:\n clear_output()\n\n for i, question in enumerate(self.questions):\n self.create_feedback(i+1, question.correct())", "async def resp(self, ctx, index, *, response):\n x = self.bot.get_command('question respond')\n if await x.can_run(ctx):\n await ctx.invoke(x, index, response=response)", "def complete_questionnaire(self):\n while True:\n data = {\n \"question\": \"questionnaire\",\n \"number\": 1,\n \"response\": json.dumps(self.question_responses),\n }\n url = \"{host}/question/{self.participant_id}\".format(\n host=self.host, self=self\n )\n try:\n result = requests.post(url, data=data)\n result.raise_for_status()\n except RequestException:\n self.stochastic_sleep()\n continue\n return True", "def on_submit(self):\n\n if self.question_type == \"programming\":\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n self.ids[\"input_code_answer\"].text.replace(\"\\n\",\n \"*[SEAS-SLASH-N]*\"\n )\n )\n\n return True\n elif self.question_type == \"short_answer\":\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n self.ids[\"input_short_answer\"].text.replace(\"\\n\",\n \"*[SEAS-SLASH-N]*\"\n )\n )\n\n return True\n elif self.question_type == \"multiple_choice\":\n try:\n students_choice = self.multiple_choice_answer\n except:\n students_choice = \"\"\n\n database_api.sendAnswers(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n self.question_no,\n Cache.get(\"info\", \"nick\"),\n students_choice\n )\n\n return True\n else:\n return False", "def ask(self) -> Union[redirect, HTMLBody]:\n\t\tform = StudentQueryForm()\n\t\tif self.is_authorized:\n\t\t\tif request.method == \"GET\":\n\t\t\t\treturn 
render_template(\n\t\t\t\t\"ask.jinja2\", \n\t\t\t\tform=form,\n\t\t\t)\n\t\t\tif form.validate_on_submit():\n\t\t\t\t# Add a question to the database\n\t\t\t\tquestion_doc = QuestionDocument(\n\t\t\t\t\tquestion_id=uuid4().hex,\n\t\t\t\t\tsubmitters_name=form.name.data,\n\t\t\t\t\tsubmitters_email=form.email.data,\n\t\t\t\t\tsubmission_date=datetime.now(),\n\t\t\t\t\tquestion=form.question.data,\n\t\t\t\t)\n\t\t\t\tself.db.insert_question(question_doc)\n\t\t\t\t\n\t\t\t\t# Send a notification email to both the student and the President\n\t\t\t\tself.mailer.send_confirmation(question_doc)\n\t\t\t\tself.mailer.send_notification(question_doc)\n\n\t\t\t\t# Flash and log a success message\n\t\t\t\tcurrent_app.logger.info(f\"Successfully submitted question {question_doc['question_id']}\")\n\t\t\t\tflash(\"Thanks for your question! You will receive an email response soon!\")\n\t\t\telif form.recaptcha.errors:\n\t\t\t\t# Flash a captcha error\n\t\t\t\tcurrent_app.logger.info(\"Caught recaptcha error.\")\n\t\t\t\tflash(\"Please complete the reCAPTCHA.\")\n\t\t\telse: # pragma: no cover\n\t\t\t\t# Flash unknown error\n\t\t\t\tcurrent_app.logger.info(\"Caught unknown error.\")\n\t\t\t\tflash(\"Error.\")\n\t\t\treturn redirect(url_for(\"ask\"))\n\t\tflash(\"Please login to ask questions.\")\n\t\treturn redirect(url_for(\"login\"))" ]
[ "0.7377074", "0.7321153", "0.72295934", "0.72033834", "0.69435203", "0.6750071", "0.6735021", "0.67152864", "0.658852", "0.6523324", "0.6363237", "0.6334974", "0.6329038", "0.6250559", "0.62394965", "0.6238538", "0.6195629", "0.6136314", "0.612431", "0.60994416", "0.6094137", "0.6075318", "0.60558975", "0.60486305", "0.60311276", "0.603016", "0.6022953", "0.6020919", "0.59844655", "0.5982534" ]
0.7651496
0
Return True if the interval [start, end] does not overlap with any region in the skip list.
def isValid(self, start, end):
    for s in self.skip:
        if start <= s[0] <= end or start <= s[1] <= end:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def does_overlap(self, start, stop):\n\n ranges = [list(range(key, self.map[key] + 1)) for key in self.map]\n all_coords = [item for sublist in ranges for item in sublist]\n # removing all_coords implementation until we write some tests\n for i in range(start, stop + 1):\n if i in all_coords:\n return True\n return False", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def present_in_slice(self, start, stop):\n return self.starts_before(start) and self.ends_after(stop - 1)", "def overlaps(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if self.end is None or region.start is None or region.start <= self.end:\n if self.start is None or region.end is None or region.end >= self.start:\n return True\n return False", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def overlaps(self, begin, end=None):\n if end is not None:\n # An overlap means that some C exists that is inside both ranges:\n # begin <= C < end\n # and \n # self.begin <= C < self.end\n # See https://stackoverflow.com/questions/3269434/whats-the-most-efficient-way-to-test-two-integer-ranges-for-overlap/3269471#3269471\n return begin < self.end and end > self.begin\n try:\n return self.overlaps(begin.begin, begin.end)\n except:\n return self.contains_point(begin)", "def overlaps(self, other):\n\n if self.start.equal(other.start) or self.stop.equal(other.stop):\n return True\n elif self.start.before(other.start) and self.stop.after(other.start):\n return True\n elif other.stop.after(self.start) and other.stop.before(self.stop):\n return True\n else:\n return False", "def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start", "def contains(self, interval):\n first, last = self._intersect(interval)\n return first != last", "def overlaps(self, chrom, start, end, strand=None):\n if (self.chrom != chrom \n or min(self.end, end) - max(self.start, start) <= 0 \n or (strand is not None and self.strand != strand)): \n return False\n return True", "def IsInRange(self, id, start, isStartInclusive, end, isEndInclusive):\r\n if isStartInclusive == False:\r\n start = (start + 1) % NODES\r\n if isEndInclusive == True:\r\n end = (end + 1) % NODES\r\n allRanges = []\r\n if(start < end):\r\n allRanges.append(range(start, end))\r\n else:\r\n allRanges.append(range(start, NODES))\r\n allRanges.append(range(0, end))\r\n for r in allRanges:\r\n if id in r:\r\n return True\r\n return False", "def intersect(start1, stop1, start2, stop2):\n\tassert isinstance(start1, int)\n\tassert isinstance(stop2, int)\n\tassert isinstance(start2, int)\n\tassert isinstance(stop2, int)\n\tassert start1 <= stop1\n\tassert start2 <= stop2\n\t\n\t# if interval 1 is completely to the left of interval 2\n\tif stop1 < start2:\n\t\treturn False\n\t\n\t# if interval 1 is completely to the right of interval2\n\tif stop2 < start1:\n\t\treturn False\n\t\t\n\treturn True", 
"def add_range(self, start, end) -> bool:\n start = _normalize_datetime(start)\n end = _normalize_datetime(end)\n assert end > start\n\n if self._start_time is None:\n self._start_time = start\n\n if start < self._start_time:\n delta = int((self._start_time - start).total_seconds() / 60)\n self._start_time = start\n self._backing_int = self._backing_int << delta\n\n start_idx = self._datetime_to_index(start)\n end_idx = self._datetime_to_index(end)\n idx_range = end_idx - start_idx\n range_mask = ((1 << (idx_range + 1)) - 1) << start_idx\n\n has_overlap = (self._backing_int & range_mask) > 0\n self._backing_int |= range_mask\n return has_overlap", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def covers(self, other):\n return self._start <= other._start and self._end >= other._end", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def contains_interval(self, other):\n return (\n self.begin <= other.begin and\n self.end >= other.end\n )", "def add_exclude_interval(\n self,\n start: CxoTimeLike,\n stop: CxoTimeLike,\n comment: str,\n pad_start: Optional[u.Quantity] = None,\n pad_stop: Optional[u.Quantity] = None,\n source: str = \"Auto-generated\",\n ):\n # For testing, we can skip all exclude intervals to generate violations\n if self.no_exclude:\n return\n\n start = CxoTime(start)\n if pad_start is not None:\n start = start - pad_start\n stop = CxoTime(stop)\n if pad_stop is not None:\n stop = stop + pad_stop\n\n # Ensure interval is contained within validation interval.\n if start >= self.stop or stop <= self.start:\n return\n start = max(start, self.start)\n stop = min(stop, self.stop)\n\n exclude = {\n \"start\": start.date,\n \"stop\": stop.date,\n \"states\": self.state_name,\n \"comment\": comment,\n \"source\": source,\n }\n logger.info(\n f\"{self.state_name}: excluding interval {start} - {stop}: {comment}\"\n )\n self.exclude_intervals.add_row(exclude)\n self.exclude_intervals.sort(\"start\")", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def _validate_no_overlap(params, error_callback):\n dhcp_set = netaddr.IPSet(netaddr.IPRange(params['dhcp_start'],\n params['dhcp_end']))\n inspection_set = netaddr.IPSet(netaddr.IPRange(params['inspection_start'],\n params['inspection_end']))\n # If there is any intersection of the two sets then we have a problem\n if dhcp_set & inspection_set:\n message = ('Inspection DHCP range \"%s-%s\" overlaps provisioning '\n 'DHCP range \"%s-%s\".' 
%\n (params['inspection_start'], params['inspection_end'],\n params['dhcp_start'], params['dhcp_end']))\n error_callback(message)", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)", "def isUndefinedRange(program: ghidra.program.model.listing.Program, startAddress: ghidra.program.model.address.Address, endAddress: ghidra.program.model.address.Address) -> bool:\n ...", "def check_availibity(self, starts_at: datetime.datetime, ends_at: datetime.datetime):\n for unavailable in self.unavailable:\n if unavailable[0] <= starts_at <= unavailable[1]:\n return False\n if unavailable[0] <= ends_at <= unavailable[1]:\n return False\n return True", "def is_between(value, start, end, including_start=False, including_end=False):\n if not including_start and not including_end: # not include both start and end\n if (start < value < end):\n return True\n elif (start > end) and (start < value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif not including_start and including_end: # include end but not the start\n if value == end:\n return True\n elif (start < value <= end):\n return True\n elif (start > end) and ((start < value <= (2**m - 1)) or (0 <= value <= end)):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif including_start and not including_end: # include start but not the end\n if value == start:\n return True\n elif (start <= value < end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != end):\n return False\n return False\n else: # include both start and end\n if (start <= value <= end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value <= end):\n return True\n elif start == end:\n return True\n return False", "def _remove_overlaps(start, end) -> int:\n start = self._x_positions[start % self.n_cols]\n end = self._x_positions[int(end) % self.n_cols]\n n_removed = 0\n for x, col in self._cols.items():\n if start - self.col_width <= x <= start or end <= x <= end + self.col_width:\n if col.label is None:\n n_removed += col.mark_as('ignore')\n return n_removed" ]
[ "0.7434765", "0.7223908", "0.69054854", "0.68777937", "0.6707784", "0.66524976", "0.64958876", "0.64859533", "0.64682317", "0.63680816", "0.6349625", "0.63234466", "0.6302115", "0.62551606", "0.62365174", "0.62196153", "0.62146914", "0.6151166", "0.6150224", "0.61310005", "0.61306703", "0.6115963", "0.6103493", "0.6094476", "0.6014299", "0.59743726", "0.5947672", "0.5935107", "0.5918034", "0.59066844" ]
0.75347215
0
Returns a tuple containing the start positions of two mates in a read pair. The positions are produced by selecting a start position at random, and adding to it a random insert size, having mean insertSize and standard deviation insertStdev.
def genInsertPosition(self):
    insize = np.random.normal(self.insertSize, self.insertStdev)
    while True:
        start = random.randint(self.fpstart, self.fpend)
        end = start + insize
        if end < self.fpend and self.isValid(start, end):
            return (start, end - self.readlen)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def get_initial_pt_and_increments():\n\ti = random.randint(1, 4)\n\tif i == 1: # start at the top left\n\t\tinitial_pt = (random.randint(ALLOWANCE_FROM_EDGE, IMAGE_SIZE[0]/2),\n\t\t\t\trandom.randint(ALLOWANCE_FROM_EDGE, IMAGE_SIZE[1]/2))\n\t\tincrements = ([5, 10], [7, 15])\n\telif i == 2: # start at the bottom left\n\t\tinitial_pt = (random.randint(IMAGE_SIZE[0]/2, IMAGE_SIZE[0] - ALLOWANCE_FROM_EDGE),\n\t\t\trandom.randint(ALLOWANCE_FROM_EDGE, IMAGE_SIZE[1]/2))\n\t\tincrements = ([-10, -5], [7, 15])\n\telif i == 3: # start at the bottom right\n\t\tinitial_pt = (random.randint(IMAGE_SIZE[0]/2, IMAGE_SIZE[0] - ALLOWANCE_FROM_EDGE),\n\t\t\trandom.randint(IMAGE_SIZE[1]/2, IMAGE_SIZE[1] - ALLOWANCE_FROM_EDGE))\n\t\tincrements = ([-10, -5], [-15, -7])\n\telse: # start at the top right\n\t\tinitial_pt = (random.randint(ALLOWANCE_FROM_EDGE, IMAGE_SIZE[0]/2),\n\t\t\trandom.randint(IMAGE_SIZE[1]/2, IMAGE_SIZE[1] - ALLOWANCE_FROM_EDGE))\n\t\tincrements = ([5,10], [-15, -7])\n\treturn initial_pt, increments", "def get_random_start_and_end_points_in_file(self, file_data):\n start_point = random.randint(2500, len(file_data))\n end_point = start_point + random.randint(0, len(file_data) - start_point)\n\n return start_point, end_point", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def _rand_pos(self, xLow, xHigh, yLow, yHigh):\n\n return (\n self.np_random.randint(xLow, xHigh),\n self.np_random.randint(yLow, yHigh)\n )", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def get_random_pos(self):\n i = np.random.randint(self.n)\n j = np.random.randint(self.m)\n return [i, j]", "def pickup_samples(pts1, pts2):\n\n #\n # Your code here\n #\n\n x = min(len(pts1), len(pts2))\n return np.random.choice(range(x), min_num_pairs(), replace=False)", "def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)", "def choose_starter(p1, p2):\n names = [p1, p2]\n random.shuffle(names)\n return (names[0], names[1])", "def _create_random_offsets(self, block_locations):\n\n min_x, max_x, min_y, _ = self._find_min_and_max_coords(block_locations)\n x_offset = randrange(10 - (max_x - min_x)) - min_x\n y_offset = 0 - min_y\n return [x_offset, y_offset]", "def initpoint(self):\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n return (row, col)", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def generate_segment(\n self,\n p1: Tuple[float, float],\n p2: Tuple[float, float],\n randomized: bool = True,\n random_state: Optional[np.random.mtrand.RandomState] = 
None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n\n range_x = p2[0] - p1[0]\n range_y = p2[1] - p1[1]\n\n idx = self._get_permutated_segments_indices(randomized, random_state)\n new_y = np.hstack((np.array(p1[1]), p1[1] + np.cumsum(range_y * self.dy[idx])))\n new_x = np.hstack((np.array(p1[0]), p1[0] + np.cumsum(range_x * self.dx[idx])))\n return new_x, new_y", "def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))", "def get_end_coordinates(start_coordinates, size):\r\n size -= 1\r\n x = random.choice([start_coordinates[0] + size,\r\n start_coordinates[0], start_coordinates[0] - size])\r\n y = start_coordinates[1]\r\n if x > 9:\r\n x = random.choice([start_coordinates[0],\r\n start_coordinates[0] - size])\r\n elif x < 0:\r\n x = random.choice([start_coordinates[0],\r\n start_coordinates[0] + size])\r\n elif x == start_coordinates[0]:\r\n y = random.choice([start_coordinates[1] + size,\r\n start_coordinates[1] - size])\r\n if y < 0:\r\n y = start_coordinates[1] + size\r\n elif y > 9:\r\n y = start_coordinates[1] - size\r\n return x, y", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def getPos(self,len,end,nodes):\n start=end\n if self.count==nodes:\n last=len\n else:\n last=end+(int)(len/(nodes+1))\n self.count+=1\n return (start,last)", "def choose_starting_points(self, side):\n # Left Side\n if side == 1:\n x = np.random.uniform(self.left_side[\"x_min\"], self.left_side[\"x_max\"])\n y = np.random.uniform(self.left_side[\"y_min\"], self.left_side[\"y_max\"])\n # Bottom\n elif side == 2:\n x = np.random.uniform(self.bottom[\"x_min\"], self.bottom[\"x_max\"])\n y = np.random.uniform(self.bottom[\"y_min\"], self.bottom[\"y_max\"])\n # Right Side\n elif side == 3:\n x = np.random.uniform(self.right_side[\"x_min\"], self.right_side[\"x_max\"])\n y = np.random.uniform(self.right_side[\"y_min\"], self.right_side[\"y_max\"])\n # Top\n elif side == 4:\n x = np.random.uniform(self.top[\"x_min\"], self.top[\"x_max\"])\n y = np.random.uniform(self.top[\"y_min\"], self.top[\"y_max\"])\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y", "def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])", "def get_indel_pos(genome, size_range):\n start_pos = random.randint(100,len(genome.seq)-110) # positions 100bp from start or end will not be variable\n size = size_range[random.randint(0, len(size_range)-1)]\n end_pos = start_pos + random.randint(1,10)\n\n if random.randint(0,1) == 0: # insertion\n end_pos = start_pos+1\n else:\n end_pos = start_pos + size\n size *= -1\n\n unavail = False\n for n in range(start_pos, end_pos):\n if n in genome.unavail_pos:\n unavail = True\n break\n if unavail:\n start_pos, end_pos = get_del_pos(genome)\n return (start_pos, end_pos, size)", "def assgin_pos(self, range_x, range_y, n_p):\n # n_p random integers\n pos_x = random.sample(range(0, int(100*n_p)), n_p)\n # get a random number\n tmp1 = random.uniform(0, 1)\n # keep position in the range of x and looks \"very 
random\"\n pos_x %= range_x - tmp1\n # same procedure for y\n pos_y = random.sample(range(0, int(100*n_p)), n_p)\n tmp1 = random.uniform(0, 1)\n pos_y %= range_y - tmp1\n return pos_x, pos_y", "def GenerateOffsets(frames, cut_pixels):\n def GenRandom(max_rand):\n return random.randint(-max_rand, max_rand)\n\n max_rand = int(cut_pixels / 2)\n if frames == 2:\n return([(0, 0), (max_rand, max_rand), (0, 0)])\n\n finished = False\n while not finished:\n coords = [(0, 0)]\n for i in range(frames):\n good_random = False\n while not good_random:\n new_coord = []\n for point in coords[i]:\n change = GenRandom(max_rand)\n while(abs(point + change) > cut_pixels):\n change = GenRandom(max_rand)\n new_coord.append(point + change)\n coord_tuple = tuple(new_coord)\n if coord_tuple not in coords[-2:]:\n coords.append(coord_tuple)\n good_random = True\n if ((len(coords) == frames + 1) and (coords[-1] == (0, 0))):\n finished = True\n return(coords)", "def _get_random_pos_on_a_side(self):\n pass", "def getStartState(self):\n #return (self.position, self.food.copy())\n return self.position", "def createRandomRange(self, start, end) :\n\t\ttime = random.randint(1, end-start)\n\t\treturn (start, start+time)", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end", "def firstMove(self):\n return (10, 10)", "def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y" ]
[ "0.5988597", "0.59602445", "0.5950955", "0.5834952", "0.5787519", "0.57752746", "0.57375985", "0.5677156", "0.5668214", "0.5666423", "0.56609106", "0.56546366", "0.5638042", "0.56298256", "0.56073266", "0.56041735", "0.5542233", "0.55215186", "0.5492197", "0.5475261", "0.54677194", "0.5461015", "0.5386306", "0.53762925", "0.53645194", "0.535308", "0.5338946", "0.5318449", "0.53158844", "0.531578" ]
0.67196673
0
Return a read starting at `pos' with the length contained in the `readlen' attribute, with the base qualities specified in `probs'. If the read would overlap a header line, return False.
def getOneRead(self, pos, probs):
    if not self.isValid(pos, pos + self.readlen):
        return False
    bases = []
    f = self.stream
    f.seek(pos)
    n = 0
    while True:
        b = f.read(1)
        if b == '>':
            return False
        if b not in "ACGTNXacgtnx":
            continue
        if random.random() < probs[n]:
            while True:
                nb = random.choice('ACGT')
                if nb != b:
                    b = nb
                    break
        bases.append(b)
        n += 1
        if n == self.readlen:
            break
    return bases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeOneRead(self, pos, probs, out):\n if not self.isValid(pos, pos + self.readlen):\n return False\n f = self.stream\n f.seek(pos)\n n = 0\n while True:\n b = f.read(1)\n if b not in \"ACGTNXacgtnx\":\n continue\n if random.random() < probs[n]:\n while True:\n nb = random.choice('ACGT')\n if nb != b:\n b = nb\n break\n out.write(b)\n n += 1\n if n == self.readlen:\n break\n return True", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", "def single_reads(reads_pos):\n #TODO: use more complicated estimator?\n return (len(reads_pos) + 1) / (len(reads_pos) - 1) * (reads_pos[-1] - reads_pos[0])", "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in 
enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n # TODO instead of taking HTSeq_alignment, this could just take the seq and N_errors, like add_RISCC_read does?\n self._ensure_dataset_None(dataset_name)\n # increment total_read_count, and add read ID to the ID set\n self.total_read_count += read_count\n # figure out if the read is perfect and increment perfect_read_count if yes; return True if perfect else False.\n # TODO may want to come up with a better option than 10 for the \"errors\" of unaligned seqs\n if position in SPECIAL_POSITIONS.all_undefined:\n N_errors = 10\n else:\n N_errors = check_mutation_count_by_optional_NM_field(HTSeq_alignment, negative_if_absent=False)\n # add sequence position/readcount data the detailed dictionary.\n seq = HTSeq_alignment.read.seq\n try: self.sequences_counts_positions_errors[seq][0] += read_count\n except KeyError: 
self.sequences_counts_positions_errors[seq] = [read_count, position, N_errors]\n if N_errors==0: \n self.perfect_read_count += read_count\n return True\n else:\n return False", "def make_read_request(file_offset=1, byte_count=MAX_READ):\n return StenoPacket(\n packet_id=StenoPacket.ID_READ,\n p1=file_offset,\n p2=byte_count,\n )", "def __call__(self, read, info: ModificationInfo):\n n_count = read.sequence.lower().count('n')\n if self.is_proportion:\n if len(read) == 0:\n return False\n return n_count / len(read) > self.cutoff\n else:\n return n_count > self.cutoff", "def random_read(infile, nbytes, start_offset, num_samples): \n\tseq = \"\"\n\twhile len(seq) < num_samples:\n\t\t# Assuming 1 byte chars.\n\t\tstartidx = random.randint(start_offset, nbytes - num_samples)\n\t\tinfile.seek(startidx)\n\n\t\tseq = \"\"\n\t\tfor line in infile:\n\t\t\tfor ch in line:\n\t\t\t\tif is_valid_char(ch):\n\t\t\t\t\tseq = seq + ch\n\t\t\tif len(seq) >= num_samples:\n\t\t\t\tbreak\n\n\treturn seq[:num_samples].upper()+'\\n'", "def readTrim(read, start, end):\n score = 0\n if not read.is_unmapped:\n regTrim = 0\n upS = read.cigar[0][1] if read.cigar[0][0] == 4 else 0\n dnS = read.cigar[-1][1] if read.cigar[-1][0] == 4 else 0\n \n trimS = None\n trimE = None\n if start > read.pos:\n for queryPos, targetPos in read.aligned_pairs:\n if trimS is None and targetPos >= start:\n trimS = queryPos\n else:\n score += abs(read.pos - start)\n if end < read.aend:\n for queryPos, targetPos in read.aligned_pairs[::-1]:\n if trimE is None and targetPos <= end:\n trimE = queryPos\n else:\n score += abs(read.aend-end)\n \n if trimS is not None:\n trimS = max(0, trimS) + upS\n else:\n trimS = 0\n \n if trimE is not None:\n trimE = min(len(read.seq), trimE) - dnS\n else:\n trimE = len(read.seq)\n seq = read.seq[trimS:trimE]\n qual = read.qual[trimS:trimE]\n if not read.is_reverse:\n seq = seq.translate(revComp)[::-1]\n qual = qual[::-1]\n \n return seq, qual", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. 
Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def seek_record_read(self, offset, count):\n self.seek(offset)\n return self.record_read(count)", "def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def get_features(fastq_input,\n label=None,\n subportions=3,\n positions=(0, 0),\n header=False,\n reduced=True,\n output=sys.stdout,\n debug=False):\n def get_qual_features(qual_ascii, reduced=False):\n seq_qual_prob = list(\n map(lambda x: transform_phred_to_prob(x, offset=offset),\n qual_ascii))\n return quality_features(seq_qual_prob, reduced=reduced)\n\n reader = SeqReader(fastq_input, file_type='fastq')\n position = 0\n x, y = positions[0], positions[1]\n if x and y and y < x:\n x, y = y, x\n count = 0\n my_header = []\n if header:\n if reduced:\n my_header = HEADER[0:4]\n else:\n my_header = HEADER.copy()\n for i in range(subportions + subportions - 1):\n for item in my_header:\n my_header.append(item + \"_\" + str(i + 1))\n if label:\n my_header.append(\"label\")\n if output:\n print(\"\\t\".join(my_header), file=sys.stdout)\n all_features = []\n all_labels = []\n for record in reader:\n features = []\n _, read, qual, _ = record\n position += 1\n if len(qual) == 0 or (x and (position < x or position > y)):\n continue\n count += 1\n offset = get_offset(qual)\n if debug:\n print(\"{} Qual len:{} Offset: {}\".format(count, len(qual), offset),\n file=sys.stderr)\n features += get_qual_features(qual, reduced=reduced)\n totallength = int(len(read) / subportions)\n halflength = int(totallength / 2)\n for i in range(subportions + subportions - 1):\n if i == subportions + subportions - 2:\n finallength = len(read)\n else:\n finallength = i * halflength + totallength\n if debug:\n sys.stderr.write(\n \"{} Read len:{} Offset: {} Begin: {}, End: {} {} \\n\".\n format(count, len(read), offset, i * halflength,\n finallength, qual[i * halflength:finallength]))\n sys.stderr.flush()\n features += get_qual_features(qual[i * halflength:finallength],\n reduced)\n if label and output:\n features.append(label)\n elif label and not output:\n all_labels.append(label)\n if output:\n print(\"\\t\".join(map(str, features)), file=output)\n else:\n all_features.append(features)\n if not output:\n return count, all_features, all_labels, my_header\n return count", "def variants_in_read(bamrecord, counter, mqmin=20, bqmin=20):\n \n if not bamrecord.cigar:\n return 0, ()\n\n for op, count in bamrecord.cigar:\n if op:\n return 0, ()\n\n if (bamrecord.is_duplicate or bamrecord.is_unmapped or\n bamrecord.is_qcfail or bamrecord.is_secondary or\n bamrecord.mapq < mqmin):\n return 0, ()\n if not bamrecord.is_proper_pair:\n return 0, ()\n \n try:\n md = bamrecord.opt('MD')\n except KeyError:\n 
return 0, ()\n if md.isdigit(): # read is reference\n for c, q in izip(bamrecord.seq, bamrecord.query_qualities):\n if q >= bqmin:\n counter[c] += 1\n return 1, ()\n\n qref = get_ref(bamrecord.seq, md)\n qlen = len(bamrecord.seq) - 1\n start = bamrecord.pos\n variants = []\n for idx, (r, a, q) in enumerate(izip(qref, bamrecord.seq,\n bamrecord.query_qualities)):\n if q >= bqmin:\n counter[r] += 1\n if r != a:\n if idx > 0:\n prev = qref[idx - 1]\n else:\n prev = 'N'\n if idx < qlen:\n next = qref[idx + 1]\n else:\n next = 'N'\n variants.append((start + idx, r, a, prev, next))\n\n return 1, variants", "def simulate_read(self):\n\n fastafile = ps.FastaFile(self.genome_fa)\n # left split read\n\n insert = int(np.random.normal(self.insert_size, (self.insert_size / 12), 1))\n start = int(np.random.randint(self.chr_pos_start, (self.chr_pos_end + 1)))\n left_end = start + self.read_length\n total_end = start + int(np.round(insert))\n right_start = total_end - self.read_length\n if total_end > self.chr_pos_end:\n # split read scenario or insert spanning split read scenario\n if left_end > self.chr_pos_end:\n # left read spanning split read scenario\n # left_read\n left_dntps = self.chr_pos_end - start\n right_dntps = self.read_length - left_dntps\n\n # the error could be here\n left_split_read = fastafile.fetch(self.chr, start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n left_read = left_split_read + right_split_read\n\n # right_read\n right_start = self.chr_pos_start + int(round(self.insert_size - left_dntps - self.read_length))\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n\n # assertion to check the error here\n\n common_id = \"%s|%s|%s:%s-%s:%s|%s:%s|1|%s\" % (\n self.read_number,\n self.chr,\n start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start + right_dntps),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n else:\n if right_start > self.chr_pos_end:\n # insert spanning split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n right_start = self.chr_pos_start + (right_start - self.chr_pos_end)\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|3|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n else:\n # right split read scenario\n assert right_start <= self.chr_pos_end\n assert (right_start + self.read_length) > self.chr_pos_end\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n\n # compute right dntps\n left_dntps = self.chr_pos_end - right_start\n right_dntps = self.read_length - left_dntps\n left_split_read = fastafile.fetch(self.chr, right_start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n right_read = left_split_read + right_split_read\n common_id = \"%s|%s|%s:%s|%s:%s-%s:%s|2|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start, right_dntps),\n self.circle_id,\n )\n\n else:\n # non split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n # correct right read start\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id 
= \"%s|%s|%s:%s|%s:%s|0|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n return (right_read, left_read, common_id)", "def pull_read(self, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.random.randint(0, self.total)\n return self.reads[index, :]", "def CheckRead(read_name, seq, break_line, quality, fastq_fp, line_num):\n if not read_name[0] == \"@\":\n raise Exception(\"Read name does not start with @, line # {}\\n File: {}\".format(\n line_num, fastq_fp))\n for x in seq.upper():\n if x not in [\"A\",\"C\",\"T\",\"G\",\"N\"]:\n raise Exception(\"Sequence value {} not recognized. Line # {}\\n File: {}\".format(\n x, line_num + 1, fastq_fp))\n if not break_line[0] == \"+\":\n raise Exception(\"Break line not '+'. Instead '{}'. Line # {}\\n File: {}\".format(\n break_line[0],line_num + 2, fastq_fp))\n if not len(quality) == len(seq):\n raise Exception(\"Quality line wrong length. Lines # {}\\n File: {}\".format(\n line_num + 3, fastq_fp))", "def read_qual_score_filter(seq, qual, max_run_length, threshold):\r\n last_good_slice_end_pos = 0\r\n bad_run_length = 0\r\n for i in range(len(seq)):\r\n if qual[i] <= threshold:\r\n bad_run_length += 1\r\n else:\r\n bad_run_length = 0\r\n last_good_slice_end_pos = i + 1\r\n\r\n if bad_run_length > max_run_length:\r\n return seq[:last_good_slice_end_pos],\\\r\n qual[:last_good_slice_end_pos]\r\n\r\n # There were no runs that were too bad for too long\r\n return seq, qual", "def get_best_read_position(ref_genome, read, positions, thresh):\n least = 100\n best_pos = None\n for p in positions:\n num_mismatches = get_num_mismatches(read, ref_genome, p)\n if num_mismatches < thresh and num_mismatches < least:\n least = num_mismatches\n best_pos = p\n\n return best_pos", "def parse_sam(self, sam_handle, append_chr=False):\n vargroup_reads = np.asarray([read\n for read in sam_handle.fetch(self.get_chr(append_chr), self.pos, self.end)\n if not read.is_duplicate])\n\n # Convert some key read information into a dataframe to speed up filtering\n read_df = pd.DataFrame(columns=['rn', 'start', 'end', 'read', 'indels'],\n data=[(rn, read.reference_start, read.aend, read, get_indel_from_cigar(read.cigar))\n for rn, read in enumerate(vargroup_reads)])\n\n reads_coverage = np.zeros((len(vargroup_reads), len(self.variant_list)))\n reads_existence = np.zeros((len(vargroup_reads), len(self.variant_list)))\n\n if len(vargroup_reads) == 0:\n print('Warning: No reads found at {}:{}-{}'.format(self.chrom, self.pos, self.end))\n return self._build_existence_matrix(reads_existence, reads_coverage)\n\n # pylint: disable=invalid-name\n for vn, variant in enumerate(self.variant_list):\n # Cache variant properties: those lookups are expensive in PySam\n var_type = variant.var_type\n is_indel = variant.is_indel\n is_deletion = variant.is_deletion\n\n read_overlap_mask = (read_df['start'] <= variant.POS) & (read_df['end'] >= variant.POS)\n\n # Coverage is easy: all reads which overlap this variant get a coverage of 1\n reads_coverage[read_overlap_mask, vn] = 1\n\n # SNPs\n if var_type == 'snp':\n # for rn, read, indels in itertools.izip(read_df[read_overlap_mask]['rn'], # python2\n for rn, read, indels in zip(read_df[read_overlap_mask]['rn'], read_df[read_overlap_mask]['read'],\n read_df[read_overlap_mask]['indels']):\n # get start position using the cigar string to find the offset\n variant_start = self._get_start(variant, read.reference_start, 
read.cigar, ignore_softclip=True)\n # If the base matches the alternate read add it to the existence array\n read_alt = read.query[variant_start: variant.end - variant.POS + variant_start + 1]\n if read_alt == variant.ALT[0].sequence:\n reads_existence[rn, vn] = 1\n\n # Insertions/Deletions\n elif is_indel:\n # for rn, read, indels in itertools.izip(read_df[read_overlap_mask]['rn'], # python2\n for rn, read, indels in zip(read_df[read_overlap_mask]['rn'], read_df[read_overlap_mask]['read'],\n read_df[read_overlap_mask]['indels']):\n iloc = self._get_indel_pos(variant.POS, read)\n # If the insertion/deletion exist in the cigar string add it to the existence array\n if is_deletion and iloc in indels and indels[iloc][0] == 'D': # Deletions\n reads_existence[rn, vn] = 1\n elif not is_deletion and iloc in indels and indels[iloc][0] == 'I': # Insertions\n if variant.ALT[0] == read.seq[iloc:iloc + 1 + indels[iloc][1]]:\n reads_existence[rn, vn] = 1\n else:\n print('Warning: Unknown type found: {}'.format(variant.var_type))\n\n return self._build_existence_matrix(reads_existence, reads_coverage)", "def extractMappedRead(self, aln, windowStart):\n if isinstance(aln, CmpH5Alignment):\n die(\"Arrow does not support CmpH5 files!\")\n\n assert aln.referenceSpan > 0\n\n def baseFeature(featureName):\n if aln.reader.hasBaseFeature(featureName):\n rawFeature = aln.baseFeature(featureName, aligned=False, orientation=\"native\")\n return rawFeature.clip(0,255).astype(np.uint8)\n else:\n return np.zeros((aln.readLength,), dtype=np.uint8)\n\n name = aln.readName\n chemistry = aln.sequencingChemistry\n strand = cc.StrandType_REVERSE if aln.isReverseStrand else cc.StrandType_FORWARD\n read = cc.Read(name,\n aln.read(aligned=False, orientation=\"native\"),\n cc.Uint8Vector(baseFeature(\"Ipd\").tolist()),\n cc.Uint8Vector(baseFeature(\"PulseWidth\").tolist()),\n cc.SNR(aln.hqRegionSnr),\n chemistry)\n return cc.MappedRead(read,\n strand,\n int(aln.referenceStart - windowStart),\n int(aln.referenceEnd - windowStart))", "def read(self, nbytes: int, /) -> Optional[bytes]:", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def readinto(self, buf: bytes, nbytes: int, /) -> Optional[int]:", "def get_RISCC_pos_from_read_pos(read_aln_or_pos, cassette_end, relative_read_direction='inward', immutable_position=True):\n check_valid_end_info(cassette_end, relative_read_direction)\n imaginary_relative_direction= ('outward' if relative_read_direction=='inward' else 'inward')\n imaginary_cassette_position = get_insertion_pos_from_flanking_region_pos(read_aln_or_pos, cassette_end, \n imaginary_relative_direction)\n if imaginary_cassette_position in SPECIAL_POSITIONS.all_undefined:\n return imaginary_cassette_position\n real_strand = ('-' if imaginary_cassette_position.strand=='+' else '+')\n return Insertion_position(imaginary_cassette_position.chromosome, real_strand, \n 
full_position=imaginary_cassette_position.full_position, immutable=immutable_position)", "def add_read(self, HTSeq_alignment, position=SPECIAL_POSITIONS.unknown, read_count=1, dataset_name=None):\n readcount_data_container = self._check_dataset_name_return_data(dataset_name)\n Insertional_mutant.add_read(readcount_data_container, HTSeq_alignment, position, read_count)", "def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if", "def align_reads(read_fp, # FASTQ file path\n db_fp, # Local path to DB\n temp_folder, # Folder for results\n query_gencode=11, # Genetic code\n threads=1, # Threads\n min_score=20, # Minimum alignment score\n blocks=4, # Memory block size\n top=10, # Report alignments >10% from max\n min_id=80, # Minimum alignment identity\n qcov=95): # Minimum query coverage\n\n align_fp = \"{}.aln\".format(read_fp)\n logging.info(\"Input reads: {}\".format(read_fp))\n logging.info(\"Reference database: {}\".format(db_fp))\n logging.info(\"Genetic code: {}\".format(query_gencode))\n logging.info(\"Threads: {}\".format(threads))\n logging.info(\"Output: {}\".format(align_fp))\n\n run_cmds([\n \"diamond\",\n \"blastx\",\n \"--query\", read_fp, # Input FASTQ\n \"--out\", align_fp, # Alignment file\n \"--threads\", str(threads), # Threads\n \"--db\", db_fp, # Reference database\n \"--outfmt\", \"6\", # Output format\n \"qseqid\", \"sseqid\",\n \"pident\", \"length\",\n \"mismatch\", \"gapopen\",\n \"qstart\", \"qend\",\n \"sstart\", \"send\",\n \"evalue\", \"bitscore\",\n \"qlen\", \"slen\",\n \"--min-score\", str(min_score), # Minimum alignment score\n \"--query-cover\", str(qcov), # Minimum query coverage\n \"--id\", str(min_id), # Minimum alignment identity\n \"--top\", str(top), # Report alignments >10% from max\n \"--block-size\", str(blocks), # Memory block size\n \"--query-gencode\", # Genetic code\n str(query_gencode),\n \"--unal\", \"0\", # Don't report unaligned reads\n ])\n\n return align_fp", "def read(self, nbytes: Optional[int] = None) -> Optional[bytes]:\n ...", "def read(self, nbytes, /) -> bytes | None:" ]
[ "0.57073915", "0.55153537", "0.5060607", "0.4848363", "0.48103166", "0.47505322", "0.4749512", "0.47271252", "0.46359605", "0.46293926", "0.4597865", "0.4596303", "0.45541385", "0.4539164", "0.45315552", "0.45100403", "0.45100108", "0.44856367", "0.448304", "0.44561794", "0.44454443", "0.44386494", "0.44269437", "0.44251367", "0.44089183", "0.4379779", "0.43614843", "0.43541542", "0.43355522", "0.43077958" ]
0.6515178
0
Like getOneRead, but writes the sequence to stream `out' instead of returning it. If the read would overlap a header line, return False without writing anything.
def writeOneRead(self, pos, probs, out): if not self.isValid(pos, pos + self.readlen): return False f = self.stream f.seek(pos) n = 0 while True: b = f.read(1) if b not in "ACGTNXacgtnx": continue if random.random() < probs[n]: while True: nb = random.choice('ACGT') if nb != b: b = nb break out.write(b) n += 1 if n == self.readlen: break return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_single_end(self, reads):\n if len([read for read in reads if read.get(\"IsIndexedRead\",\"N\") == \"N\"]) == 1:\n return True\n return False", "def is_first_read(flag):\n IS_FIRST_SEGMENT = 0x40\n return (int(flag) & IS_FIRST_SEGMENT) != 0", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def check(header, out):\r\n for i in range(len(header)):\r\n if header[i] > 0:\r\n if header[i] != int(out[i]):\r\n return False\r\n return True", "def read(reader: BitStreamReader, _index: int) -> bool:\n\n return reader.readBool()", "def read(pipe, line):\n\n c = pipe.read(1)\n if c != \"\":\n o = c.decode('utf-8')\n if o != '\\n':\n line += o\n return line, False\n else:\n return line, True\n else:\n return line, False", "def readable(self):\n # cannot use the old predicate, it violates the claim of the\n # set_terminator method.\n #return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)\n return self.receive", "def sequential_read(out_file_name, num_reads, offset, num_address_bits, num_data_bits):\n\twith open(out_file_name, 'w') as out_file:\n\t\tout_file.write('v2.0 raw\\n')\n\t\tread = 1 << (num_address_bits + num_data_bits + 1)\n\t\tdone = 1 << (num_address_bits + num_data_bits + 1 + 1)\n\t\tfor i in range(offset, offset+num_reads):\n\t\t\taddress = i << num_data_bits\n\t\t\tout_file.write('%x #read addr %d\\n' % ((read | address), i))\n\t\t\t\n\t\tout_file.write('%x #do nothing\\n' % (0)) #just a quick pause\n\t\tout_file.write('%x #complete testing and do nothing\\n' % (done))", "def has_more_lines(self):\n pos = self.stream.tell()\n res = self.stream.readline() != ''\n self.stream.seek(pos)\n return res", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None", "def was_read(link):\n return neg.find_one({'hash': get_hash(link)}) or pos.find_one({'hash': get_hash(link)}) \\\n or saved.find_one({'hash': get_hash(link)})", "def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def more_data(pipe_out):\n r, _, _ = select.select([pipe_out], [], [], 0)\n return bool(r)", "def IsDuplicate(self, header, payload_string, cur_time): # pylint: disable=unused-argument\n last_seq = self._recv_seq_nums[(header.source, header.type)]\n last_time = self._recv_times[(header.source, header.type)]\n cur_seq = header.sequence\n\n # Sequence numbers expire after maximum latency.\n if cur_time - last_time < aio_header.AIO_EXPIRATION_TIME_US * 1e-6:\n # Expected duplication.\n if cur_seq == last_seq:\n return True\n # Out of order.\n if (cur_seq - last_seq) % 2**16 > aio_header.AIO_ACCEPTANCE_WINDOW:\n return True\n return False", "def test_fasta_one_sequence(self):\n record = 
list(SeqIO.parse(\"Registry/seqs.fasta\", \"fasta\"))[0]\n input_file = \"seq.fasta\"\n with open(input_file, \"w\") as handle:\n SeqIO.write(record, handle, \"fasta\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n self.add_file_to_clean(input_file)\n self.standard_test_procedure(cline)", "def write_out_on_get_next(self, arg: Name):\n res = self.get_next(arg)\n while res and self.check_end_streaming(res) is False:\n self.write_out(res)\n res = self.get_next(arg)\n self.last_write_out()", "def next(self):\r\n\t\tself.index += 1\r\n\t\treturn not self.eof()", "def at_eof(self):\n return self.tell() == len(self)", "def at_eof(self):\n return self._eof and not self._buffer", "def is_next(first: Node, second: Node) -> bool:\n dests = first.out_port(0).get_destinations()\n if node_has_one_consumer(first):\n return second.id == dests[0].node.id\n elif first.soft_get('maybe_part_of_sequence', False):\n return len(dests) == 2 and second.id in [d.node.id for d in dests]\n return False", "def _recv(self, timeout=0):\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException: # pragma: debug\n # Use this to catch case where close called during receive.\n # In the future this should be handled via a lock.\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug(\"Advanced to %d\", self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return (flag, out)", "def readable (self):\r\n return len(self.ac_in_buffer) <= self.ac_in_buffer_size", "def getOut(self) -> bool:\n res, resp = self.send('GOUT{}')\n if not res or resp is None:\n raise SSP_9081_Exception(\"no response\")\n return '1' in resp", "def has_next(self):\n regf = self.first_hbin().parent()\n if regf.hbins_size() + regf.first_hbin_offset() == self._offset_next_hbin:\n return False\n\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "async def read_one_message(self):\n if not self.connected:\n return None\n\n try:\n header = await self.reader.readexactly(2)\n except SocketError as err:\n if err.errno == errno.ECONNRESET:\n self.log.error('Connection reset by peer')\n self.connected = False\n if err.errno == errno.EHOSTUNREACH:\n self.log.error('Spa unreachable')\n self.connected = False\n else:\n self.log.error('Spa socket error: {0}'.format(str(err)))\n return None\n except Exception as e:\n self.log.error('Spa read failed: {0}'.format(str(e)))\n return None\n\n if header[0] == M_START:\n # header[1] is size, + checksum + M_END (we already read 2 tho!)\n rlen = header[1]\n else:\n return None\n\n # now get the rest of the data\n try:\n data = await self.reader.readexactly(rlen)\n except Exception as e:\n self.log.errpr('Spa read failed: {0}'.format(str(e)))\n return None\n\n full_data = header + data\n # don't count M_START, M_END or CHKSUM (remember that rlen is 2 short)\n crc = messages.Message.crc(full_data[1:rlen - 1])\n if crc != full_data[-2]:\n self.log.error('Message had bad CRC, discarding')\n return None\n\n # self.log.error('got update: {}'.format(full_data.hex()))\n return full_data", "def can_write_eof(self):\n return True", "def isComplete(self):\n return self.bytesToRead == 0", "def _get_next_packet(self):\n offset_check = self.packet_counter * CsvAbstractReader.BUFFER_SIZE\n header = 
{'Range': 'bytes={}-{}'.format(offset_check, offset_check + CsvAbstractReader.BUFFER_SIZE - 1)}\n try:\n packet = self.s3_file.get_contents_as_string(headers=header, encoding='utf-8')\n return True, packet\n except:\n return False, \"\"" ]
[ "0.54492384", "0.5370275", "0.5135002", "0.5129447", "0.5108733", "0.5068222", "0.5045536", "0.5004825", "0.49932212", "0.4987386", "0.49309695", "0.48679584", "0.48513126", "0.48447204", "0.4844478", "0.48289055", "0.4799689", "0.47929904", "0.47920433", "0.47609288", "0.47599202", "0.47468495", "0.47444603", "0.4722997", "0.47045863", "0.47005254", "0.46971294", "0.4696403", "0.46945226", "0.46757916" ]
0.62499744
0
Return the sequence of a read starting at position `s' using quality scores `q' from stream `f'.
def getOneRead(self, f, q, s): probs = np.power(10, q / -10) bases = [] f.seek(s) n = 0 while True: b = f.read(1) if b == "\n": continue if random.random() < probs[n]: b = random.choice('ACGT') else: b = self.getAllele(b, f.tell() - 1) bases.append(b) n += 1 if n == self.readlen: break return bases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readfq(fp): # this is a generator function\n last = None # this is a buffer keeping the last unprocessed line\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this line\n break\n if not last: break\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last: break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs); # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break", "def readFastq(filename):\n sequences = []\n qualities = []\n \n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() #read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n \n return sequences, qualities", "def readFastq(filename):\n\tsequences = []\n\tqualities = []\n\twith open(filename, 'r') as f:\n\t\twhile True: \n\t\t\tf.readline() # skip name line\n\t\t\tseq = f.readline().rstrip()\n\t\t\tf.readline() # skip place holder line \n\t\t\tq = f.readline().rstrip()\n\t\t\tif len(seq) ==0:\n\t\t\t\tbreak \n\t\t\tsequences.append(seq)\n\t\t\tqualities.append(q)\n\treturn sequences, qualities", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def read_seqs(f):\n while True:\n # Read the sequence ID. 
If there's nothing to read, then we're done.\n try:\n seq_id = read_line(f)\n except EOFError:\n return\n\n # If we successfully read a sequence ID, then running out of stuff to\n # read means a truncated record.\n try:\n seq = str_to_byte_array(read_line(f))\n qual_id = read_line(f)\n qual = str_to_byte_array(read_line(f))\n except EOFError:\n raise EOFError('EOF while reading sequence.')\n\n # Some simple checks of the data.\n if seq_id[0] != '@':\n raise ValueError(\"Sequence ID doesn't begin with '@'.\")\n if qual_id[0] != '+':\n raise ValueError(\"Quality ID doesn't begin with '+'.\")\n if len(seq) != len(qual):\n raise ValueError(\"Sequence and quality are different lengths.\")\n\n yield (seq_id, seq, qual_id, qual)", "def stream_fastq(fqfile):\n\n if fqfile.endswith('.gz'):\n qin = gzip.open(fqfile, 'rb')\n else:\n qin = open(fqfile, 'r')\n\n while True:\n header = qin.readline()\n if not header:\n break\n header = header.strip()\n seqidparts = header.split(' ')\n seqid = seqidparts[0]\n seq = qin.readline()\n seq = seq.strip()\n qualheader = qin.readline()\n qualscores = qin.readline()\n qualscores = qualscores.strip()\n header = header.replace('@', '', 1)\n yield seqid, header, seq, qualscores", "def get_sequence( f ):\r\n sequence = ''\r\n line = f.readline().rstrip()\r\n while line:\r\n sequence += line\r\n line = f.readline().rstrip()\r\n return sequence", "def process_fastq_single_end_read_file(fastq_read_f,\r\n fastq_barcode_f,\r\n barcode_to_sample_id,\r\n store_unassigned=False,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length_fraction=0.75,\r\n rev_comp=False,\r\n rev_comp_barcode=False,\r\n seq_max_N=0,\r\n start_seq_id=0,\r\n filter_bad_illumina_qual_digit=False,\r\n log_f=None,\r\n histogram_f=None,\r\n barcode_correction_fn=None,\r\n max_barcode_errors=1.5,\r\n strict_header_match=True,\r\n phred_to_ascii_f=None):\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n seq_id = start_seq_id\r\n # grab the first lines and then seek back to the beginning of the file\r\n try:\r\n fastq_read_f_line1 = fastq_read_f.readline()\r\n fastq_read_f_line2 = fastq_read_f.readline()\r\n fastq_read_f.seek(0)\r\n except AttributeError:\r\n fastq_read_f_line1 = fastq_read_f[0]\r\n fastq_read_f_line2 = fastq_read_f[1]\r\n\r\n post_casava_v180 = is_casava_v180_or_later(fastq_read_f_line1)\r\n if post_casava_v180:\r\n offset = 33\r\n check_header_match_f = check_header_match_180_or_later\r\n else:\r\n offset = 64\r\n check_header_match_f = check_header_match_pre180\r\n\r\n # compute the barcode length, if they are all the same.\r\n # this is useful for selecting a subset of the barcode read\r\n # if it's too long (e.g., for technical reasons on the sequencer)\r\n barcode_lengths = set([len(bc)\r\n for bc, sid in barcode_to_sample_id.items()])\r\n if len(barcode_lengths) == 1:\r\n barcode_length = barcode_lengths.pop()\r\n else:\r\n barcode_length = None\r\n\r\n # compute the minimum read length as a fraction of the length of the input\r\n # read\r\n min_per_read_length = min_per_read_length_fraction * \\\r\n len(fastq_read_f_line2)\r\n\r\n # prep data for logging\r\n input_sequence_count = 0\r\n count_barcode_not_in_map = 0\r\n count_too_short = 0\r\n count_too_many_N = 0\r\n count_bad_illumina_qual_digit = 0\r\n count_barcode_errors_exceed_max = 0\r\n sequence_lengths = []\r\n seqs_per_sample_counts = {}\r\n for bc_data, read_data in izip(\r\n parse_fastq(fastq_barcode_f, strict=False, phred_offset=offset),\r\n parse_fastq(fastq_read_f, 
strict=False, phred_offset=offset)):\r\n input_sequence_count += 1\r\n # Confirm match between barcode and read headers\r\n if strict_header_match and \\\r\n (not check_header_match_f(bc_data[header_index], read_data[header_index])):\r\n raise FastqParseError(\"Headers of barcode and read do not match. Can't continue. \"\r\n \"Confirm that the barcode fastq and read fastq that you are \"\r\n \"passing match one another.\")\r\n else:\r\n header = read_data[header_index]\r\n\r\n # Grab the barcode sequence\r\n if barcode_length:\r\n # because thirteen cycles are sometimes used for\r\n # techical reasons, this step looks only at the\r\n # first tweleve bases. note that the barcode is\r\n # rev-comp'ed after this step if requested since\r\n # the thirteen base is a technical artefact, not\r\n # barcode sequence.\r\n barcode = bc_data[sequence_index][:barcode_length]\r\n else:\r\n barcode = bc_data[sequence_index]\r\n if rev_comp_barcode:\r\n barcode = str(DNA(barcode).rc())\r\n # Grab the read sequence\r\n sequence = read_data[1]\r\n # Grab the read quality\r\n quality = read_data[2]\r\n\r\n # correct the barcode (if applicable) and map to sample id\r\n num_barcode_errors, corrected_barcode, correction_attempted, sample_id = \\\r\n correct_barcode(\r\n barcode,\r\n barcode_to_sample_id,\r\n barcode_correction_fn)\r\n # skip samples with too many errors\r\n if (num_barcode_errors > max_barcode_errors):\r\n count_barcode_errors_exceed_max += 1\r\n continue\r\n\r\n # skip unassignable samples unless otherwise requested\r\n if sample_id is None:\r\n if not store_unassigned:\r\n count_barcode_not_in_map += 1\r\n continue\r\n else:\r\n sample_id = 'Unassigned'\r\n\r\n quality_filter_result, sequence, quality =\\\r\n quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length,\r\n phred_quality_threshold,\r\n min_per_read_length,\r\n seq_max_N,\r\n filter_bad_illumina_qual_digit)\r\n\r\n # process quality result\r\n if quality_filter_result != 0:\r\n # if the quality filter didn't pass record why and\r\n # move on to the next record\r\n if quality_filter_result == 1:\r\n count_too_short += 1\r\n elif quality_filter_result == 2:\r\n count_too_many_N += 1\r\n elif quality_filter_result == 3:\r\n count_bad_illumina_qual_digit += 1\r\n else:\r\n raise ValueError(\r\n \"Unknown quality filter result: %d\" %\r\n quality_filter_result)\r\n continue\r\n\r\n sequence_lengths.append(len(sequence))\r\n\r\n try:\r\n seqs_per_sample_counts[sample_id] += 1\r\n except KeyError:\r\n seqs_per_sample_counts[sample_id] = 1\r\n\r\n if rev_comp:\r\n sequence = str(DNA(sequence).rc())\r\n quality = quality[::-1]\r\n\r\n fasta_header = '%s_%s %s orig_bc=%s new_bc=%s bc_diffs=%d' %\\\r\n (sample_id, seq_id, header, barcode,\r\n corrected_barcode, num_barcode_errors)\r\n yield fasta_header, sequence, quality, seq_id\r\n seq_id += 1\r\n\r\n # Add sample IDs with zero counts to dictionary for logging\r\n for curr_sample_id in barcode_to_sample_id.values():\r\n if curr_sample_id not in seqs_per_sample_counts.keys():\r\n seqs_per_sample_counts[curr_sample_id] = 0\r\n\r\n if log_f is not None:\r\n log_str = format_split_libraries_fastq_log(count_barcode_not_in_map,\r\n count_too_short,\r\n count_too_many_N,\r\n count_bad_illumina_qual_digit,\r\n count_barcode_errors_exceed_max,\r\n input_sequence_count,\r\n sequence_lengths,\r\n seqs_per_sample_counts)\r\n log_f.write(log_str)\r\n\r\n if len(sequence_lengths) and histogram_f is not None:\r\n counts, bin_edges = make_histograms(sequence_lengths)\r\n 
histogram_str = format_histogram_one_count(counts, bin_edges)\r\n histogram_f.write(histogram_str)\r\n histogram_f.write('\\n--\\n\\n')", "def FastqIterator(fh):\n def readTotitle(fh, titleChar):\n \"\"\"returns a tuple ([lines before the next title line], next tile line)\n \"\"\"\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)\n\n if type(fh) in StringTypes:\n fh = file(fh)\n\n preLines,nextTitleLine =readTotitle(fh,'@')\n\n while nextTitleLine != None:\n seqTitle = nextTitleLine[1:].rstrip()\n preLines,nextTitleLine=readTotitle(fh,'+')\n qualTitle = nextTitleLine[1:].rstrip()\n if len(qualTitle.strip()) > 0 and seqTitle != qualTitle:\n print seqTitle\n print preLines\n print qualTitle\n raise hmmErrors.InvalidFastq, \"Error in parsing: @title sequence entry must be immediately followed by corresponding +title quality entry.\"\n seqLines = preLines\n qualLines = []\n for i in range(len(seqLines)): # Quality characters should be the same length as the sequence\n qualLines.append( fh.readline().strip() )\n\n preLines,nextTitleLine=readTotitle(fh,'@')\n\n yield (seqTitle, ''.join(seqLines), ''.join(qualLines))", "def quality_matcher(fasta, full_fastq, filt_fastq, trunclen):\n with open(fasta, \"r\") as fasta, open(full_fastq, \"r\") as fastq, open(filt_fastq, \"w\") as new_fastq:\n #make lists of the fasta and fastq files, where every successive value is a successive line\n #purpose of -1: to avoid the \"\\n\" newline character at the end of the lines\n fastq_list = [line[:-1] for line in fastq]\n fasta_list = [line[:-1] for line in fasta]\n #iterate through the sequence ids in the fasta file\n for fasta_index, fasta_id in enumerate(fasta_list):\n if fasta_id[0] == \">\":\n #get the list index of the matching sequence id in the metagenomic fastq file\n fastq_index = fastq_list.index(\"@{}\".format(fasta_id[1:]))\n #print and write a new fastq entry with the quality scores string truncated to the same length as the sequence from the fasta file\n print(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))\n new_fastq.write(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))", "def parse_qual_score(infile, value_cast_f=int):\r\n id_to_qual = dict([rec for rec in MinimalQualParser(infile, value_cast_f)])\r\n return id_to_qual", "def get_features(fastq_input,\n label=None,\n subportions=3,\n positions=(0, 0),\n header=False,\n reduced=True,\n output=sys.stdout,\n debug=False):\n def get_qual_features(qual_ascii, reduced=False):\n seq_qual_prob = list(\n map(lambda x: transform_phred_to_prob(x, offset=offset),\n qual_ascii))\n return quality_features(seq_qual_prob, reduced=reduced)\n\n reader = SeqReader(fastq_input, file_type='fastq')\n position = 0\n x, y = positions[0], positions[1]\n if x and y and y < x:\n x, y = y, x\n count = 0\n my_header = []\n if header:\n if reduced:\n my_header = HEADER[0:4]\n else:\n my_header = HEADER.copy()\n for i in range(subportions + subportions - 1):\n for item in my_header:\n my_header.append(item + \"_\" + str(i + 1))\n if label:\n my_header.append(\"label\")\n if output:\n print(\"\\t\".join(my_header), file=sys.stdout)\n all_features = []\n all_labels = []\n for record in 
reader:\n features = []\n _, read, qual, _ = record\n position += 1\n if len(qual) == 0 or (x and (position < x or position > y)):\n continue\n count += 1\n offset = get_offset(qual)\n if debug:\n print(\"{} Qual len:{} Offset: {}\".format(count, len(qual), offset),\n file=sys.stderr)\n features += get_qual_features(qual, reduced=reduced)\n totallength = int(len(read) / subportions)\n halflength = int(totallength / 2)\n for i in range(subportions + subportions - 1):\n if i == subportions + subportions - 2:\n finallength = len(read)\n else:\n finallength = i * halflength + totallength\n if debug:\n sys.stderr.write(\n \"{} Read len:{} Offset: {} Begin: {}, End: {} {} \\n\".\n format(count, len(read), offset, i * halflength,\n finallength, qual[i * halflength:finallength]))\n sys.stderr.flush()\n features += get_qual_features(qual[i * halflength:finallength],\n reduced)\n if label and output:\n features.append(label)\n elif label and not output:\n all_labels.append(label)\n if output:\n print(\"\\t\".join(map(str, features)), file=output)\n else:\n all_features.append(features)\n if not output:\n return count, all_features, all_labels, my_header\n return count", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def fqs(tokens, q):\n\n totalLength = len(tokens)\n for length in range(1, totalLength + 1): #need to look at all possible subsets of tokens\n for start in range(totalLength - length + 1):\n\n end = start + length\n pTokens = tokens[start:end] #the subphrase we're examining\n\n b = 0.\n if not start: #phrase begins at the start of the sentence\n b += 1.\n if end == totalLength: #phrase ends at end of tokens\n b += 1.\n\n f = q**(2. - b)*(1 - q)**(length - 1.) 
#this is our formulation for probability\n\n #spit out a tuple of coordinates and prob of the phrase, but only if prob is nonzero\n if f:\n yield [start, end] , f", "def q(self, s, a):\n # The Q value of the current state is based on the max Q value of the next state.\n next_state_max_q = max([self.qtable[s[0]+x][s[1]+y] for (x,y) in self.maze.moves()])\n self.qtable[s[0]+a[0]][s[1]+a[1]] = (self.qtable[s[0]+a[0]][s[1]+a[1]]\n + self.alpha * (self.r(s,a) + self.gamma * next_state_max_q\n - self.qtable[s[0]+a[0]][s[1]+a[1]]))\n\n return self.qtable[s[0]+a[0]][s[1]+a[1]]", "def sequence(self, f, asstring=True):\n\n assert \"chr\" in f, \"`chr` field required\"\n name = f[\"chr\"]\n\n assert name in self, \"feature: %s not in `%s`\" % (f, self.filename)\n\n fasta = self[f[\"chr\"]]\n\n seq = Fasta.subseq(fasta, f.get(\"start\"), f.get(\"stop\"), f.get(\"strand\"))\n\n if asstring:\n return str(seq)\n\n return seq", "def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def format_read_as_fna(read, qual=False):\r\n # TODO: Move to PyCogent\r\n out = StringIO()\r\n out.write('>%s' % read['Name'])\r\n\r\n # Roche uses 1-based indexing, where the right index is inclusive.\r\n # To transform to 0-based indices, where the right index is not\r\n # inclusive, we subtract 1 from the left index, but leave the\r\n # right index intact.\r\n\r\n start_idx = read['clip_qual_left'] - 1\r\n end_idx = read['clip_qual_right']\r\n\r\n # A surprising result is produced if the number of cycles are\r\n # adjusted such that no bases remain past clip_qual_left. In the\r\n # clipping routine, the Roche software sets clip_qual_left to be\r\n # equal to the number of bases. Using our indexing scheme, the\r\n # resulting sequence is of length 1 after clipping (one would\r\n # expect a length of 0). We would fix this issue, if the effect\r\n # were not present in the output from Roche's sffinfo program. 
We\r\n # retain this arguably incorrect behavior to be consistent with\r\n # the reference implementation.\r\n\r\n out.write(' length=%d' % (end_idx - start_idx))\r\n\r\n timestamp, _, region, location = decode_accession(read['Name'])\r\n out.write(' xy=%04d_%04d' % location)\r\n out.write(' region=%d' % region)\r\n out.write(' run=R_%d_%02d_%02d_%02d_%02d_%02d_' % timestamp)\r\n out.write('\\n')\r\n\r\n if qual:\r\n scores = read['quality_scores'][start_idx:end_idx]\r\n out.write(' '.join(['%d' % s for s in scores]))\r\n else:\r\n bases = read['Bases'][start_idx:end_idx]\r\n out.write(bases)\r\n out.write('\\n')\r\n return out.getvalue()", "def graphQualityPerPosition(inputFastq):\n\n histD = {}\n\n count = 0\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n count += 1\n if count < 200000:\n continue\n if count > 1200000:\n break\n\n qInts = convertQualityStr(qualityStr) \n for i in range(len(qInts)):\n q = qInts[i]\n if q < 0 or q > 40:\n raise Exception(\"Invalid quality value %s at position %s of %s\" % (q, i, qualityStr))\n\n if not histD.has_key(i):\n histD[i] = [0]*41\n\n histD[i][q] += 1\n\n print \"Histogram of quality score per position\"\n allk = histD.keys()\n allk.sort()\n for k in allk:\n print \"%s|\" % k, \"|\".join(str(x) for x in histD[k])", "def scarf_to_fastq(infile=sys.stdin, outfile=sys.stdout):\n infile = open_gzipped(infile)\n outfile = open_gzipped(outfile, 'wt')\n for line in infile:\n fields = line.rstrip().split(':')\n qual = fields.pop()\n seq = fields.pop()\n outfile.write('{0}\\n{1}\\n+\\n{2}\\n'.format(\n '@' + ':'.join(fields),\n seq,\n qual))", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def nuc2nqual_se(records, args):\n for record in records:\n changepos = [i for i, x in enumerate(record.letter_annotations['phred_quality']) if x < args.q]\n outread = SeqRecord.SeqRecord(record.seq.tomutable())\n outread.id = record.id\n outread.name = record.name\n outread.description = record.description\n outread.letter_annotations = record.letter_annotations\n for i in changepos:\n outread.seq[i] = 'N'\n yield outread", "def single_reads(reads_pos):\n #TODO: use more complicated estimator?\n return (len(reads_pos) + 1) / (len(reads_pos) - 1) * (reads_pos[-1] - reads_pos[0])", "def fastq(args):\n from jcvi.formats.fastq import FastqLite\n\n p = OptionParser(fastq.__doc__)\n p.add_option(\"--qv\", type=\"int\", help=\"Use generic qv value\")\n\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n fastqfile = fastafile.rsplit(\".\", 1)[0] + \".fastq\"\n fastqhandle = open(fastqfile, \"w\")\n num_records = 0\n\n if opts.qv is not None:\n qv = chr(ord(\"!\") + opts.qv)\n logging.debug(\"QV char '{0}' ({1})\".format(qv, opts.qv))\n else:\n qv = None\n\n if qv:\n f = Fasta(fastafile, lazy=True)\n for name, rec in f.iteritems_ordered():\n r = FastqLite(\"@\" + name, str(rec.seq).upper(), qv * len(rec.seq))\n print(r, file=fastqhandle)\n num_records += 1\n\n else:\n qualfile = get_qual(fastafile)\n for rec in 
iter_fasta_qual(fastafile, qualfile):\n SeqIO.write([rec], fastqhandle, \"fastq\")\n num_records += 1\n\n fastqhandle.close()\n logging.debug(\"A total of %d records written to `%s`\" % (num_records, fastqfile))", "def iseq_to_qseq_fields(line, barcode_in_header,\r\n barcode_length, barcode_qual_c='b'):\r\n record = line.strip().split(':')\r\n rec_0_1, rec_0_2 = record[0].split('_')\r\n rec_4_1, rec_4_23 = record[4].split('#')\r\n rec_4_2, rec_4_3 = rec_4_23.split('/')\r\n if barcode_in_header:\r\n barcode = rec_4_2[:barcode_length]\r\n sequence = record[5]\r\n barcode_qual = barcode_qual_c * barcode_length\r\n sequence_qual = record[6]\r\n else:\r\n barcode = record[5][:barcode_length]\r\n sequence = record[5][barcode_length:]\r\n barcode_qual = record[6][:barcode_length]\r\n sequence_qual = record[6][barcode_length:]\r\n return (rec_0_1, rec_0_2, record[1], record[2], record[3],\r\n rec_4_1, rec_4_2, rec_4_3), sequence, sequence_qual,\\\r\n barcode, barcode_qual", "def trim_quality(self, reads):\n cut = self.quality_cutoff * 3\n start = 0\n qscores = reads[0][3]\n qual = ord(qscores[0]) + ord(qscores[1]) + ord(qscores[2]) - 99\n while qual < cut:\n start += 1\n try:\n qual += ord(qscores[start + 2]) - ord(qscores[start - 1])\n except IndexError:\n break\n stop = len(qscores)\n qual = ord(qscores[-1]) + ord(qscores[-2]) + ord(qscores[-3]) - 99\n while qual < cut:\n stop -= 1\n try:\n qual += ord(qscores[stop - 3]) - ord(qscores[stop])\n except IndexError:\n break\n reads[0][1] = reads[0][1][start:stop]\n reads[0][3] = reads[0][3][start:stop]", "def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)", "def qscore(seq):\n return [ord(i)-33 for i in seq.upper()]", "def est_read_len(fq, reads=100):\n if is_gz_file(fq):\n openf = gzip.open\n else:\n openf = open\n readlens = []\n with openf(fq) as f:\n try: # File less than 4*reads lines\n for _ in range(reads):\n next(f)\n 
readlens.append(len(next(f).strip()))\n next(f), next(f)\n except:\n pass\n return median(readlens)" ]
[ "0.6454674", "0.63423574", "0.6075785", "0.5916024", "0.5780577", "0.5768713", "0.5536341", "0.5424657", "0.53865683", "0.538002", "0.5313396", "0.53032666", "0.5296372", "0.52853894", "0.5264895", "0.5221116", "0.52116334", "0.51880556", "0.5100524", "0.5074263", "0.50645083", "0.50601923", "0.5057293", "0.5035454", "0.5014935", "0.49758312", "0.49714828", "0.49549475", "0.49314705", "0.49307618" ]
0.64760786
0
Returns a list of quality values sampling from the qavgs and qstdevs distribution.
def genQuality(self): return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def samples(self):\n if self._samples:\n return self._samples\n if SAMPLE_DF_KEY not in self or self[SAMPLE_DF_KEY] is None:\n _LOGGER.debug(\"No samples are defined\")\n return []", "def get_parameters_affecting_quality(self, quality: str) -> List[str]:\n return [p for p, q in self.correlating_pq_tuples if q == quality]", "def get_distribution(counts, n_qubits):\n\tprobabilities = np.zeros(2 ** n_qubits) # Array of zeros and with the correct size for the measured qubits\n\tfor key in counts.keys(): # Iterate over the measured outputs\n\t\t# Transform the key from binary to decimal, and them save the probability\n\t\tprobabilities[int(key, 2)] = counts[key] / NUM_SHOTS\n\treturn probabilities", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def get_q_values(self, state):\n raise NotImplemented", "def GetSampleDistribution(self) :\r\n\t\ttry :\r\n\t\t\tSetList = self.SQLCMDs['SetList']\r\n\t\t\tSetIDs = [0,]*len(SetList)\r\n\t\t\tSetDistr = [0,]*len(SetList)\r\n\t\t\tfor ii,setname in enumerate(SetList) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectSetID'],(setname,))\r\n\t\t\t\tSetIDs[ii] = self.DB_Cursor.fetchone()[0]\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleSetCount'],(SetIDs[ii],))\r\n\t\t\t\tSetDistr[ii] = float(self.DB_Cursor.fetchone()[0])\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.info(\"Failed to retrieve sample set distribution: %s\"%detail)\r\n\t\t\tSetIDs = (0,1,2)\r\n\t\t\tSetDistr = (0,0,0)\r\n\t\treturn SetIDs,SetDistr", "def extract_q_values(results):\n return results['max_q_values'].tolist()", "def get_means_and_scales_from_q(self):\n means = np.zeros(len(self.q))\n scale = np.zeros(len(self.q))\n for i in range(len(self.q)):\n means[i] = self.q[i].mu0\n scale[i] = self.q[i].sigma0 \n return means, scale", "def q_values(self, state):\n return self.sess.run(self.graph.target_q_values,\n feed_dict={self.graph.states: [state]}).reshape(-1)", "def _getScalesRand(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate(\n (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales", "def get_qvalues(self, states):\n model_device = next(self.parameters()).device\n states = torch.tensor(states, device=model_device, dtype=torch.float)\n qvalues = self.forward(states)\n return qvalues.data.cpu().numpy()", "def get_qvalues(self, states):\n model_device = next(self.parameters()).device\n states = torch.tensor(states, device=model_device, dtype=torch.float)\n qvalues = self.forward(states)\n return qvalues.data.cpu().numpy()", "def samples(self):\n return self._values[:self.nsamples]", "def get_samples_list(self):\n return self.samples_list", "def get_q_values(self, observations, twin_q=False):\n if twin_q:\n q_model = self.twin_q_model\n else:\n q_model = self.main_q_model\n\n q_vals = q_model([observations[self.vf_obs_key]])\n return tf.reshape(q_vals, [-1, self.num_outputs])", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in 
enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def get_qvalues(self, states):\n model_device = next(self.parameters()).device\n states = torch.tensor(states, device=model_device, dtype=torch.float32)\n qvalues = self.forward(states)\n return qvalues.data.cpu().numpy()", "def test_get_qual_stats(self):\r\n\r\n qual_bins = [[1, 2, 6], [1, 2, 3], [2, 4], [4]]\r\n\r\n expected_ave_bins = [3, 2, 3, 4]\r\n expected_std_dev_bins = [2.16, 0.816, 1.0, 0]\r\n expected_total_bases_bins = [3, 3, 2, 1]\r\n score_min = 25\r\n\r\n actual_ave_bins, actual_std_dev_bins, actual_total_bases_bins,\\\r\n suggested_trunc_pos = get_qual_stats(qual_bins, score_min)\r\n\r\n # Should give correct suggested truncation position, where the quality\r\n # score average went below 25, in this case, at base 0\r\n expected_trunc_pos = 0\r\n\r\n self.assertEqual(suggested_trunc_pos, expected_trunc_pos)\r\n\r\n # Round standard deviation calculations\r\n for n in range(len(actual_std_dev_bins)):\r\n actual_std_dev_bins[n] = round(actual_std_dev_bins[n], 3)\r\n\r\n self.assertEqual(actual_ave_bins, expected_ave_bins)\r\n self.assertEqual(actual_std_dev_bins, expected_std_dev_bins)\r\n self.assertEqual(actual_total_bases_bins, expected_total_bases_bins)", "def _samples(self):\n finite_types = \\\n [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],\n ['C', 3], ['C', 5], ['D', 2], ['D', 5],\n [\"E\", 6], [\"E\", 7], [\"E\", 8], [\"F\", 4],\n [\"G\", 2]]]\n affine_types = \\\n [QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]\n elliptic_types = \\\n [QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]\n mutation_finite_types = \\\n [QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]\n mutation_infinite_types = \\\n [QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]\n\n return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types", "def samples(self):\n return glob.glob(os.path.join(self.production.rundir, \"extrinsic_posterior_samples.dat\"))", "def samples(self):\n return self._samples", "def _quantiles(self):\n\n trials = []\n for trial, state in self._trial_state.items():\n if state.last_score is not None and not trial.is_finished():\n trials.append(trial)\n trials.sort(key=lambda t: self._trial_state[t].last_score)\n\n if len(trials) <= 1:\n return [], []\n else:\n num_trials_in_quantile = int(\n math.ceil(len(trials) * self._quantile_fraction))\n if num_trials_in_quantile > len(trials) / 2:\n num_trials_in_quantile = int(math.floor(len(trials) / 2))\n return (trials[:num_trials_in_quantile],\n trials[-num_trials_in_quantile:])", "def sample(self):\n ret = []\n for _ in range(self._count_dist_fn()):\n ret.extend(self._pwm.sample())\n return \"\".join(ret)", "def Qps(self):\n return [elem['Qp'] for elem in self.__compartments]", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution 
weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break", "def make_samples(\n context_qas: List[EncodedContextQuestionAnswer]\n ) -> List[EncodedSample]:\n return [\n EncodedSample(ctx.word_encoding, ctx.char_encoding, qa)\n for ctx in context_qas\n for qa in ctx.qas\n ]", "def getValues(self):\n return [self._rng.normal(25,1)]", "def sampler(self, *args, **kwargs):\n\n return (samples_subgraphs ** 2).tolist()", "def __call__(self, *args):\n r = np.random.rand(*args)\n if type(r) is float:\n samples = self.values[(r < self.p).nonzero()[0][0]]\n elif type(r) is np.ndarray:\n samples = np.array(\n [self.values[np.nonzero(x < self.p)[0][0]] \n for x in r.flat]).reshape(r.shape)\n return samples" ]
[ "0.5885404", "0.580384", "0.56202406", "0.5590326", "0.5522683", "0.5520481", "0.55016017", "0.5484402", "0.5439344", "0.5426934", "0.540806", "0.540806", "0.54041284", "0.54027855", "0.5381106", "0.5370229", "0.5364764", "0.53576636", "0.53525275", "0.5342002", "0.53403366", "0.53310597", "0.5326639", "0.5314769", "0.5294029", "0.5279156", "0.5278774", "0.5275251", "0.5266498", "0.5251582" ]
0.6887802
0
Hook into application startup and start background NmapPingTask.
def postStartup(self): daemon = component.getUtility(interfaces.ICollector) task = NmapPingTask( "NmapPingTask", "NmapPingTask", taskConfig=daemon._prefs ) # introduce a small delay to can have a chance to load some config task.startDelay = 5 daemon._scheduler.addTask(task) if not daemon.options.disableCorrelator: correlationBackend = daemon.options.correlationBackend task._correlate = component.getUtility( IPingTaskCorrelator, correlationBackend ) task.disable_correlator = daemon.options.disableCorrelator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_start(self):\n self.run_in_background(self.__run_client)", "def start(self):\n self._logger.debug(\"%s: request to start pinger\",\n self.ping_address)\n self.stop()\n self._task = asyncio.ensure_future(self._pinger(), loop=self._loop)", "def on_startup(self) -> None:\n ...", "def startapp():", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def on_start(self):\n self.init()", "def on_start(self):\n App.on_start(self)\n self.root.register()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def startup(self):\n pass", "def on_start(self):\n ProxyServerHandler.current.handler_ready(self)", "def pibooth_startup(cfg, app):", "def activate(self):\n if not self._env.enable_registration:\n return\n legacy_key = '{}:{}'.format(self._env.flask_host, self._env.flask_port)\n self._key = self._env.get('my_ident', legacy_key, 'microservice')\n LoopingCall(self.ping).start(5, now=False)", "def start(self):\n self.configure_app()\n for task in self.tasks: # pragma: no cover\n self._running_tasks.append(asyncio.Task(task))\n self.hub.add_subscriptions(self.subscriptions)\n self.log.info(\"Started plugin `%s` (%d task(s))\",\n self.name, len(self._running_tasks))", "def run():\r\n autostartup()", "def startup(self) -> None:", "def on_start(self):\n self.logger.debug(\"Starting...\")\n pass", "async def startup(self):", "async def startup(self):", "def start(self):\n run(self.app, host=self.host, port=self.port, server=AsyncServer,\n quiet=True, debug=False)", "def on_start(self, ctx):\n pass", "async def on_start(self):\n\t\t\n\t\t# Register callback.\n\t\tawait self.instance.command_manager.register(\n\t\t\tCommand(command='muffin', target=self.muffin, admin=False, description='Bake a muffin').add_param(name='login', required=True))", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()" ]
[ "0.63465035", "0.6073031", "0.60578746", "0.59798384", "0.5913347", "0.5913347", "0.5913347", "0.5913347", "0.5913347", "0.5913347", "0.5913347", "0.5913347", "0.59071606", "0.5884065", "0.58758944", "0.5852766", "0.58509463", "0.58503497", "0.5830008", "0.5796353", "0.5778302", "0.5770652", "0.5770257", "0.5769731", "0.5763549", "0.5763549", "0.5705994", "0.5702911", "0.5671159", "0.56681406" ]
0.8040481
0
Detect whether the Ping Cycle Time is too short.
def _detectCycleInterval(self): cycleInterval = self._daemon._prefs.pingCycleInterval minCycleInterval = MIN_PING_TIMEOUT + MAX_NMAP_OVERHEAD newValue = cycleInterval >= minCycleInterval if self._cycleIntervalReasonable != newValue: self._cycleIntervalReasonable = newValue if self._cycleIntervalReasonable is False: raise nmap.ShortCycleIntervalError(cycleInterval) self._sendShortCycleInterval(cycleInterval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exceeds_hop_latency(self,ping_time):\n # remote ' ms' from ping time\n ping_as_float = float(ping_time.replace(\" ms\",\"\"))\n\tprint \"Compare {0} to {1}\".format(ping_as_float, self.LATENCY_THRESHOLD)\n\n return ping_as_float >= self.LATENCY_THRESHOLD", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def pingLatencyThresholdExceeded(self):\n\n return self.latency_exceeded", "def exceeded(self):\r\n return int(time.time()) - self.start_time >= self.length", "def ping_timeout(self) -> timedelta:\n return self._ping_timeout", "def check_last_cycle_duration(self):\n min_pm_time = timedelta(seconds=self.args.min_pm_time)\n max_pm_time = timedelta(seconds=self.args.max_pm_time)\n if self.args.pm_timestamp:\n pm_timestamp = datetime.fromtimestamp(self.args.pm_timestamp)\n now = datetime.now()\n pm_time = now - pm_timestamp\n if pm_time < min_pm_time:\n raise TestFailed(\n \"{0} time less than expected: {1} < {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, min_pm_time\n )\n )\n if pm_time > max_pm_time:\n raise TestFailed(\n \"{0} time greater than expected: {1} > {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, max_pm_time\n )\n )\n\n logging.info(\n \"{0} time: {1}\".format(self.args.pm_operation.capitalize(), pm_time)\n )", "def timedout(self):\n\n return self.duration() > self.check.timeout", "def overtime(self):\n if self._overtime != '':\n return True\n return False", "def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0", "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def _check_pulse(self):\n timedelta = time.time() - self.heartbeat\n update_delay = float(1/self.qbpm.frequency)\n time_to_update = False\n if timedelta > update_delay:\n time_to_update = True\n self.heartbeat = time.time()\n return time_to_update", "def is_exceeded(self):\n\n if self.stopwatch.check_time() > self.duration:\n self.stopwatch.start()\n self.num_processed_this_interval = 0\n return False\n\n return self.num_processed_this_interval >= self.max_per_interval", "def check_time(self):\n while True:\n for name in self.neighbors:\n if not self.neighbors[name].is_killed:\n if not self.neighbors[name].update_ready and time.time() - self.neighbors[name].send_timer > self.timeout:\n self.neighbors[name].update_ready = True\n if time.time() - self.neighbors[name].kill_timer > 3 * self.timeout:\n self.neighbors[name].is_killed = True", "def ping(self):\r\n start = time.time()\r\n response = self.get(\"ping\")\r\n duration = (time.time() - start) * 1000\r\n assert response == \"pong\"\r\n return duration", "def _sendShortCycleInterval(self, cycleInterval):\n if self._cycleIntervalReasonable:\n msg = \"ping cycle time (%.1f seconds) is fine\" % cycleInterval\n severity = _CLEAR\n else:\n minimum = MIN_PING_TIMEOUT + MAX_NMAP_OVERHEAD\n msg = (\n \"ping cycle time (%.1f seconds) is too short \"\n \"(keep it under %.1f seconds)\" % (cycleInterval, minimum)\n )\n severity = _CRITICAL\n evt = dict(\n device=self.collectorName,\n eventClass=ZenEventClasses.Status_Ping,\n 
eventGroup=\"Ping\",\n eventKey=\"cycle_interval\",\n severity=severity,\n summary=msg,\n )\n self._eventService.sendEvent(evt)", "def check_timeout(flag: Callable, limit: float) -> bool:\n timed_out = False\n if HAS_SUPERVISOR:\n start = supervisor.ticks_ms()\n while not timed_out and not flag():\n if ticks_diff(supervisor.ticks_ms(), start) >= limit * 1000:\n timed_out = True\n else:\n start = time.monotonic()\n while not timed_out and not flag():\n if time.monotonic() - start >= limit:\n timed_out = True\n return timed_out", "def check_timeout(self, transport, earlier_time, interval, error_msg):\n now = datetime.datetime.now()\n secs = int((now - earlier_time).total_seconds())\n if secs >= interval:\n self.connection_lost(transport, f'{error_msg}: {secs} seconds')", "def LingerTime(self) -> int:", "def is_over(self, time):\n over = (not self.enable_loop()) and (time >= self.get_duration())\n return over", "def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False", "def _is_connection_stale(self):\n\n if time.time() - self.last_ping > HEART_BEAT_PING_TIME:\n self._ping()\n\n return (time.time() - self.last_pong) > HEART_BEAT_PING_TIME + HEART_BEAT_PONG_TIME", "def valid(t):\n return float(t) > time.time()", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def check_timeout(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"check_timeout\")", "def _timeout_request(start_time, request_timeout):\n return int(round(time() * 1000)) - start_time >= request_timeout", "def ping_interval(self) -> timedelta:\n return self._ping_interval", "def check_ping(self):\n # If we're still connecting, deny the connection\n if self.state == self.STATE_CONNECTING:\n if self.duration() > self.main_factory.websocket_connect_timeout:\n self.serverReject()\n elif self.state == self.STATE_OPEN:\n if (time.time() - self.last_data) > self.main_factory.ping_interval:\n self._sendAutoPing()\n self.last_data = time.time()", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def haveTime(self):\n if self.timeout is None:\n return True\n return time.time() <= self._stop", "def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time" ]
[ "0.7162169", "0.6673382", "0.65730745", "0.6475383", "0.64306843", "0.6398188", "0.6313421", "0.6286024", "0.62831277", "0.61869854", "0.6170411", "0.6166333", "0.60736674", "0.6058907", "0.60438025", "0.6022601", "0.60170096", "0.60063773", "0.60038066", "0.60017705", "0.59958863", "0.5934144", "0.59230083", "0.5895884", "0.5888513", "0.5880765", "0.58444095", "0.58403534", "0.5840056", "0.58305746" ]
0.6818477
1
Send/Clear event to show that ping cycle time is short/fine.
def _sendShortCycleInterval(self, cycleInterval): if self._cycleIntervalReasonable: msg = "ping cycle time (%.1f seconds) is fine" % cycleInterval severity = _CLEAR else: minimum = MIN_PING_TIMEOUT + MAX_NMAP_OVERHEAD msg = ( "ping cycle time (%.1f seconds) is too short " "(keep it under %.1f seconds)" % (cycleInterval, minimum) ) severity = _CRITICAL evt = dict( device=self.collectorName, eventClass=ZenEventClasses.Status_Ping, eventGroup="Ping", eventKey="cycle_interval", severity=severity, summary=msg, ) self._eventService.sendEvent(evt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def ping(self, ctx):\n start = time.time()\n msg = await ctx.send(embed=\n discord.Embed(\n title=\"**Pong!**\",\n colour=discord.Color.green(),\n description=\"Pinging...\"\n )\n )\n end = time.time()\n between = int((end - start)*1000)\n await msg.edit(embed=\n discord.Embed(\n title=\"**Pong!**\",\n colour=discord.Color.green(),\n description=f\"*{between} ms*\"\n )\n )", "async def ping(self, ctx):\n\n t_1 = time.perf_counter()\n await ctx.trigger_typing()\n t_2 = time.perf_counter()\n ping = round((t_2 - t_1) * 1000)\n embed = discord.Embed(color=self.bot.embed_color)\n embed.title = 'Pong! :ping_pong:'\n embed.description = f'That took {ping}ms!'\n await ctx.send(embed=embed)", "async def ping(ctx):\n latencies = {\n \"websocket\": bot.latency,\n }\n\n def comp_message():\n msgs = []\n for title in latencies:\n msgs.append(f\"{title.title()}: {(latencies[title] * 1000):.0f}ms\")\n return '\\n'.join(msgs)\n\n start = time.perf_counter()\n await ctx.respond(comp_message())\n end = time.perf_counter()\n\n latencies[\"round trip\"] = end - start\n\n await ctx.edit(content=comp_message())", "async def ping(self, ctx: commands.Context):\r\n await ctx.send(f'Bot\\' latency is {self.bot.latency*1000:.3f}ms')", "async def ping(self, ctx):\r\n embed = discord.Embed(\r\n title = \"Ping\",\r\n description = \"Pinging...\",\r\n color = Config.MAINCOLOR\r\n )\r\n t1 = time.perf_counter()\r\n msg = await ctx.send(embed = embed)\r\n t2 = time.perf_counter()\r\n embed = discord.Embed(\r\n title = \"🏓 Pong!\",\r\n description = f\"API latency is {round((t2 - t1) * 1000)}ms\\nHost latency is {round(self.bot.latency * 1000, 2)}ms\",\r\n color = Config.MAINCOLOR\r\n )\r\n await msg.edit(embed = embed)", "async def ping(self, ctx):\n\n msg = f\"{(self.bot.ws.latency * 1000):.2f} ms\"\n await ctx.info(f\"Bot Latency: {msg}\")", "async def ping_command(ctx):\n await ctx.send(f\"Current ping is: **{round(ctx.bot.latency, 2)} seconds**\")", "async def ping(self, ctx: commands.Context):\n latency = str(round(self.bot.latency * 1000, 1))\n await ctx.send(\n embed=Embed(title=\"Pong!\", description=f\"{latency}ms\", color=Color.blue())\n )", "async def ping(self, ctx):\n embed = Embed(\n title=\"Pong! Websocket Latency:\",\n description=f\"{self.bot.ws.latency * 1000:.4f} ms\",\n color=self.bot.main_color,\n )\n return await ctx.send(embed=embed)", "def ping(self):\r\n start = time.time()\r\n response = self.get(\"ping\")\r\n duration = (time.time() - start) * 1000\r\n assert response == \"pong\"\r\n return duration", "async def ping(self, ctx):\n pong_msg = await ctx.send(\":ping_pong:\")\n sr_lat = (pong_msg.created_at - ctx.message.created_at).total_seconds() * 1000\n await pong_msg.edit(content=f\"Command latency = `{sr_lat}ms`\\n\"\n f\"API heartbeat = `{self.client.latency * 1000:.1f}ms`\")\n self.logger.info(misolog.format_log(ctx, f\"\"))", "async def ping(ctx):\n em = discord.Embed()\n em.title ='Pong! Websocket Latency:'\n em.description = f\"{bot.ws.latency * 1000:.4f} ms\"\n await ctx.send(embed=em)", "async def ping(self, ctx: MyContext):\n _ = await ctx.get_translate_function()\n\n t_1 = time.perf_counter()\n await ctx.trigger_typing() # tell Discord that the bot is \"typing\", which is a very simple request\n t_2 = time.perf_counter()\n time_delta = round((t_2 - t_1) * 1000) # calculate the time needed to trigger typing\n await ctx.send(_(\"Pong. 
— Time taken: {milliseconds}ms\",\n milliseconds=time_delta)) # send a message telling the user the calculated ping time", "async def ping(self, ctx : commands.Context) -> None:\n\n embed = Embed(\n title = \"🏓 Pong!\",\n description = f\"Gateway latency is {int(round(self.bot.latency * 1000, 2))}ms.\",\n color = maincolor\n )\n await ctx.send(embed = embed)", "async def ping(self, ctx):\n await ctx.send(f'Pong! {round(self.client.latency * 1000)}ms')", "def ping(self):\n self._write(f'PING :{self.server.name}')\n self.awaiting_pong_since = datetime.datetime.now()", "async def ping_command(self, ctx):\n ping = int(self.client.latency * 1000)\n embed = Embed(\n title=\"Pong!\", description=f\"My ping is {ping}ms.\", color=Color.green()\n )\n await ctx.send(embed=embed)", "async def ping(self, ctx):\n msg_time = ctx.message.created_at\n cur_time = datetime.utcnow()\n delay = (cur_time - msg_time) / timedelta(milliseconds=1)\n await ctx.send(f\"Pong! ({str(delay)} ms)\")", "async def _ping(self, ctx):\n latency = self.bot.latency * 1000\n e = discord.Embed(title=\"Pong.\", color=discord.Color.red())\n e.add_field(name=\"Discord API\", value=f\"```{str(round(latency))} ms```\")\n e.add_field(name=\"Typing\", value=\"```calculating ms```\")\n\n before = time.monotonic()\n message = await ctx.send(embed=e)\n typlatency = (time.monotonic() - before) * 1000\n\n e = discord.Embed(title=\"Pong.\", color=discord.Color.green())\n e.add_field(name=\"Discord API\", value=f\"```py\\n{str(round(latency))} ms```\")\n e.add_field(name=\"Typing\", value=f\"```py\\n{str(round(typlatency))} ms```\")\n\n await message.edit(embed=e)", "async def ping(self, ctx):\n # getting the time that the message that called this command was created at\n message_time = ctx.message.created_at\n # getting the current time and then calculating the difference\n localtime = datetime.datetime.utcnow()\n lag_time = (localtime - message_time) / 2\n # sending a plain message back showing the latency of the command and the websocket\n return await ctx.send(f\"Command Latency: **{lag_time.microseconds/1000}ms**.\"\n f\"\\nWebsocket Latency: **{(ctx.bot.latency*1000):.2f}ms**.\")", "async def ping(self, ctx):\n await ctx.send(f'Pong! {round(self.bot.latency * 1000)}ms')\n await ctx.message.delete(delay=3)", "async def ping(self, ctx: commands.Context) -> None:\n # datetime.datetime objects do not have the \"milliseconds\" attribute.\n # It must be converted to seconds before converting to milliseconds.\n bot_ping = (datetime.utcnow() - ctx.message.created_at).total_seconds() * 1000\n if bot_ping <= 0:\n bot_ping = \"Your clock is out of sync, could not calculate ping.\"\n else:\n bot_ping = f\"{bot_ping:.{ROUND_LATENCY}f} ms\"\n\n # Discord Protocol latency return value is in seconds, must be multiplied by 1000 to get milliseconds.\n discord_ping = f\"{self.bot.latency * 1000:.{ROUND_LATENCY}f} ms\"\n\n embed = Embed(title=\"Pong!\")\n\n for desc, latency in zip(DESCRIPTIONS, [bot_ping, discord_ping]):\n embed.add_field(name=desc, value=latency, inline=False)\n\n await ctx.send(embed=embed)", "def on_tick(self, time):\n pass", "async def ping(self, ctx):\n ms = round(self.bot.latency * 1000)\n await self.send_message(ctx, 'Pong! 
({} ms)'.format(ms))", "def onPing(self, payload):\n super().onPing(payload)\n self.log.debug(\"Pinged: {}\".format(payload))\n self.log.debug(self.last_ping)\n self.log.debug(time.time() - self.last_ping)\n # If groupme's \"ping\" takes longer than 32 seconds, reset connection\n if not (not (time.time() - self.last_ping > 32) and not (time.time() - self.start_time > self.timeout)):\n self.sendClose()", "def _ping(self):\n\n self.last_ping = time.time()\n try:\n logger.debug(\"(%s) PING\", self.device[\"ip\"])\n _send_request(self.device, tf.HEART_BEAT)\n except socket.error:\n self.force_reconnect = True", "async def ping(self, ctx):\n m = await ctx.send(\"One moment...\")\n t1 = ctx.message.created_at\n t2 = m.created_at\n rc = (t2 - t1).total_seconds()\n emoji = '☠️' if rc > 50 else ('😭' if rc > 5 else ('😨' if rc > 1 else '👌'))\n await m.edit(content=\"Pong! `{0:.3f}s` {1}\\n\".format(rc, emoji))", "async def ping(ctx):\n latency = bot.latency\n await ctx.send(\"Pong! \" + str(latency))", "async def ping(self, ctx):\n botlatency = round(self.bot.latency * 1000, 3)\n embed = discord.Embed(title = \"Pong!\", description = f\":ping_pong: `{botlatency}ms`\", color = discord.Color.blurple())\n await ctx.send(embed = embed)", "async def ping(self, ctx):\n current_time = int(round(time.time() * 1000))\n m = await say(ctx, \"Pong! ---ms\")\n last_time = int(round(time.time() * 1000))\n last_time -= current_time\n await m.edit(content=\"Pong! {}ms\".format(last_time))" ]
[ "0.6756339", "0.6682347", "0.66572905", "0.6634368", "0.6632559", "0.662904", "0.6613886", "0.6611981", "0.6609368", "0.6565207", "0.65384394", "0.648648", "0.6478771", "0.64708966", "0.6428788", "0.64074665", "0.6401151", "0.6391863", "0.6389115", "0.636785", "0.6364683", "0.63317144", "0.6326958", "0.6325585", "0.6322459", "0.6292618", "0.6244926", "0.62176543", "0.6215375", "0.62096816" ]
0.7109918
0
Send/Clear event to show that correlation is executed properly.
def _correlationExecution(self, ex=None): if ex is None: msg = "correlation executed correctly" severity = _CLEAR else: msg = "correlation did not execute correctly: %s" % ex severity = _CRITICAL evt = dict( device=self.collectorName, eventClass=ZenEventClasses.Status_Ping, eventGroup="Ping", eventKey="correlation_execution", severity=severity, summary=msg, ) self._eventService.sendEvent(evt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def acknowledged(self):\n ...", "def on_response(self, ch, method, props, body):\n if self.corr_id == props.correlation_id:\n self.response = body", "def corr(self):\n pass", "def _set_correlations(self) -> None:\n pass", "def __timer_event(self, event):\n self.__refresh_grid()\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])\n\n # Set status message\n self.SetStatusText(f\"Status updated at {self.__cor.get_last_calculation():%d-%b %H:%M:%S}.\", 1)", "def test_clear_dispatched_events(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n msg = msg_helper.make_ack()\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.event', msg)\n self.assertNotEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])\n worker_helper.clear_dispatched_events('fooconn')\n self.assertEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])", "def test_clear_dispatched_events_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(connector_name='fooconn')\n msg = msg_helper.make_ack()\n self._add_to_dispatched(\n worker_helper.broker, 'fooconn.event', msg)\n self.assertNotEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])\n worker_helper.clear_dispatched_events()\n self.assertEqual(\n worker_helper.broker.dispatched['vumi']['fooconn.event'], [])", "def after_send(self):", "def trace(self, correlation_id: Optional[str], message: str, *args: Any, **kwargs: Any):\n pass", "def testClear(self):\n \n messager.clear()\n messager.send('a')\n messager.send('b')\n messager.send('c')\n # No methods should have been called.\n self.assertEqual(self.logger.log,[])", "def on_x(self):\r\n self.log()", "def eventReceived(self, event):\n print repr(event)", "def fire(self):\n pass", "def _event_receiver_side_effect(self, **kwargs): # pylint: disable=unused-argument\n self.receiver_called = True", "def _event_receiver_side_effect(self, **kwargs): # pylint: disable=unused-argument\n self.receiver_called = True", "def on_sync(self):\r\n self.log()", "def debug(self, correlation_id: Optional[str], message: str, *args: Any, **kwargs: Any):\n pass", "def signal(self):\n pass", "def connectionMade(self):\n self.factory.debug = False\n self._snd(\"Codigo de operador:\")", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def on_delivered(self, frame):\n pass", "def corr():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n c = dagpype_c.Correlator()\n try:\n while True:\n x, y = (yield)\n c.push(float(x), float(y))\n except GeneratorExit:\n target.send(c.corr())\n target.close()\n return _dagpype_internal_fn_act", "def __clear_history(self, event):\n # Clear the history\n self.__cor.clear_coefficient_history()\n\n # Reload graph if we have a coefficient selected\n self.__log.info(\"History cleared. 
Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])\n\n # Reload the table\n self.__refresh_grid()", "def on_delivery_confirmation(self, method_frame):\n confirmation_type = method_frame.method.NAME.split('.')[1].lower()\n\n self.logger.info('received %s for %s', confirmation_type, method_frame.method.delivery_tag)\n if confirmation_type == 'ack':\n self._acked += 1\n elif confirmation_type == 'nack':\n self._nacked += 1\n\n self._deliveries.remove(method_frame.method.delivery_tag)\n self.logger.info('published %i messages, %i yet to confirm, %i acked and %i nacked', self._message_number,\n len(self._deliveries), self._acked, self._nacked)\n self.stop()", "def notify(self, correlation_id: Optional[str], args: Parameters):\n raise NotImplementedError('Method from interface definition')", "def fire(self):", "def test__resend(self, mock_send):\n track = self._clean_track()\n fromcall = \"KFART\"\n tocall = \"KHELP\"\n message = \"somthing\"\n msg = messaging.TextMessage(fromcall, tocall, message)\n msg.last_send_attempt = 3\n track.add(msg)\n\n track._resend(msg)\n msg.send.assert_called_with()\n self.assertEqual(0, msg.last_send_attempt)", "def send(self, event, message):\n pass", "def on_R1(self):\r\n self.log()", "def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()" ]
[ "0.61915857", "0.6143062", "0.61174214", "0.6090135", "0.5885771", "0.5878563", "0.5741486", "0.56712615", "0.5606218", "0.5476562", "0.54708314", "0.5470178", "0.54279435", "0.5422348", "0.5422348", "0.5417193", "0.53912276", "0.5373222", "0.53426033", "0.5299313", "0.5284871", "0.527289", "0.5263194", "0.5234608", "0.5231514", "0.5221826", "0.5218027", "0.5211847", "0.5205535", "0.5204386" ]
0.71676165
0
Send/Clear event to show that nmap is executed properly.
def _nmapExecution(self, ex=None): if ex is None: msg = "nmap executed correctly" severity = _CLEAR else: msg = "nmap did not execute correctly: %s" % ex severity = _CRITICAL evt = dict( device=self.collectorName, eventClass=ZenEventClasses.Status_Ping, eventGroup="Ping", eventKey="nmap_execution", severity=severity, summary=msg, ) self._eventService.sendEvent(evt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_cb(self, msg):\n self.map = Map(msg, dilate=5)\n if VERBOSE:\n rospy.loginfo(\"Map received! Origin: \" + str(msg.info.origin))", "def do_nmap__Auxilary(self, param):\n if self.which(\"nmap\"):\n self.do_shell(\"nmap \" + param)", "def address_mapped_event(self, event):\r\n pass", "def update(self):\n self.send_tf_msg()\n super(Map).update()", "def onMapToolActivated(self, e):\n pass", "def show_map(self):\n self.m1.display()", "def send_noop():\n send_command(0xE3)", "def map_callback(self,msg):\n self.map = np.array(msg.data).reshape((msg.info.height, msg.info.width))\n self.map_info = msg.info", "def memoryMapChanged(self, mem: ghidra.program.database.mem.MemoryMapDB) -> None:\n ...", "def map_event(self, widget, event):\n #self.configure_window(width, height)\n return self.make_callback('map')", "def __init__(self, out):\r\n super(PHPTMAPIMapHandler, self).__init__()\r\n self._out = out\r\n self._counter = 0", "def send_map(self, publisher, map, lm_y, lm_x, rows, columns):\n\n msg = map_communication()\n msg.message_id = self.generateID()\n msg.agent_id = self._agent_name\n msg.map = map\n msg.lm_x = lm_x\n msg.lm_y = lm_y\n msg.rows = rows\n msg.columns = columns\n publisher.publish(msg)", "def __empty_event_handler(self, govee, device, raw_data):\n\n pass", "def run(self):\n self.iface.mapCanvas().setMapTool(self.tool)", "def address_mapped_event(self, event):\r\n output = [event.event_name, event.from_addr, event.to_addr, \r\n time.asctime(event.when)]\r\n plog(\"DEBUG\", \" \".join(output))", "def startTopicMap(self):\r\n super(PHPTMAPIMapHandler, self).startTopicMap()\r\n self._out.write(_HEADER)", "def outConnectEvent(self):\r\n pass", "def onafterresetting(self, event):\n print('onafterresetting; event: %s, %s->%s' % (event.event, event.src, event.dst))", "def after_send(self):", "def costmap_callback(self, msg):\n \n self.metadata = msg.info\n self.w = self.metadata.width\n self.h = self.metadata.height\n self.fix_offset() \n self.costmap = np.array(msg.data).reshape((self.h,self.w))\n self.find_trajectory()", "def onSetRelayOutput(self, event):", "def send(self, event, message):\n pass", "def send(event=None): # event is passed by binders.\r\n msg = my_msg.get()\r\n my_msg.set(\"\") # Clears input field.\r\n client_socket.send(bytes(msg, \"utf8\"))\r\n if msg == \"{quit}\":\r\n client_socket.close()\r\n top.quit()", "def console_request(self, evt, proto):\n if evt.kind == sugar.transport.ServerMsgFactory.TASK_RESPONSE:\n threads.deferToThread(self.on_broadcast_tasks, evt, proto)", "def forget_unicast_address(self):\n self.send_packet('\\xb3')", "def run(self):\n while not self.setup() and self.running:\n pass\n\n while self.running:\n # Create a byte array to receive the computed maps\n mapb = bytearray(self.MAP_SIZE_PIXELS * self.MAP_SIZE_PIXELS)\n\n # Get final map \n self.slam.getmap(mapb)\n try:\n self.connection.send(mapb)\n except socket.error:\n print \"MapServer: Client disconnected\"\n if self.running:\n self.setup()", "def log(self, msg):\n self.xymap.log(msg)", "def sm_output_on(self):\n self.sm.output_on()\n #self.sm_restore_display()", "def etape_n(event):\n etape()", "def onenterready(self, event):\n print('onenterready; event: %s, %s->%s' % (event.event, event.src, event.dst))" ]
[ "0.6076413", "0.60371435", "0.5883904", "0.58613986", "0.5440963", "0.54270107", "0.54088324", "0.5406641", "0.53981125", "0.5395532", "0.53375036", "0.53336084", "0.5309096", "0.5281287", "0.5242296", "0.52364504", "0.52032447", "0.5127933", "0.5124582", "0.51173687", "0.5097094", "0.50951046", "0.50759244", "0.50755316", "0.5066528", "0.50626516", "0.5059352", "0.50553817", "0.50482523", "0.5037911" ]
0.7199875
0
Iterate the daemon's task list and find PingTask tasks that are IPv4.
def _getPingTasks(self): tasks = self._daemon._scheduler._tasks pingTasks = {} for configName, task in tasks.iteritems(): if isinstance(task.task, PingTask): if task.task.config.ipVersion == 4: pingTasks[configName] = task.task return pingTasks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def pingMany(self, ipList):\n results = yield executeNmapForIps(sorted(ipList))\n self.log.info(\"Found %s addresses\", len(results))\n if self.log.isEnabledFor(logging.DEBUG):\n self.log.debug(\n \"Addresses found: %s\", \", \".join(a for a in results)\n )\n defer.returnValue(results.values())", "def _batchPing(self):\n # find all devices to Ping\n ipTasks = self._getPingTasks()\n if len(ipTasks) == 0:\n log.debug(\"No ips to ping!\")\n raise StopIteration() # exit this generator\n\n # Make sure the cycle interval is not unreasonably short.\n self._detectCycleInterval()\n\n # only increment if we have tasks to ping\n self._pings += 1\n with tempfile.NamedTemporaryFile(prefix=\"zenping_nmap_\") as tfile:\n ips = []\n for taskName, ipTask in ipTasks.iteritems():\n ips.append(ipTask.config.ip)\n ipTask.resetPingResult() # clear out previous run's results\n ips.sort()\n for ip in ips:\n tfile.write(\"%s\\n\" % ip)\n tfile.flush()\n\n # ping up to self._preferences.pingTries\n tracerouteInterval = self._daemon.options.tracerouteInterval\n\n # determine if traceroute needs to run\n doTraceroute = False\n if tracerouteInterval > 0:\n if self._pings == 0 or (self._pings % tracerouteInterval) == 0:\n doTraceroute = True # try to traceroute on next ping\n\n import time\n\n i = 0\n for attempt in range(0, self._daemon._prefs.pingTries):\n\n start = time.time()\n results = yield executeNmapCmd(\n tfile.name,\n traceroute=doTraceroute,\n num_devices=len(ipTasks),\n dataLength=self._daemon.options.dataLength,\n pingTries=self._daemon._prefs.pingTries,\n pingTimeOut=self._preferences.pingTimeOut,\n pingCycleInterval=self._daemon._prefs.pingCycleInterval,\n )\n elapsed = time.time() - start\n log.debug(\"Nmap execution took %f seconds\", elapsed)\n\n # only do traceroute on the first ping attempt, if at all\n doTraceroute = False\n\n # record the results!\n for taskName, ipTask in ipTasks.iteritems():\n i += 1\n ip = ipTask.config.ip\n if ip in results:\n result = results[ip]\n ipTask.logPingResult(result)\n else:\n # received no result, log as down\n ipTask.logPingResult(PingResult(ip, isUp=False))\n # give time to reactor to send events if necessary\n if i % _SENDEVENT_YIELD_INTERVAL:\n yield twistedTask.deferLater(reactor, 0, lambda: None)\n\n self._cleanupDownCounts()\n dcs = self._down_counts\n delayCount = self._daemon.options.delayCount\n pingTimeOut = self._preferences.pingTimeOut\n for taskName, ipTask in ipTasks.iteritems():\n i += 1\n if ipTask.isUp:\n if taskName in dcs:\n del dcs[taskName]\n log.debug(\"%s is up!\", ipTask.config.ip)\n ipTask.delayedIsUp = True\n ipTask.sendPingUp()\n averageRtt = ipTask.averageRtt()\n if averageRtt is not None:\n if (\n averageRtt / 1000.0 > pingTimeOut\n ): # millisecs to secs\n ipTask.sendPingDegraded(rtt=averageRtt)\n else:\n ipTask.clearPingDegraded(rtt=averageRtt)\n else:\n dcs[taskName] = (dcs[taskName][0] + 1, datetime.now())\n if dcs[taskName][0] > delayCount:\n log.debug(\n \"%s is down, %r\", ipTask.config.ip, ipTask.trace\n )\n ipTask.delayedIsUp = False\n else:\n log.debug(\n \"%s is down. %s ping downs received. 
\"\n \"Delaying events until more than %s ping \"\n \"downs are received.\",\n ipTask.config.ip,\n dcs[taskName][0],\n delayCount,\n )\n\n ipTask.storeResults()\n # give time to reactor to send events if necessary\n if i % _SENDEVENT_YIELD_INTERVAL:\n yield twistedTask.deferLater(reactor, 0, lambda: None)\n\n if not self.disable_correlator:\n try:\n yield defer.maybeDeferred(self._correlate, ipTasks)\n except Exception as ex:\n self._correlationExecution(ex)\n log.critical(\n \"There was a problem performing correlation: %s\", ex\n )\n else:\n self._correlationExecution() # send clear\n else:\n downTasks = (\n ipTask\n for ipTask in ipTasks.values()\n if not (ipTask.isUp or ipTask.delayedIsUp)\n )\n for ipTask in downTasks:\n ipTask.sendPingDown()\n\n self._nmapExecution()", "def parallel_ping(host_list):\n global __parallel_ping_results\n __parallel_ping_results = {}\n for host in host_list:\n #print host\n t = Thread(target=ping_host, args=([host, True]))\n t.start()\n start_time = time.time()\n while len(__parallel_ping_results.keys()) < len(host_list) \\\n and time.time() - start_time < 30: # 30s timeout\n time.sleep(0.1)\n return __parallel_ping_results", "async def list_tasks():", "def ip_check():\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 '+host+' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)", "def all_hosts(self):\n ...", "def main(self):\n results = []\n for t in Config.APPHOSTS:\n custom_domain = t[\"custom_domain\"]\n heroku_host = t[\"heroku_host\"]\n result = self.ip_update(custom_domain, heroku_host)\n results.append(result)\n return results", "def find_transfer_ip(allocated):\n for ip_addr in ip_network(TRANSFER_NET_IP4).hosts():\n if ip_addr in allocated:\n continue\n\n # create matching ipv6 address\n ip6_addr = ip_address(\"fec0::a:cf:%X:%X\" % (ip_addr.packed[2],\n ip_addr.packed[3]))\n\n if ip6_addr not in allocated:\n yield [str(ip_addr), str(ip6_addr)]", "def test_ping_host4(self, chirouter_runner):\n chirouter_runner.start_mininet(\"3router.json\")\n mn = chirouter_runner.mininet\n\n ping = chirouter_runner.ping(\"host1\", \"10.4.0.42\", count=4)\n\n ping.validate_output_success(num_expected=4, expected_source=\"10.4.0.42\")", "def get_hosts(marathon_url, app_id):\n\n api_endpoint = '/v2/apps/'\n headers = {'Content-Type': 'application/json'}\n url = marathon_url + api_endpoint + app_id\n print(url)\n r = requests.get(url, headers=headers)\n print(r.status_code)\n hosts = []\n for h in r.json()['app']['tasks']:\n hosts.append(h['host'])\n return hosts", "def get_migrating_vms_to_host(self, node_id):\n result = []\n for server_id in self.__migrating_tasks.keys():\n if self.__migrating_tasks[server_id] == node_id:\n result.append(server_id)\n return result", "def running_instances(hostnames=None):\n\n global api\n\n all_inst = []\n try:\n all_inst = api.get_all_instances()\n except Exception, e:\n logging.error(\"Can't get list of instances (maybe wrong credentials?)\")\n return None\n\n # Resolve IPs\n if hostnames is not None:\n ips = []\n for h in hostnames:\n try:\n ipv4 = gethostbyname(h)\n ips.append(ipv4)\n except Exception:\n # Don't add host if IP address could not be found\n logging.warning(\"Ignoring hostname %s: can't reslove IPv4 address\" % h)\n ips=list(set(ips))\n\n if hostnames is not None:\n logging.debug(\"Input hostnames: %s\" % (','.join(hostnames)))\n 
logging.debug(\"Input IPs: %s\" % (','.join(ips)))\n else:\n logging.debug(\"No input hostnames given\")\n\n # Add only running instances\n inst = []\n for i in all_inst:\n if i.status(token_id=api.keystone.token_id) == 'running':\n if hostnames is None:\n # Append all\n inst.append(i)\n else:\n found = False\n for ipv4 in ips:\n if i.network_ip(network_name=cf[\"api\"][\"network_name\"]) == ipv4:\n inst.append(i)\n logging.debug(\"Found IP %s corresponding to instance\" % ipv4)\n found = True\n break\n if not found:\n logging.warning(\"Cannot find instance %s in the list of known IPs\" % i.network_ip(network_name=cf[\"api\"][\"network_name\"]))\n\n return inst", "def get_ports_services(host):\n services_per_host =[]\n for h in host:\n services = h.findAll(\"service\")\n for service in services:\n port_service = check_if_unicode(service['name'])\n # print port_service\n services_per_host.append(port_service)\n return services_per_host", "def tcp_ping_nodes(self, timeout=20.0):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.check_version_request(timeout)", "def map_int_ext_hosts(self):\n int_hosts = []\n ext_hosts = []\n dp_hosts = {self.dp_name(dp_index): ([], []) for dp_index in range(self.NUM_DPS)}\n for host_id, options in self.host_options.items():\n host = self.host_information[host_id]['host']\n if options.get('loop_protect_external', False):\n ext_hosts.append(host)\n int_or_ext = 1\n else:\n int_hosts.append(host)\n int_or_ext = 0\n for link in self.host_links[host_id]:\n dp_hosts[self.dp_name(link)][int_or_ext].append(host)\n return set(int_hosts), set(ext_hosts), dp_hosts", "def get_hosts(self, target, listener_type):", "def ping_many_iter(self, hosts, *args, **kwargs):\n for state, ip in self.ping_many_updown_iter(hosts, *args, **kwargs):\n if state=='up':\n yield ip", "def discover_interfaces(self, hostips):\n args = [(h, (h,), {}) for h in hostips]\n return ThreadPool(self._discover_interfaces, args)", "def test_ipam_ip_addresses_list(self):\n pass", "def _get_ipv4_addresses(self, host: str) -> Dict[str, List[IPv4Address]]:\n if host == \"self\":\n command = \"show ip address\"\n elif host == \"peer\":\n command = \"failover exec mate show ip address\"\n\n show_ip_address = self.show(command)\n re_ip_addresses = RE_SHOW_IP_ADDRESS.findall(show_ip_address)\n\n results = {\n interface: [IPv4Interface(f\"{address}/{netmask}\")] for interface, address, netmask in re_ip_addresses\n }\n log.debug(\"Host %s: ip interfaces %s\", self.host)\n return results", "def _scan_hosts(self):\n results = []\n for item in glob.glob(self._pattern):\n results.append(item)\n return results", "def _resolve(addresses):\n\n for addr in addresses:\n _, _, ips = socket.gethostbyname_ex(addr)\n for ip in ips:\n yield ip", "def IterateAllPorts(tasklist, func, ctx, include_psets, follow_busyports, should_log):\n global port_iteration_do_print_taskname\n global intransit_idx, taskports_idx, thports_idx, registeredport_idx, excports_idx\n\n ## XXX: also host special ports\n\n entry_port_type_mask = 0x00070000\n if include_psets:\n entry_port_type_mask = 0x000f0000\n\n if tasklist is None:\n tasklist = kern.tasks\n tasklist += kern.terminated_tasks\n\n tidx = 1\n\n for t in tasklist:\n # Write a progress line. Using stderr avoids automatic newline when\n # writing to stdout from lldb. 
Blank spaces at the end clear out long\n # lines.\n if should_log:\n procname = \"\"\n if not t.active:\n procname = 'terminated: '\n if t.halting:\n procname += 'halting: '\n t_p = Cast(t.bsd_info, 'proc *')\n if unsigned(t_p) != 0:\n procname += str(t_p.p_name)\n elif unsigned(t.task_imp_base) != 0 and hasattr(t.task_imp_base, 'iit_procname'):\n procname += str(t.task_imp_base.iit_procname)\n sys.stderr.write(\" checking {:s} ({}/{})...{:50s}\\r\".format(procname, tidx, len(tasklist), ''))\n tidx += 1\n\n port_iteration_do_print_taskname = True\n space = t.itk_space\n num_entries = int(space.is_table_size)\n is_tableval = space.is_table\n idx = 0\n while idx < num_entries:\n entry_val = GetObjectAtIndexFromArray(is_tableval, idx)\n entry_bits= unsigned(entry_val.ie_bits)\n entry_obj = 0\n entry_str = ''\n entry_name = \"{:x}\".format( (idx << 8 | entry_bits >> 24) )\n\n entry_disp = GetDispositionFromEntryType(entry_bits)\n\n ## If the entry in the table represents a port of some sort,\n ## then make the callback provided\n if int(entry_bits) & entry_port_type_mask:\n eport = Cast(entry_val.ie_object, 'ipc_port_t')\n ## Make the callback\n func(t, space, ctx, idx, entry_val, eport, entry_disp)\n\n ## if the port has pending messages, look through\n ## each message for ports (and recurse)\n if follow_busyports and unsigned(eport) > 0 and eport.ip_messages.data.port.msgcount > 0:\n ## collect all port references from all messages\n kmsgp = Cast(eport.ip_messages.data.port.messages.ikmq_base, 'ipc_kmsg_t')\n kmsgheadp = kmsgp\n while unsigned(kmsgp) > 0:\n p_refs = set()\n CollectKmsgPortRefs(t, eport, kmsgp, p_refs)\n for (port, pdisp, ptype) in p_refs:\n func(t, space, ctx, intransit_idx, None, port, pdisp)\n kmsgp = kmsgp.ikm_next\n if kmsgp == kmsgheadp:\n break\n\n idx = idx + 1\n ## while (idx < num_entries)\n\n ## Task ports (send rights)\n if unsigned(t.itk_sself) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_sself, 17)\n if unsigned(t.itk_host) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_host, 17)\n if unsigned(t.itk_bootstrap) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_bootstrap, 17)\n if unsigned(t.itk_seatbelt) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_seatbelt, 17)\n if unsigned(t.itk_gssd) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_gssd, 17)\n if unsigned(t.itk_debug_control) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_debug_control, 17)\n if unsigned(t.itk_task_access) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_task_access, 17)\n\n ## Task name port (not a send right, just a naked ref)\n if unsigned(t.itk_nself) > 0:\n func(t, space, ctx, taskports_idx, 0,t.itk_nself, 0)\n\n ## task resume port is a receive right to resume the task\n if unsigned(t.itk_resume) > 0:\n func(t, space, ctx, taskports_idx, 0, t.itk_resume, 16)\n\n ## registered task ports (all send rights)\n tr_idx = 0\n tr_max = sizeof(t.itk_registered) / sizeof(t.itk_registered[0])\n while tr_idx < tr_max:\n tport = t.itk_registered[tr_idx]\n if unsigned(tport) > 0:\n try:\n func(t, space, ctx, registeredport_idx, 0, tport, 17)\n except Exception, e:\n print(\"\\texception looking through registered port {:d}/{:d} in {:s}\".format(tr_idx,tr_max,t))\n pass\n tr_idx += 1\n\n ## Task exception ports\n exidx = 0\n exmax = sizeof(t.exc_actions) / sizeof(t.exc_actions[0])\n while exidx < exmax: ## see: osfmk/mach/[arm|i386]/exception.h\n export = t.exc_actions[exidx].port ## send right\n if unsigned(export) > 0:\n try:\n func(t, space, ctx, excports_idx, 0, export, 
17)\n except Exception, e:\n print(\"\\texception looking through exception port {:d}/{:d} in {:s}\".format(exidx,exmax,t))\n pass\n exidx += 1\n\n ## XXX: any ports still valid after clearing IPC space?!\n\n for thval in IterateQueue(t.threads, 'thread *', 'task_threads'):\n ## XXX: look at block reason to see if it's in mach_msg_receive - then look at saved state / message\n\n ## Thread port (send right)\n if unsigned(thval.ith_sself) > 0:\n thport = thval.ith_sself\n func(t, space, ctx, thports_idx, 0, thport, 17) ## see: osfmk/mach/message.h\n ## Thread special reply port (send-once right)\n if unsigned(thval.ith_special_reply_port) > 0:\n thport = thval.ith_special_reply_port\n func(t, space, ctx, thports_idx, 0, thport, 18) ## see: osfmk/mach/message.h\n ## Thread voucher port\n if unsigned(thval.ith_voucher) > 0:\n vport = thval.ith_voucher.iv_port\n if unsigned(vport) > 0:\n vdisp = GetDispositionFromVoucherPort(vport)\n func(t, space, ctx, thports_idx, 0, vport, vdisp)\n ## Thread exception ports\n if unsigned(thval.exc_actions) > 0:\n exidx = 0\n while exidx < exmax: ## see: osfmk/mach/[arm|i386]/exception.h\n export = thval.exc_actions[exidx].port ## send right\n if unsigned(export) > 0:\n try:\n func(t, space, ctx, excports_idx, 0, export, 17)\n except Exception, e:\n print(\"\\texception looking through exception port {:d}/{:d} in {:s}\".format(exidx,exmax,t))\n pass\n exidx += 1\n ## XXX: the message on a thread (that's currently being received)\n ## for (thval in t.threads)\n ## for (t in tasklist)", "def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]", "def get_worker_addresses(self) -> List[str]:", "def check_ping(self):\n # Print ping status of all of your hosts, minimum padding of 8 spaces\n padding_size = max(len(max(self.hosts, key=len)) + 4, 8)\n print('{:{padding_size}}{}'.format('Host', 'Status', padding_size=padding_size))\n for host in self.hosts:\n # Get output of ping command\n output = str(Popen('ping -n 1 {}'.format(host), stdout=PIPE).communicate()[0])\n\n result = '{:{padding_size}}'.format(host, padding_size=padding_size)\n if 'unreachable' in output:\n result = result + 'Offline - unreachable'\n self.offline_hosts.append(host)\n elif 'could not find' in output:\n result = result + 'Offline - could not find'\n self.offline_hosts.append(host)\n elif 'transmit failed' in output:\n result = result + 'Offline - transmit failed'\n self.offline_hosts.append(host)\n elif 'timed out' in output:\n result = result + 'Offline - timed out'\n self.offline_hosts.append(host)\n else:\n result = result + 'Online'\n print(result)\n print()", "def get_hosts_ports(marathon_url, app_id):\n\n api_endpoint = '/v2/apps/'\n headers = {'Content-Type': 'application/json'}\n url = marathon_url + api_endpoint + app_id\n print(url)\n r = requests.get(url, headers=headers)\n print(r.status_code)\n hosts_ports = []\n for hp in r.json()['app']['tasks']:\n hosts_ports.append(hp['ports'])\n return hosts_ports", "def icmp_ping(ip_addr, timeout=0.5, count=4):\n is_connect = 1\n\n for i in range(count):\n try:\n delay = ping_once(ip_addr, timeout)\n except socket.gaierror, e:\n print \"failed. (socket error: '%s')\" % e[1]\n if delay == None:\n print 'failed. (timeout within %s second.)' % timeout\n is_connect = 0\n else:\n pass\n result = [ip_addr, round(delay, 4), is_connect]\n return result", "def run(self):\n for req, resp in self.servings:\n resp.check_timeout()" ]
[ "0.6179824", "0.6031999", "0.5868787", "0.58304566", "0.5507003", "0.5498833", "0.545695", "0.5452734", "0.5425952", "0.54163104", "0.5371902", "0.5334249", "0.5294592", "0.52945757", "0.52848583", "0.5277899", "0.5275892", "0.526688", "0.52499354", "0.5246008", "0.52358633", "0.52318966", "0.5227866", "0.520903", "0.5198584", "0.5195668", "0.5188756", "0.5185218", "0.51851565", "0.517452" ]
0.68449944
0
Clear out old down counts so process memory utilization doesn't grow.
def _cleanupDownCounts(self): now = datetime.now() timeout = timedelta(minutes=DOWN_COUNT_TIMEOUT_MINUTES) for taskName, (down_count, last_time) in self._down_counts.iteritems(): if now - last_time > timeout: del self._down_counts[taskName]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def stats_reset(self):\n self.stats.reset()", "def stats_reset(self):\n self.stats.reset()", "def resetCounters(self):\n self.chain.zero_counters()\n counters = self.session.query(Counter).all()\n self.session.query(Counter).delete()", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset_count(self):\n self.count = 0", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def unfork_pc(self):\n self.program_counter.pop()", "def reset_memory_statistics(sender, **kwargs): # pylint: disable=unused-argument\n MemoryUsageData.start_counting()", "def unwatch(self):\n pass", "def clear(self):\n #for counterName in self.counters:\n # del self.counters[counterName]\n self.counters={}\n self.title=None", "def reset(self):\n for s in self.subsystems:\n s.uptime = 1", "def reset_counter(self) -> None:", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def clear_stats(self):\n self._stats = None", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.restart()\n self.cycles = 0", "def clear(self):\n self._latencies = [0] * len(BUCKETS)", "def reset(self):\n reset_system_health_series()", "def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]", "def reset(self):\n self._open_activity_count = 0\n self._decisions = []\n self._tasks = TaskRegistry()", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n self.cumtime = 0\n self.start_time = self.time()", "def collect_garbage(self) -> None:\n pass", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def reset(self):\n for counterKey in self.counters.keys():\n self.counters[counterKey]=0\n self.title=None # 025 This is a hack of a hack. Trying to find if the counter was reset recently." ]
[ "0.66692394", "0.6468958", "0.6468958", "0.64442396", "0.64247274", "0.6397391", "0.6387521", "0.63292354", "0.6271622", "0.62635636", "0.625734", "0.62529767", "0.62223554", "0.6208025", "0.62053263", "0.62046486", "0.62046486", "0.62046486", "0.6183287", "0.6166497", "0.6159864", "0.61547625", "0.61346954", "0.6123868", "0.61120397", "0.60959494", "0.60956717", "0.60919404", "0.6078621", "0.6061706" ]
0.703562
0
The currently running event loop, if one exists.
def current_event_loop(self): loop = current_loop.get() if loop is None: loop = super().get_event_loop() return loop
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_event_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return asyncio.new_event_loop()", "def getLoop():\n return asyncio.get_event_loop_policy().get_event_loop()", "def get_event_loop(self):\n try:\n task = trio.lowlevel.current_task()\n except RuntimeError:\n pass\n else:\n # Trio context. Note: NOT current_loop.get()! If this is called from\n # asyncio code, current_task() is the trio-asyncio loop runner task,\n # which has the correct loop set in its contextvar; but (on Python\n # 3.7+) our current context is quite possibly something different, and\n # might have the wrong contextvar value (e.g. in the case of a\n # loop1.call_later() in loop2's context).\n return task.context.get(current_loop)\n\n # Not Trio context\n if _faked_policy.policy is not None:\n return _faked_policy.policy.get_event_loop()\n\n # This will return the thread-specific event loop set using\n # set_event_loop(), or if none has been set, will call back into\n # our new_event_loop() to make a SyncTrioEventLoop and set it as\n # this thread's event loop.\n return super().get_event_loop()", "def get_event_loop(*args, **kwargs):\r\n\r\n return get_loop(*args, **kwargs)", "def get_test_event_loop():\n if not _IS_XOS_ASYNC:\n loop = _asynclib.get_event_loop()\n else:\n loop = _get_xos_async_test_event_loop()\n return loop", "def get_event_loop() -> KivyEventLoop:\n return asyncio.get_event_loop()", "def loop(self):\n return self.caller.location.ndb.event_line_loop", "def is_running(self):\n return self._event_loop is not None and self._event_loop.is_running()", "def event_loop(self):\n logging.warning('loop undefined')", "def check_event_loop():\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())", "def loop(self):\n\n return self._loop", "def _get_loop(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Optional[asyncio.AbstractEventLoop]:\n if callable(self.loop_getter):\n if self.loop_getter_need_context:\n return self.loop_getter(*args, **kwargs) # pylint: disable=not-callable\n return self.loop_getter() # pylint: disable=not-callable\n return self.loop_getter", "def _is_running_from_main_thread():\n return tornado.ioloop.IOLoop.current(instance=False)", "def current():\n try:\n THREAD_CONTEXT.current # does it exist?\n except AttributeError:\n if not ROOT_CONTEXT:\n return None\n THREAD_CONTEXT.current = ROOT_CONTEXT\n\n return THREAD_CONTEXT.current", "def _get_event_loop() -> Tuple[asyncio.AbstractEventLoop, bool]:\n try:\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n loop = asyncio.new_event_loop()\n should_close_loop = True\n else:\n should_close_loop = False\n except RuntimeError:\n loop = asyncio.new_event_loop()\n should_close_loop = True\n return loop, should_close_loop", "def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)", "def getInstance():\n if GameLoop.__instance==None:\n GameLoop()\n return GameLoop.__instance", "def engine(self):\n return self._engine_running", "def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]", "def current_server():\n if not _current_server:\n create_server()\n return _current_server", "def get_current(cls):\n return cls.get_by_id(eventlet.corolocal.get_ident())", "def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return 
self._manager", "def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager", "def current_worker():\n try:\n return worker_thread_data.worker\n except AttributeError:\n return None", "def getcurrent():\n\n curr = coroutine.getcurrent()\n if curr is _main_coroutine:\n return _main_tasklet\n else:\n return curr", "def load_event_loop():\n while True:\n try:\n async_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(async_loop)\n return async_loop\n except:\n time.sleep(3)", "def set_asyncio_event_loop(event_loop_path: Optional[str]) -> AbstractEventLoop:\n if event_loop_path is not None:\n event_loop_class: Type[AbstractEventLoop] = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n try:\n with catch_warnings():\n # In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning\n # is emitted about the lack of a current event loop, because in\n # Python 3.14 and later `get_event_loop` will raise a\n # RuntimeError in that event. Because our code is already\n # prepared for that future behavior, we ignore the deprecation\n # warning.\n filterwarnings(\n \"ignore\",\n message=\"There is no current event loop\",\n category=DeprecationWarning,\n )\n event_loop = asyncio.get_event_loop()\n except RuntimeError:\n # `get_event_loop` raises RuntimeError when called with no asyncio\n # event loop yet installed in the following scenarios:\n # - Previsibly on Python 3.14 and later.\n # https://github.com/python/cpython/issues/100160#issuecomment-1345581902\n event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(event_loop)\n return event_loop", "def current_scheduler() -> \"Scheduler\":\n return current_task().scheduler", "def current_window(self):\n return self._impl.get_current_window().interface", "def while_loop(self):\n if self._loop is None:\n self._loop = self._while_loop()\n return self._loop\n return self._loop" ]
[ "0.7723177", "0.75770026", "0.7485644", "0.70032215", "0.68456113", "0.67435944", "0.6675096", "0.64809227", "0.6377319", "0.6373083", "0.63486046", "0.6326653", "0.62989074", "0.6260146", "0.6214039", "0.61226684", "0.60561204", "0.6007589", "0.5903972", "0.5897526", "0.5889749", "0.58769715", "0.58769715", "0.58657026", "0.58428913", "0.57760847", "0.5722399", "0.56969756", "0.5668447", "0.56486845" ]
0.8508764
0
Set the current event loop.
def set_event_loop(self, loop):
    if _in_trio_context():
        current_loop.set(loop)
    elif _faked_policy.policy is not None:
        _faked_policy.policy.set_event_loop(loop)
    else:
        super().set_event_loop(loop)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_event_loop(self):\n loop = current_loop.get()\n if loop is None:\n loop = super().get_event_loop()\n return loop", "def __init__(self, loop=None):\n object.__setattr__(self, '_loop', loop or get_event_loop())", "def event_loop(self):\n logging.warning('loop undefined')", "def _io_event_loop_thread(self):\r\n io_event_loop = asyncio.get_event_loop_policy().new_event_loop()\r\n asyncio.set_event_loop(io_event_loop)\r\n assert isinstance(io_event_loop, AbstractEventLoop)\r\n self._io_event_loop = io_event_loop\r\n self._event_loop_started.release()\r\n self._io_event_loop.run_forever()", "def set_asyncio_event_loop_policy() -> None:\n _get_asyncio_event_loop_policy()", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def set_asyncio_event_loop(event_loop_path: Optional[str]) -> AbstractEventLoop:\n if event_loop_path is not None:\n event_loop_class: Type[AbstractEventLoop] = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n try:\n with catch_warnings():\n # In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning\n # is emitted about the lack of a current event loop, because in\n # Python 3.14 and later `get_event_loop` will raise a\n # RuntimeError in that event. Because our code is already\n # prepared for that future behavior, we ignore the deprecation\n # warning.\n filterwarnings(\n \"ignore\",\n message=\"There is no current event loop\",\n category=DeprecationWarning,\n )\n event_loop = asyncio.get_event_loop()\n except RuntimeError:\n # `get_event_loop` raises RuntimeError when called with no asyncio\n # event loop yet installed in the following scenarios:\n # - Previsibly on Python 3.14 and later.\n # https://github.com/python/cpython/issues/100160#issuecomment-1345581902\n event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(event_loop)\n return event_loop", "def get_event_loop(*args, **kwargs):\r\n\r\n return get_loop(*args, **kwargs)", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def set_main(self, main_loop):\n self.main_loop = main_loop", "def start_loop(self, loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n self.loop = IOLoop.instance()\n self.loop.start()", "def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)", "def startLoop(self):\n if(self.loop is not None):\n raise Exception(\"Event loop is already started!\")\n self.loop = asyncio.new_event_loop()\n self.thread = Thread(target=start_thread_loop, args=(self.loop,))\n self.thread.setDaemon(True)\n self.thread.start()", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n IOLoop.instance().start()", "def setup_test_loop():\n loop = 
asyncio.get_event_loop()\n # asyncio.set_event_loop(None)\n return loop", "def getLoop():\n return asyncio.get_event_loop_policy().get_event_loop()", "def load_event_loop():\n while True:\n try:\n async_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(async_loop)\n return async_loop\n except:\n time.sleep(3)", "def loop(self) -> AbstractEventLoop:", "def loop(self) -> AbstractEventLoop:", "def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )", "def event_loop(request):\n loop = asyncio.get_event_loop()\n yield loop", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "def SetCurrent(env):\n global ENV\n ENV[threading.current_thread().ident] = env", "def get_event_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return asyncio.new_event_loop()" ]
[ "0.66105914", "0.6508577", "0.6366844", "0.63097066", "0.628712", "0.6116527", "0.6116527", "0.6116527", "0.6116527", "0.6111462", "0.59851605", "0.59842205", "0.5894794", "0.5893572", "0.57664776", "0.5733221", "0.57316995", "0.57202154", "0.56887686", "0.5618949", "0.5550684", "0.5458608", "0.5403928", "0.5401313", "0.5401313", "0.53996855", "0.53969693", "0.53874475", "0.53802496", "0.5373618" ]
0.7112107
0
Add a callback to run when a child process terminates.
def add_child_handler(self, pid, callback, *args):
    h = self._loop.trio_as_future(self._waitpid, pid, callback, *args)
    self._callbacks[pid] = h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_close(self, callback):\n self._close_callback = callback", "def shutdown_callback():\n pass", "def set_close_callback( callback ):", "def set_exit_callback(self: Self, func: Callable[[Self], None]) -> None:\n # XXX should this be a property instead?\n assert not inspect.iscoroutinefunction(\n func\n ), \"exit callback may not be a coroutine function\"\n assert callable(func), \"exit callback should be callable\"\n assert (\n self._state.pid is None\n ), \"cannot set exit callback when process already started\"\n self._exit_callback = func", "def register_termination_cb(self, termination_cb, cb_args=[]):\n self._termination_cbs.append((termination_cb, cb_args))", "def set_close_callback(self, callback):\n self._close_callback = stack_context.wrap(callback)", "def on_exit(self, function):\n\t\tself.exit_functions += [function]", "def set_finish_callback( callback ):", "def set_finish_callback( callback ):", "def on_close(self):\n # remove callback so it doesn't get called in the future\n self._notifier.remove_callback(self.process_notification)", "def _on_parent_process_kill(self):", "def on_exit(self) -> OnExitHandler:\n return self._on_exit", "def on_terminate(self):\n pass", "def on_parent_exit(signame):\n signum = getattr(signal, signame)\n def set_parent_exit_signal():\n # http://linux.die.net/man/2/prctl\n result = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, signum)\n if result != 0:\n raise PrCtlError('prctl failed with error code %s' % result)\n return set_parent_exit_signal", "def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void", "def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void", "def add_on_connection_close_callback(self):\n logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def on_terminate(self, agentName, process):\n self.log.info(\"%s's process with ID: %s has been terminated successfully\" % (agentName, process.pid))", "def add_on_connection_close_callback(self):\n self.logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def add_on_channel_close_callback(self):\n self.logger.info('Adding channel close callback')\n self._channel.add_on_close_callback(self.on_channel_closed)", "def add_on_channel_close_callback(self):\n logger.info('Adding channel close callback')\n self._channel.add_on_close_callback(self.on_channel_closed)", "def remove_child_handler(self, pid):\n h = self._callbacks.pop(pid, None)\n if h is None:\n return False\n h.cancel()\n return True", "def cb_quit(event):\n sys.exit()", "def stop_server(request):\n def stop_callback():\n global process\n process.terminate()\n request.addfinalizer(stop_callback)", "def register_on_stop_callback(self, callback):\n is_already_stopped = False\n self.__condition.acquire()\n if self.__is_running:\n self.__on_stop_callbacks.append(callback)\n else:\n is_already_stopped = True\n self.__condition.release()\n\n # Invoke the callback if we are already stopped.\n if is_already_stopped:\n callback()", "def _on_exit(cls, error=None):\n # type: (Exception) -> None\n pass", "def async_on_close(self, func: CALLBACK_TYPE) -> None:\n self._on_close.append(func)", "def OnShutdown(cls, callback):\n\t\tif cls.SINGLETON is None:\n\t\t\tcls.SINGLETON = Signals()\n\t\tassert not cls.SINGLETON.signalsRegistered, \"OnShutdown must be called before Setup.\"\n\t\tcls.SINGLETON.onShutdown.append(callback)", "def 
__procFinished(self, exitCode, exitStatus):\n self.__finish()", "def on_detached(reason, crash):\n print(\"on_detached()\")\n print(\"reason:\", reason)\n print(\"crash:\", crash)\n sys.exit()" ]
[ "0.6766719", "0.66032535", "0.65404296", "0.6505321", "0.6328872", "0.624823", "0.6140321", "0.60114604", "0.60114604", "0.60041755", "0.5905687", "0.58477104", "0.58365023", "0.5831817", "0.58230436", "0.58230436", "0.58206904", "0.58144957", "0.580842", "0.5779761", "0.5763262", "0.5732967", "0.570087", "0.5690237", "0.5682938", "0.5680548", "0.5674694", "0.56701195", "0.5654015", "0.5644468" ]
0.6618512
1
Return number of threads for a running PID
def get_pid_threads_count(pid):
    process = psutil.Process(pid)
    assert process.is_running(), 'PID %d is not running' % pid
    threads_count = process.num_threads()
    return threads_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getThreads():\r\n return multiprocessing.cpu_count()", "def getThreads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())", "def getNumThreads(cls) -> int:\n return cls.NUMTHREADS", "def _get_threads():\n if sys.platform == 'win32':\n # return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n return 0 # save trouble, do not use multiprocessing on windows\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())", "def nThreads(self):\n return self._c_param.n_threads", "def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()", "def cpu_count():\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores", "def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1", "def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers", "def number_of_workers():\n return (cpu_count() * 2) + 1", "def get_num_threads():\n\n num_cores = os.cpu_count()\n\n # the specific environment variable takes prescedence\n if \"PRA_NUM_THREADS\" in os.environ:\n return int(os.environ[\"PRA_NUM_THREADS\"])\n\n # we also respect OMP and MKL variables\n env_var = [\n \"OMP_NUM_THREADS\",\n \"MKL_NUM_THREADS\",\n ]\n\n all_limits = [int(getattr(os.environ, var, num_cores)) for var in env_var]\n\n return min(all_limits)", "def numcpu () :\n import multiprocessing\n return multiprocessing.cpu_count()", "def _get_num_processors():\n cores = 0\n try:\n cores = len(os.sched_getaffinity(0))\n except AttributeError:\n cores = cpu_count()\n return cores", "def num_workers(self) -> int:\n return sum(self.client.nthreads().values())", "def numRunningProcesses():\n try:\n proc = subprocess.run(\"ps -Af|grep -i \\\"online2-wav-nnet3-latgen-faster\\\"\", stdout=subprocess.PIPE, shell=True)\n np = (len(str(proc.stdout).split(\"\\\\n\")) - 3)\n if(np == None):\n np = 0\n return np\n except Exception as e:\n\t\t Tools.writeException(\"numRunningProcesses\", e)\n return -1", "def get_thread_count(self):\n return self.THREAD_COUNT", "def getThreadCountForNode(hostname,errStream,queue=SGE):\n if queue==SGE:\n return getThreadCountForSGENode(hostname, errStream)\n elif queue==SLURM:\n return getThreadsCountForSLURMNode(hostname, errStream)\n else:\n logging.warning(\"Unrecognized queue (%s), using 8 cores\" % (queue))\n return 8", "def __get_thread_count(self, conf):\n return conf[self.conf_item.get_thread_count()]", "def get_num_parallel_workers():\n return _config.get_num_parallel_workers()", "def numprocesses(self):\n info = self.info()\n return info['max_processes']", "def threads_per_core(self) -> int:\n return pulumi.get(self, \"threads_per_core\")", "def num_processes():\n return 1", "def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors", "def cpu_count_cores():\n return cext.cpu_count_cores()", "def threads_per_core(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"threads_per_core\")", "def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns", "def num_cores(self):\n return self.cores_per_socket * self.sockets_per_node * self.num_nodes", "def 
count_cpus():\r\n try:\r\n return multiprocessing.cpu_count()\r\n except Exception:\r\n logging.exception('can not get cpu count from'\r\n ' multiprocessing.cpu_count()')\r\n cpuinfo = get_cpuinfo()\r\n # Returns at least one cpu. Check comment #1 in crosbug.com/p/9582.\r\n return len(cpuinfo) or 1", "def determine_number_of_cpus():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)\n sc_stdout = sysctl.communicate()[0]\n res = int(sc_stdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudo_devices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudo_devices:\n if expr.match(pd) is not None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesg_process = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesg_process.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def get_n_workers(self):\n return self.df.worker.nunique()" ]
[ "0.7690271", "0.76181304", "0.7551392", "0.73519766", "0.7266163", "0.7219404", "0.7217407", "0.71540976", "0.71363544", "0.7090084", "0.7077626", "0.7074354", "0.70573896", "0.7002955", "0.7001179", "0.6996043", "0.6958745", "0.69556826", "0.69513303", "0.69494325", "0.69449836", "0.6944116", "0.69225967", "0.68609357", "0.6793971", "0.6788089", "0.6777602", "0.67582065", "0.672642", "0.6706915" ]
0.807942
0
Assert that we can determine whether a dict does not contain a key.
def testDictDoesNotContain(self):
    self.Check("""
      d1 = {"x": 42}
      if "x" not in d1:
        print d1["nonsense"]  # Dead code
      else:
        print d1["x"]

      d2 = {}
      if "x" not in d2:
        pass
      else:
        print d2["nonsense"]  # Dead code

      d3 = {__any_object__: __any_object__}
      if "x" not in d3:
        print d3["y"]
      else:
        print d3["x"]
      """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dictionary_should_not_contain_key(self,dictionary,key,msg=None):\r\n default = \"Dictionary contains key '%s'\" %key\r\n _verify_condition(not dictionary.has_key(key),default,msg)", "def test_remove_key_not_found(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"Py\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"xxx\")\n\n self.assertEqual(expected, actual)", "def test_escape_no_value_present(self):\r\n testdict = escapeddict.EscapedDict({'key1': 'value1', 'key2': 'value2 ${key_not_present} ${key1}'})\r\n for key in testdict.keys():\r\n print testdict[key]\r\n assert testdict['key1'] == 'value1'\r\n assert testdict['key2'] == 'value2 ${key_not_present} value1'", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def dictionary_should_not_contain_value(self,dictionary,value,msg=None):\r\n default = \"Dictionary contains value '%s'\" %value\r\n _verify_condition(not value in dictionary.values(),default,msg)", "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def test_get_key_not_defined_yet(self):\n storage = SessionStorage()\n\n self.assertNotIn('key1', storage)\n s1 = storage['key1']\n self.assertIn('key1', storage)\n\n self.assertNotIn('key2', storage)\n s2 = storage['key2']\n self.assertIn('key2', storage)\n\n self.assertIsNot(s1, s2)", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())", "def dictionary_should_contain_key(self,dictionary,key,msg=None):\r\n default = \"Dictionary does not contain key'%s'\" %key\r\n _verify_condition(dictionary.has_key(key),default,msg)", "def testDictMaybeContains(self):\n ty = self.Infer(\"\"\"\\\n if __random__:\n x = {\"a\": 1, \"b\": 2}\n else:\n x = {\"b\": 42j}\n if \"a\" in x:\n v1 = x[\"b\"]\n if \"a\" not in x:\n v2 = x[\"b\"]\n \"\"\", deep=False)\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Dict\n x = ... # type: Dict[str, int or complex]\n v1 = ... # type: int\n v2 = ... 
# type: complex\n \"\"\")", "def test_validate_non_included_keys():\n field = PartialDictField(included_keys=['a'], value_field=CharField(max_length=5),\n required=False)\n data = {'b': '123456'}\n try:\n field.run_validators(data)\n except ValidationError:\n assert False, 'Got a ValidationError for a non-included key'", "def assert_not_contains(self, result, key):\n result_d = self.type_convert(result)\n content = result_d.get(key)\n if not content:\n pass\n else:\n raise AssertionError(\"Unexpected response, missing param: \", key)", "def test_empty_dict():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test({\"foo\": \"bar\"})", "def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")", "def check(actual_dict, raise_error=True):\r\n missing = set(expected_keys) - set(actual_dict.keys())\r\n if not missing:\r\n return True\r\n if raise_error:\r\n raise InvalidTabsException(\r\n \"Expected keys '{0}' are not present in the given dict: {1}\".format(expected_keys, actual_dict)\r\n )\r\n else:\r\n return False", "def test_empty_dict_by_name():\n\n @type_checked\n def _run_test(thing:dict):\n assert isinstance(thing, dict)\n\n _run_test({\"baz\": True})", "def test_02_is_equal_false(self):\n\n dict1 = {\"a\": \"1\", \"b\": \"2\"}\n dict2 = {\"a\": \"1\", \"b\": \"3\"}\n items_equal = utils.is_equal(dict1, dict2)\n self.assertFalse(items_equal)", "def testDictContains(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" in d1:\n print d1[\"x\"]\n else:\n print d1[\"nonsense\"] # Dead code\n\n d2 = {}\n if \"x\" in d2:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" in d3:\n print d3[\"x\"]\n else:\n print d3[\"y\"]\n \"\"\")", "def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))", "def test_empty_dict_failure():\n\n @type_checked\n def _run_test(thing:{}): pass\n\n with pytest.raises(TypeError):\n _run_test(1)", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def test_issue_74():\n patient = Patient(active=True, address=[])\n assert \"address\" not in patient.dict()\n assert patient.dict(exclude_none=False)[\"address\"] == []", "def testNotExistingPath(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f['data'] = 1\n\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=1):\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='raise')", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def check_keys_in_dict(dictionary, keys):\n if not all(key in dictionary for key in keys):\n raise KeyError(\"Dictionary missing key values.\"\n \"Requires: {}\".format(keys))\n return True", "def isKeyEmpty(k):\r\n if 
k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False", "def provoke_and_handle_KeyError():\n test_dict = {}\n try:\n print(test_dict['to life'])\n except KeyError as ke:\n print(f\"Sorry! The key '{ke}' does not exist in test_dict!\")", "def _validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")" ]
[ "0.81113243", "0.7270285", "0.7054258", "0.7022218", "0.6932946", "0.69100094", "0.6858473", "0.6853848", "0.67975736", "0.6752067", "0.67427343", "0.6726948", "0.67186964", "0.6715598", "0.65655386", "0.6539576", "0.653925", "0.65203315", "0.64888024", "0.6468318", "0.6338368", "0.6324628", "0.63189787", "0.6312476", "0.6304728", "0.62963545", "0.62824726", "0.6275681", "0.6222436", "0.6175656" ]
0.767277
1
Decorator that clears context values when objects or attributes are modified.
def objects_attributes_change(f):
    def _f(*args):
        clear_cxt_vars(args[0])
        f(*args)
    return _f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invalidate(self, context):\n self.dictionary = None", "def clean(_context):", "def keep_attributes(context, *attributes):\n attr_to_value = preserve_attributes(context, attributes)\n yield\n restore_attributes(context, attr_to_value)", "def keep_request_data(context):\n attr_to_value = preserve_attributes(\n context, [k for k, _ in default_request_data()])\n yield\n restore_attributes(context, attr_to_value)", "def context_reset(self):\n self._context_state = None\n logging.info('Resetting the context')", "def reset(self):\n\n self.type = None\n self.additional_context = \"\"\n super().reset()", "def clear_attrs(self):\n self._attributes.clear()", "def reset(self, *_):\n with self._context.lock:\n super().reset()\n self.__context_init()", "def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)", "def _clear_context():\n for var in [x for x in __context__ if x.startswith(\"lxc.\")]:\n log.trace(\"Clearing __context__['%s']\", var)\n __context__.pop(var, None)", "def __clear_context(self):\n self.calling_view = None\n self.calling_view_index = []\n self.calling_view_is_empty = False\n\n self.current_view = None\n self.current_history_entry = None", "def clear_cxt_vars(cxt):\n if hasattr(cxt, '_cl'):\n del cxt._cl\n if hasattr(cxt, '_pairs'):\n del cxt._pairs", "def clean(self, context):\n pass", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clear_attributes(self):\n self.attrs = etad.AttributeContainer()", "def clear(self, *args, **kwargs): # real signature unknown\n pass", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def _reset(self):\n [delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]", "def _updated_context(self, **updates: CFNode) -> Generator[None, None, None]:\n context = self._context\n original_items = {\n label: context[label] for label in updates if label in context\n }\n try:\n self._context.update(updates)\n yield\n finally:\n for label in updates:\n if label in original_items:\n context[label] = original_items.pop(label)\n else:\n del context[label]", "def _reset(self, *args):\n self.dirty = True", "def clean(self):\n all_props = self.__class__.CONFIG_PROPERTIES\n for prop_name in self._modified:\n attr_config = all_props.get(prop_name)\n if attr_config and attr_config.input_func:\n self._config[prop_name] = attr_config.input_func(self._config[prop_name])\n self._modified.clear()", "def del_functions(self, *args):\n if len(args) > 0:\n attrs = args\n else:\n self._user_function.clear()", "def clear(self) :\n self.__dict__ = {}", "def clearProperties(*args):", "def clearProperties(*args):" ]
[ "0.659579", "0.6377247", "0.63353455", "0.6282862", "0.6166172", "0.60900927", "0.60400164", "0.6011574", "0.6009515", "0.59849083", "0.5958491", "0.58819735", "0.58578616", "0.5849856", "0.5849856", "0.5849856", "0.5849856", "0.576469", "0.573563", "0.57189983", "0.5696667", "0.5681012", "0.5678527", "0.566822", "0.5661147", "0.56428087", "0.55765194", "0.55449706", "0.5526796", "0.5526796" ]
0.70711803
0
Create a context from cross table and list of objects, list of attributes
cross_table  the list of bool lists
objects  the list of objects
attributes  the list of attributes
def __init__(self, cross_table=None, objects=None, attributes=None):
    # if not (isinstance(cross_table, list) and
    #         all(isinstance(i, list) for i in cross_table)):
    #     try:
    #         cross_table = [list(i) for i in cross_table]
    #     except:
    #         raise NotTableException(cross_table)
    if len(cross_table) != len(objects):
        raise ValueError("Number of objects (=%i) and number of cross table"
                         " rows(=%i) must agree" % (len(objects), len(cross_table)))
    elif (len(cross_table) != 0) and len(cross_table[0]) != len(attributes):
        raise ValueError("Number of attributes (=%i) and number of cross table"
                         " columns (=%i) must agree" % (len(attributes), len(cross_table[0])))
    _attributes = attributes[:]
    if len(set(attributes)) < len(attributes):
        for att in _attributes:
            if _attributes.count(att) > 1:
                indices = [i for i, x in enumerate(_attributes) if x == att]
                for i in indices:
                    _attributes[i] = str(att) + '_{}'.format(i)
                message = "Not unique name of attribute '{}', ".format(att)
                message += "renamed to '{}_n', n \in {}".format(att, indices)
                module_logger.info(message)
    _objects = objects[:]
    if len(set(objects)) < len(objects):
        for obj in _objects:
            if _objects.count(obj) > 1:
                indices = [i for i, x in enumerate(_objects) if x == obj]
                for i in indices:
                    _objects[i] = str(obj) + '_{}'.format(i)
                message = "Not unique name of object '{}', ".format(obj)
                message += "renamed to '{}_n', n \in {}".format(obj, indices)
                module_logger.info(message)
    self._objects = _objects
    self._attributes = _attributes
    self.np_table = np.array(cross_table, dtype=bool)
    self.cross_table = self.np_table.tolist()
    self.object_indices = {obj: ind for ind, obj in enumerate(_objects)}
    self.attribute_indices = {att: ind for ind, att in enumerate(_attributes)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transpose(self):\n new_objects = self.attributes[:]\n new_attributes = self.objects[:]\n new_cross_table = []\n for j in range(len(self.attributes)):\n line = []\n for i in range(len(self.objects)):\n line.append(self.table[i][j])\n new_cross_table.append(line)\n return Context(new_cross_table, new_objects, new_attributes)", "def clarify_objects(self): \n dict_cxt = dict(list(zip(list(map(tuple, self)), self.objects)))\n table = list(map(list, list(dict_cxt.keys())))\n objects = list(dict_cxt.values())\n return Context(table, objects, self.attributes)", "def __init__(self, object_list, table_name, crowdcontext):\n self.cc = crowdcontext\n self.data = {'id': range(len(object_list)), 'object':object_list}\n self.start_id = len(object_list)\n self.cols = [\"id\", \"object\"]\n self.table_name = table_name\n self.presenter = None\n self.project_id = None\n self.project_short_name = None\n self.project_name = None\n\n if type(object_list) is not list:\n raise Exception(\"'object_list' should be a list\")\n if table_name not in self.cc.show_tables():\n try:\n exe_str = \"CREATE TABLE '%s' (id integer, col_name BLOB, value BLOB DEFAULT NULL, PRIMARY KEY(id, col_name))\" %(table_name)\n self.cc.cursor.execute(exe_str)\n except sqlite3.OperationalError:\n raise", "def compound(self):\n complementary_cxt = self.complementary() \n compound_table = [self.table[i] + complementary_cxt.table[i]\n for i in range(len(self.objects))]\n return Context(compound_table,\n self.objects,\n self.attributes + complementary_cxt.attributes)", "def complementary(self):\n complementary_attributes = ['not ' + self.attributes[i]\n for i in range(len(self.attributes))]\n complementary_table = []\n for i in range(len(self.objects)):\n complementary_table.append([not self.table[i][j]\n for j in range(len(self.attributes))])\n return Context(complementary_table, self.objects, complementary_attributes)", "def make_random_context(num_obj, num_att, d):\n obj_ls = ['g' + str(x) for x in range(num_obj)]\n att_ls = ['m' + str(x) for x in range(num_att)]\n table = [[int(d > random.random())\n for _ in range(num_att)]\n for _ in range(num_obj)]\n return Context(table, obj_ls, att_ls)", "def get_context_data(self, **kwargs):\r\n context = super(SingleTableMixin, self).get_context_data(**kwargs)\r\n table = self.get_table()\r\n context[self.get_context_table_name(table)] = table\r\n return context", "def __init__(self, *args):\n _snap.TTableContext_swiginit(self, _snap.new_TTableContext(*args))", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def extract_subcontext_filtered_by_attributes(self, attributes_names,\n mode=\"and\"):\n values = dict( [(attribute, True) for attribute in attributes_names] )\n object_names, subtable = \\\n self._extract_subtable_by_attribute_values(values, mode)\n return Context(subtable,\n object_names,\n self.attributes)", "def _from_composite(self, name: str, context: Any) -> Any:\n attr_name = self.attribute_name\n comp_data = {}\n attribs = set(self.stash_by_attribute.keys())\n if self.load_keys is not None:\n attribs = attribs & self.load_keys\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'load attribs: {attribs}')\n for stash in self.stash_by_group.values():\n if len(stash.group & attribs) > 0:\n data = stash.load(name)\n logger.debug(f'loaded: {data}')\n if data is None:\n raise PersistableError(\n f'Missing composite data for id: {name}, ' +\n f'stash: {stash.group}, path: {stash.path}, ' +\n 
f'attribute: \\'{attr_name}\\'')\n if self.load_keys is None:\n comp_data.update(data)\n else:\n for k in set(data.keys()) & attribs:\n comp_data[k] = data[k]\n if context is not None:\n ordered_data = collections.OrderedDict()\n for k in context:\n if k in comp_data:\n ordered_data[k] = comp_data[k]\n comp_data = ordered_data\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'comp_data: {comp_data}')\n return comp_data", "def test_attributes_set_from_passed_values(self):\n\n expected_attributes = {\n \"columns\": [\"a\", \"b\", \"c\"],\n \"copy\": False,\n \"verbose\": True,\n }\n\n x = BaseTransformer(**expected_attributes)\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"Attributes set in init from passed values\",\n )", "def make_context():\n\n context = [\"https://www.w3.org/ns/activitystreams#\",\n \"http://mementoweb.org/test-ns#\"]\n temp = {}\n temp[\"tracker\"] = \"http://tracker.mementoweb.org/ns#\"\n temp[\"prov\"] = \"http://www.w3.org/ns/prov#\"\n temp[\"schema\"] = \"https://schema.org/\"\n temp[\"prov:used\"] = {\n \"@type\": \"@id\",\n \"@container\": \"@set\"\n }\n temp[\"prov:wasInformedBy\"] = {\n \"@type\": \"@id\",\n \"@container\": \"@set\"}\n temp[\"prov:wasGeneratedBy\"] = {\"@type\": \"@id\"}\n temp[\"prov:softwareAgent\"] = {\"@type\": \"@id\"}\n temp[\"prov:generatedAtTime\"] = {\n \"@type\": \"http://www.w3.org/2001/XMLSchema#dateTime\"}\n context.append(temp)\n return context", "def make_context(source, frmat='table'):\n return Context.fromstring(source, frmat=frmat)", "def composite(self, t):\n if len(t) == 1:\n return t[0] # metadata and values - already processed\n\n key_token = t[0][0]\n attribute_dicts = t[1]\n\n if not isinstance(attribute_dicts, list):\n # always handle a list of attributes\n attribute_dicts = [attribute_dicts]\n\n key_name = self.key_name(key_token)\n composite_dict = CaseInsensitiveOrderedDict(CaseInsensitiveOrderedDict)\n composite_dict[\"__type__\"] = key_name\n\n if self.include_position:\n pd = self.create_position_dict(key_token, None)\n composite_dict[\"__position__\"] = pd\n\n if self.include_comments:\n comments_dict = composite_dict[\"__comments__\"] = OrderedDict()\n\n for d in attribute_dicts:\n keys = d.keys()\n if \"__type__\" in keys:\n k = d[\"__type__\"]\n if k in SINGLETON_COMPOSITE_NAMES:\n composite_dict[k] = d\n else:\n plural_key = self.plural(k)\n if plural_key not in composite_dict:\n composite_dict[plural_key] = []\n\n composite_dict[plural_key].append(d)\n else:\n # simple attribute\n pos = d.pop(\"__position__\")\n d.pop(\"__tokens__\", None) # tokens are no longer needed now we have the positions\n comments = d.pop(\"__comments__\", None)\n\n key_name = self.get_single_key(d)\n\n if key_name == \"config\":\n # there may be several config dicts - one for each setting\n if key_name not in composite_dict:\n # create an initial OrderedDict\n composite_dict[key_name] = CaseInsensitiveOrderedDict(CaseInsensitiveOrderedDict)\n # populate the existing config dict\n cfg_dict = composite_dict[key_name]\n cfg_dict.update(d[key_name])\n\n if self.include_position:\n if key_name not in pd:\n pd[key_name] = OrderedDict()\n\n subkey_name = self.get_single_key(d[key_name])\n pd[key_name][subkey_name] = pos\n\n elif key_name == \"points\":\n if key_name not in composite_dict:\n composite_dict[key_name] = d[key_name]\n else:\n # if points are already in a feature then\n # allow for multipart features in a nested list\n existing_points = composite_dict[key_name]\n\n def 
depth(L):\n return isinstance(L, (tuple, list)) and max(map(depth, L)) + 1\n\n if depth(existing_points) == 2:\n composite_dict[key_name] = [existing_points]\n\n if key_name not in composite_dict:\n composite_dict[key_name] = []\n composite_dict[key_name].append(d[key_name])\n\n if self.include_position:\n if key_name not in pd:\n pd[key_name] = pos\n else:\n existing_pos = pd[key_name]\n if isinstance(existing_pos, dict):\n pd[key_name] = [existing_pos]\n pd[key_name].append(pos)\n\n elif key_name in REPEATED_KEYS:\n if key_name not in composite_dict:\n composite_dict[key_name] = []\n\n composite_dict[key_name].append(d[key_name])\n\n if self.include_position:\n if key_name not in pd:\n pd[key_name] = []\n pd[key_name].append(pos)\n\n else:\n assert len(d.items()) == 1\n if self.include_position:\n # hoist position details to composite\n pd[key_name] = pos\n if self.include_comments and comments:\n # hoist comments to composite\n comments_dict[key_name] = comments\n\n composite_dict[key_name] = d[key_name]\n\n return composite_dict", "def polyCreaseCtx(*args, createSet: AnyStr=\"\", exists: bool=True, extendSelection: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", relative: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def filldf(df,features,CrossMethod):\n for i in CrossMethod.keys():\n for j in features:\n if i in j:\n p = j[1:-1].split(i)\n df[j] = CrossMethod[i](df[p[0]],df[p[1]])\n return df", "def build_context_target(data):\n\n\tcontexts = list()\n\ttargets = list()\n\n\tfor i in range(2, len(data) - 2):\n\n\t\tcntx = ' '.join([data[i-2], data[i-1], data[i+1], data[i+2]])\n\t\ttgt = data[i]\n\n\t\tcontexts.append(cntx)\n\t\ttargets.append(tgt)\n\n\tc2t = pd.DataFrame({'context' : contexts, 'targets':targets})\n\n\treturn c2t", "def __init__(self, tree):\n connector = db\n if isinstance(tree, list):\n for collection in tree:\n connector = connector[collection]\n else:\n connector = connector[tree]\n self.table = connector\n self.attributs = {}", "def _createContext(instance, args, kwargs, settings):\n context = kwargs.copy()\n args = list(args)\n context.update({name:getattr(instance, name, None) for name in settings.get('context', [])})\n context.update({key:args.pop(0) for key in settings.get('argsTokwargs', [])})\n return context", "def __init__(self, attck_obj = None, **kwargs):\n\n super(AttckTools, self).__init__(**kwargs)\n self.attck_obj = attck_obj\n\n self.id = self._set_id(kwargs)\n self.name = self._set_attribute(kwargs, 'name')\n self.alias = self._set_attribute(kwargs, 'aliases')\n self.description = self._set_attribute(kwargs, 'description')\n self.reference = self._set_reference(kwargs)\n self.created = self._set_attribute(kwargs, 'created')\n self.modified = self._set_attribute(kwargs, 'modified')\n self.stix = self._set_attribute(kwargs, 'id')\n self.type = self._set_attribute(kwargs, 'type')\n self.wiki = self._set_wiki(kwargs)\n self.contributor = self._set_attribute(kwargs, 'contributor')\n\n self.set_relationships(self.attck_obj)\n\n if AttckTools.__ATTCK_C2_DATASETS is None or AttckTools.__ATTCK_TOOLS_DATASETS is None:\n try:\n data = AttckDatasets().generated_attck_data()\n except:\n raise GeneratedDatasetException('Unable to retrieve generated attack data properties')\n if AttckTools.__ATTCK_C2_DATASETS is None:\n if 'c2_data' in data:\n AttckTools.__ATTCK_C2_DATASETS = data['c2_data']\n if AttckTools.__ATTCK_TOOLS_DATASETS is None:\n if 'tools' 
in data:\n AttckTools.__ATTCK_TOOLS_DATASETS = data['tools']\n\n self.c2_data = self.__get_c2_dataset()\n self.external_dataset = self.__get_tools_dataset()", "def _build_context(cls, context=None, vocab=None, base=None, language=None):\n VOCAB, BASE, LANGUAGE = \"@vocab\", \"@base\", \"@language\"\n if context is None:\n context = {}\n if vocab:\n context[VOCAB] = str(vocab)\n else:\n for x in cls.__mro__:\n if hasattr(x, \"vocab\"):\n v = x.vocab()\n if v:\n context[VOCAB] = v\n break\n\n if base:\n context[BASE] = str(base)\n\n if language:\n context[LANGUAGE] = language\n if context:\n cls.__annotations__.update(context)", "def make_shell_context():\n\n context = dict(app=app, db=db)\n for class_ in [FormData, CaseData, CaseIndex, Synclog, OwnershipCleanlinessFlag]:\n context[class_.__name__] = class_\n return context", "def get_contexts(config, vary_fast_math=False):\n\n class CtxCreator:\n def __init__(self, api, pnum, dnum, fast_math=None):\n platform = api.get_platforms()[pnum]\n device = platform.get_devices()[dnum]\n\n fm_suffix = {True:\",fm\", False:\",nofm\", None:\"\"}[fast_math]\n self.device_id = api.API_ID + \",\" + str(pnum) + \",\" + str(dnum)\n self.platform_name = platform.name\n self.device_name = device.name\n self.id = self.device_id + fm_suffix\n\n kwds = dict(device=device)\n if fast_math is not None:\n kwds['fast_math'] = fast_math\n\n self.create = lambda: api.Context.create(**kwds)\n\n def __call__(self):\n return self.create()\n\n def __str__(self):\n return self.id\n\n apis, _ = get_apis(config)\n\n if vary_fast_math:\n fm = config.option.fast_math\n fms = dict(both=[False, True], no=[False], yes=[True])[fm]\n else:\n fms = [None]\n\n include_devices = config.option.device_include_mask\n exclude_devices = config.option.device_exclude_mask\n include_platforms = config.option.platform_include_mask\n exclude_platforms = config.option.platform_exclude_mask\n\n def name_matches_masks(name, includes, excludes):\n if len(includes) > 0:\n for include in includes:\n if re.search(include, name):\n break\n else:\n return False\n\n if len(excludes) > 0:\n for exclude in excludes:\n if re.search(exclude, name):\n return False\n\n return True\n\n ccs = []\n seen_devices = set()\n for api in apis:\n for pnum, platform in enumerate(api.get_platforms()):\n\n seen_devices.clear()\n\n if not name_matches_masks(platform.name, include_platforms, exclude_platforms):\n continue\n\n for dnum, device in enumerate(platform.get_devices()):\n if not name_matches_masks(device.name, include_devices, exclude_devices):\n continue\n\n if (not config.option.include_duplicate_devices and\n device.name in seen_devices):\n continue\n\n seen_devices.add(device.name)\n\n for fm in fms:\n ccs.append(CtxCreator(api, pnum, dnum, fast_math=fm))\n\n return ccs, [str(cc) for cc in ccs]", "def __init__(self, priors, ids, row_priors=None): \n\n # mappings from feature ids to indexes\n self.rows = dict()\n # mapping from indexes to feature ids\n self.row_names = tuple(ids)\n # array and dict with row priors\n self.row_priors = [1.0] * len(ids)\n self.feature_priors = dict()\n for index, feature in enumerate(self.row_names):\n self.rows[feature] = index\n if row_priors is not None:\n self.row_priors[index] = row_priors[feature]\n self.feature_priors[feature] = row_priors[feature]\n else:\n self.feature_priors[feature] = 1.0\n \n # mappings from reference ids to indexes, reverse, and priors\n self.columns = dict()\n self.column_names = [None] * len(priors)\n self.column_priors = [1.0] * len(priors)\n 
self.reference_priors = priors.copy()\n for refname in priors.keys():\n index = len(self.columns)\n self.columns[refname] = index\n self.column_names[index] = refname\n self.column_priors[index] = priors[refname]\n \n # data store as nested arrays\n # first index is reference index, second is feature index \n self.data = [None] * len(self.columns)\n for _ in range(len(self.columns)):\n self.data[_] = [0.0] * len(self.rows)\n \n # map to ontology parents\n self.parents = None\n \n # cache for finding positive parents during FP inference calculations\n self.cache = dict()\n self.temp = Counter()", "def extract_subcontext(self, attribute_names):\n return Context(self._extract_subtable(attribute_names),\n self.objects,\n attribute_names)", "def make_context(self, args, **kwargs):\n #The following headers will be available from Auth filter:\n #'X-Tenant-Id', 'X-Tenant-Name', 'X-User-Id',\n #'X-User-Name', 'X-Roles'\n context_params = {'auth_tok' : args.headers['X-Auth-Token'],\n 'user' : args.headers['X-User-Id'],\n 'tenant' : args.headers['X-Tenant-Id'] }\n\n LOG.debug(\"Building context with params: %s\" % context_params)\n \n return ReddwarfContext(**context_params)", "def mkcontext(self,\n context= [],\n contextobj=None):\n if contextobj == None:\n raise ValueError, \"mkcontext: contextobj is None\"\n return jsoncall.do_call(\"mkcontext\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'contextobj':contextobj.__dict__},\n self.connection)", "def context(self) -> CONTEXT:", "def create_context_from_data_product(rdf_class, entity_file: Dict[str, Any], onto, export_onto_url: str, PREFIX='pot', VERSION=1.1) -> Dict[str, Any]:\n corresponds = {\n \"DataProductContext\": \"DataProductContext\",\n \"DataProductOutput\": \"DataProductOutput\",\n \"DataProductParameters\": \"DataProductParameters\",\n \"DataProductParameters\": \"DataProductParameters\",\n \"WeatherForecastDataProductContext\": \"Weather\",\n \"WeatherForecastDataProductOutput\": \"Weather\",\n \"WeatherForecastDataProductParameters\": \"Weather\",\n \"ForecastDataProductContext\": \"Forecast\",\n \"ForecastDataProductOutput\": \"Forecast\",\n \"ForecastDataProductParameters\": \"Forecast\",\n \"LtifDataProductContext\": \"Ltif\",\n \"LtifDataProductOutput\": \"Ltif\",\n \"LtifDataProductParameters\": \"Ltif\",\n \"SensorDataProductContext\": \"Sensor\",\n \"SensorDataProductOutput\": \"Sensor\",\n \"SensorDataProductParameters\": \"Sensor\",\n \"AccuWeatherForecastDataProductContext\": \"AccuWeather\",\n \"AccuWeatherForecastDataProductOutput\": \"AccuWeather\",\n \"AccuWeatherForecastDataProductParameters\": \"AccuWeather\",\n \"DocumentDataProductContext\": \"Document\",\n \"DocumentDataProductOutput\": \"Document\",\n \"DocumentDataProductParameters\": \"Document\",\n \"DocumentSigningDataProductContext\": \"Signing\",\n \"DocumentSigningDataProductOutput\": \"Signing\",\n \"DocumentSigningDataProductParameters\": \"Signing\",\n \"SignSpaceDataProductContext\": \"SignSpace\",\n \"SignSpaceDataProductOutput\": \"SignSpace\",\n \"SignSpaceDataProductParameters\": \"SignSpace\",\n \"PriceForecastDataProductContext\": \"Price\",\n \"PriceForecastDataProductOutput\": \"Price\",\n \"PriceForecastDataProductParameters\": \"Price\",\n \"ElectricityPriceForecastDataProductContext\": \"Electricity\",\n \"ElectricityPriceForecastDataProductOutput\": \"Electricity\",\n \"ElectricityPriceForecastDataProductParameters\": \"Electricity\"\n }\n\n if entity_file.get('id') not in 
('DataProductContext', 'DataProductOutput', 'DataProductParameters'):\n entity_name = entity_file.get('id').split('/')\n new_path = []\n for e in entity_name:\n new_path.append(corresponds[e])\n entity_file['dir'], entity_file['filename'], entity_file['id'] = '/'.join(new_path[:-1]), f'{new_path[-1]}.jsonld', '/'.join(new_path) \n \n context_template = {\n '@version': VERSION,\n rdf_class.entity.name: {\"@id\": f'pot:{rdf_class.entity.name}'},\n '@schema': f\"{export_onto_url}Schema/{entity_file.get('id')}\",\n f'{PREFIX}': {\n '@id': f'{export_onto_url}Vocabulary/',\n '@prefix': True\n }\n }\n\n # Hard Code for now\n context_template[\"productCode\"] = {\"@id\": \"pot:productCode\"}\n context_template[\"timestamp\"] = {\"@id\": \"pot:timestamp\"}\n context_template[\"parameters\"] = {\"@id\": \"pot:parameters\"}\n\n # Define and fill propeties for each supported attribute\n total_attributes = build_attributes(rdf_class, onto)\n for rdf_attribute in total_attributes:\n attribute_properties = dict()\n attribute_properties['@id'] = f'{PREFIX}:{str(rdf_attribute).split(\".\")[1]}'\n attribute_properties['@nest'] = \"parameters\"\n\n context_template[rdf_attribute.name] = attribute_properties\n\n for dependent in rdf_class.entity.subclasses():\n context_template[dependent.name] = {\n 'rdfs:subClassOf': {\n '@id': f'{PREFIX}:{class_get_full_id(dependent).replace(f\"/{dependent.name}\", \"\")}'\n }\n }\n\n context_wrapper = {'@context': context_template}\n return context_wrapper" ]
[ "0.6420432", "0.62756455", "0.58896726", "0.57891214", "0.55413866", "0.5389268", "0.53224635", "0.5273347", "0.5262823", "0.49912634", "0.49382988", "0.49220228", "0.4912022", "0.49018574", "0.4884419", "0.4881911", "0.48671058", "0.48665777", "0.47704384", "0.47696188", "0.4765696", "0.47587678", "0.47441188", "0.47315368", "0.47157907", "0.47095826", "0.4707805", "0.46505058", "0.46426257", "0.46226135" ]
0.65482605
0
Return a set of corresponding attributes for row with index i.
def get_object_intent_by_index(self, i):
    obj_row = self.np_table[i, :]
    att_inds = obj_row.nonzero()[0]
    atts = [self.attributes[j] for j in att_inds]
    return set(atts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attr(self, index):\n return self.row[0, index]", "def iter_attributes(self):\n return iteritems(self.schema)", "def get_attribute_extent_by_index(self, j):\n att_col = self.np_table[:, j]\n obj_inds = att_col.nonzero()[0]\n objs = [self.objects[j] for j in obj_inds]\n return set(objs)", "def attributes(self):\n return { k: getattr(self, k) for k in self.__class__.columns().keys() }", "def getRow(self, i):\n return self.data[:,i]", "def iterrows(self):\n return (self.Row(*row_vals) for row_vals in izip(*self.columns))", "def iterall(self):\r\n return (column for name, column in self.iteritems())", "def _attributesFromRow(self, attributeList):\n for setAttribute, setValue in attributeList:\n setColumn = self.__attrmap__[setAttribute]\n if setColumn.model.type.name == \"timestamp\" and setValue is not None:\n setValue = parseSQLTimestamp(setValue)\n setattr(self, setAttribute, setValue)", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def row(self, i):\n return Vector(self.data[i], False)", "def column(self, i):\n return [self.data[ self.columns * row + (i - 1)] for row in range(self.rows)]", "def items(self):\r\n for column in self.table.columns:\r\n yield (column, self[column.name])", "def GetRowList(self, i):\n return _table.Table_GetRowList(self, i)", "def __iter__(self):\r\n for column_id in self._columns.keys():\r\n yield column_id", "def attributes(self) -> typing.Iterator[typing.Tuple[str]]:\n minimize = self.lattice._context._minimize(self._extent, self._intent)\n return (i.members() for i in minimize)", "def getAttrValue(self, ex):\n attrValue = {}\n for i in range(len(ex[0])):\n attrValue[i] = list(set([v for v in ex[:, i]]))\n return attrValue", "def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;", "def get_attr_cols(self):\n all_cols = np.arange(self.col_count)\n attr_cols = np.setdiff1d(all_cols, self.time_cols)\n return attr_cols", "def get_index(attribute, attributes):\n for i in range(14):\n if attribute == attributes[i]:\n return i", "def get_attributes(self):\n attrs = list()\n syms = list()\n for item in self.gradual_items:\n gi = item.as_integer()\n attrs.append(gi[0])\n syms.append(gi[1])\n return attrs, syms", "def row (self, i):\n return Vector(self._m[i])", "def get_elements_w_same_attributes(dataset):\n\n # Get the keys from the first attribute\n first_att = set(dataset.first().keys())\n return dataset.filter(lambda line: same_att(first_att, line))", "def get_attributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def getAttributes(self):\n return self.attributes", "def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)", "def get_attrs(self):\n attrs = []\n for attribute in self.__dict__.keys():\n attrs.append(attribute)", "def iteritems(self):\r\n for name in self.table.sequence:\r\n if name not in self.table.exclude:\r\n yield (name, self.columns[name])", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = 
self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def get_attributes(file_name, res):\r\n # Open file in read mode to get indexes of target and replace\r\n with open(file_name + \".txt\") as f:\r\n content = f.readline()\r\n target = res[0].split()[1][:-1]\r\n target2 = res[0].split()[2]\r\n content = content.split()\r\n # Placement of column to update\r\n index1 = content.index(target) // 2\r\n index2 = content.index(target2) // 2\r\n indexes = {index1, index2}\r\n attributes = attributes_picker(index1, index2)\r\n return attributes, indexes" ]
[ "0.62024844", "0.5906992", "0.58736366", "0.57972324", "0.5740616", "0.573088", "0.5686372", "0.5554735", "0.55490464", "0.5513947", "0.5484823", "0.5460635", "0.5427901", "0.5394885", "0.5335074", "0.5317278", "0.5288471", "0.52869004", "0.52812207", "0.5275876", "0.5255269", "0.52471834", "0.5244954", "0.5242744", "0.5242744", "0.5234498", "0.523141", "0.5231313", "0.52235925", "0.52153313" ]
0.7309502
0
Compute the set of all attributes shared by given objects. Objects are specified by indices.
def oprime_inds(self, obj_inds):
    if type(obj_inds) == set:
        obj_inds = list(obj_inds)
    try:
        common_intent = self.np_table[obj_inds[0], :].copy()
    except IndexError:
        return set(range(len(self.attributes)))
    else:
        for obj_ind in obj_inds[1:]:
            common_intent &= self.np_table[obj_ind, :]
    return common_intent.nonzero()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_intent_by_index(self, i):\n obj_row = self.np_table[i, :]\n att_inds = obj_row.nonzero()[0]\n atts = [self.attributes[j] for j in att_inds]\n return set(atts)", "def partial_align(*objects, **kwargs):\n join = kwargs.pop('join', 'inner')\n copy = kwargs.pop('copy', True)\n exclude = kwargs.pop('exclude', set())\n if kwargs:\n raise TypeError('align() got unexpected keyword arguments: %s'\n % list(kwargs))\n\n joined_indexes = _join_indexes(join, objects, exclude=exclude)\n return tuple(obj.reindex(copy=copy, **joined_indexes) for obj in objects)", "def align(*objects, **kwargs):\n join = kwargs.pop('join', 'inner')\n copy = kwargs.pop('copy', True)\n if kwargs:\n raise TypeError('align() got unexpected keyword arguments: %s'\n % list(kwargs))\n\n joined_indexes = _join_indexes(join, objects)\n return tuple(obj.reindex(copy=copy, **joined_indexes) for obj in objects)", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? 
AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def aprime_inds(self, att_inds):\n if type(att_inds) == set:\n att_inds = list(att_inds)\n try:\n common_extent = self.np_table[:, att_inds[0]].copy()\n except IndexError:\n return set(range(len(self.objects)))\n else:\n for att_ind in att_inds[1:]:\n common_extent &= self.np_table[:, att_ind]\n return common_extent.nonzero()[0]", "def merge(cls, objects, groups=None, overwrite_error=True):\n inserted = set()\n merged_data = cls()\n for i, obj in enumerate(objects):\n merged_data.attrs.update(**obj.attrs)\n for var in obj.vars(deep=True):\n if overwrite_error and var in inserted:\n raise KeyError(\"The variable '{}' occurred multiple \"\n \"times!\".format(var))\n else:\n if groups is not None:\n if groups[i] not in merged_data:\n merged_data[groups[i]] = cls()\n merged_data[groups[i]][var] = obj[var]\n else:\n merged_data[var] = obj[var]\n\n return merged_data", "def select(tobjects, indices):\n # if indices are outside the range of `tobjects`, the bins are set to zero\n # all `yields` and `efficiencies` must have the same binning\n # `yields` and `efficiencies` must have the same length\n assert len(tobjects) > 0\n assert len(tobjects[0]) == len(indices)\n # all `tobjects` must have the same number of bins\n assert all([len(_tobj) == len(tobjects[0]) for _tobj in tobjects[1:]])\n\n _new_tobject = _ROOTObjectFunctions._project_or_clone(tobjects[0])\n\n for _i_bin, (_bin_proxy, _obj_idx) in enumerate(zip(_new_tobject, indices)):\n # range check\n if _obj_idx.value >= 0 and _obj_idx.value < len(tobjects):\n _bin_proxy.value = tobjects[int(_obj_idx.value)][_i_bin].value\n _bin_proxy.error = tobjects[int(_obj_idx.value)][_i_bin].error\n else:\n _bin_proxy.value = 0\n _bin_proxy.error = 0\n\n return _new_tobject", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def index_object(idxs=None):", "def generate_object_attribute_values(self):\n values = [{} for _ in xrange(self.batch_size)]\n for name in self.randomized_attributes:\n #get the Object indices\n attr = self.world.object_attributes[name]\n indices = self._get_object_attribute_indices(attr)\n \n num_objects = np.max(indices) + 1\n \n #construct the base value array\n attr_values = np.empty((num_objects, attr.ndim), dtype=attr.dtype)\n \n for value in values:\n attr_values[indices, :] = attr._get_random_values(indices)\n \n value[name] = copy(attr_values)\n \n return values", "def get_objects(self, object_ids):\n requests = []\n for object_id in object_ids:\n req = { \"indexName\" : self.index_name, \"objectID\": object_id}\n requests.append(req)\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"POST\", \"/1/indexes/*/objects\", self.client.timeout, { \"requests\" : requests});", "def unique(objects):\n uniques = []\n for obj in objects:\n if obj not in uniques:\n uniques.append(obj)\n return uniques", "def union(self, *objects):\n roots = [self[x] for x in objects]\n # Find the heaviest root according to its weight.\n heaviest = max(roots, key=lambda r: self.weights[r])\n for r in roots:\n if r != heaviest:\n self.weights[heaviest] += self.weights[r]\n self.parents[r] = heaviest", "def 
overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))", "def check_for_indices(self, indices: Iterable[int]) -> Set[int]:\n cursor = self.mongo_database.cache.find(\n {\"session_id\": self.session_id, \"sample_identifier\": {\"$in\": indices}},\n {\"_id\": 0, \"sample_identifier\": 1},\n )\n return {i[\"sample_identifier\"] for i in cursor}", "def MatchObjectAttributes(target_ids, source_id=None):\n id = rhutil.coerceguid(target_ids, False)\n if id: target_ids = [id]\n source_attr = Rhino.DocObjects.ObjectAttributes()\n if source_id:\n source = rhutil.coercerhinoobject(source_id, True, True)\n source_attr = source.Attributes.Duplicate()\n rc = 0\n for id in target_ids:\n id = rhutil.coerceguid(id, True)\n if scriptcontext.doc.Objects.ModifyAttributes(id, source_attr, True):\n rc += 1\n if rc: scriptcontext.doc.Views.Redraw()\n return rc", "def _compute_commonindex(self, index):\n # Shorten the computations with direct access to raw object\n hist = self._hist\n\n # Support dict access\n if hasattr(index, \"items\"):\n indexes = [slice(None)] * hist.rank()\n for k, v in index.items():\n indexes[k] = v\n\n # Normalize -> h[i] == h[i,]\n else:\n if not isinstance(index, tuple):\n index = (index,)\n # Now a list\n indexes = _expand_ellipsis(index, hist.rank())\n\n if len(indexes) != hist.rank():\n raise IndexError(\"Wrong number of indices for histogram\")\n\n # Allow [bh.loc(...)] to work\n for i in range(len(indexes)):\n # Support sum and rebin directly\n if indexes[i] is sum or hasattr(indexes[i], \"factor\"):\n indexes[i] = slice(None, None, indexes[i])\n # General locators\n elif callable(indexes[i]):\n indexes[i] = indexes[i](self.axes[i])\n elif hasattr(indexes[i], \"__index__\"):\n if abs(indexes[i]) >= hist.axis(i).size:\n raise IndexError(\"histogram index is out of range\")\n indexes[i] %= hist.axis(i).size\n\n return indexes", "def ensure_indicies(self):\n # Search indicies for materials\n self.materials.ensure_index(self.materials.key)\n self.materials.ensure_index(self.materials.last_updated_field)\n\n # Search indicies for elasticity\n self.elasticity.ensure_index(self.elasticity.key)\n self.elasticity.ensure_index(self.elasticity.last_updated_field)\n\n # Search indicies for substrates\n self.substrates.ensure_index(self.substrates.key)\n self.substrates.ensure_index(self.substrates.last_updated_field)", "def retrieve(self, indices):\n obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []\n for i in indices:\n if i == 0:\n i = 1\n data = self._storage[i]\n obs_t, action, reward, obs_tp1, done = data\n obses_t.append(np.array(obs_t, copy=False))\n actions.append(np.array(action, copy=False))\n rewards.append(reward)\n obses_tp1.append(np.array(obs_tp1, copy=False))\n dones.append(done)\n return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def remove_objects(self, indexes):\n fields = [\n \"object_position\",\n \"object_velocity\",\n \"object_radius\",\n \"object_rotation\",\n 
\"object_type\",\n \"object_steps\",\n ]\n for field in fields:\n setattr(\n self,\n field,\n [x for i, x in enumerate(getattr(self, field)) if i not in indexes],\n )", "def get_attribute_extent_by_index(self, j):\n att_col = self.np_table[:, j]\n obj_inds = att_col.nonzero()[0]\n objs = [self.objects[j] for j in obj_inds]\n return set(objs)", "def indexes_to_objects(self, index_vector: np.ndarray) -> Sequence[Any]:\n return [self.idx_to_obj[idx] for idx in index_vector if idx in self.idx_to_obj]", "def common_entries(*dcts):\n if not dcts:\n return\n for i in set(dcts[0]).intersection(*dcts[1:]):\n yield (i,) + tuple(d[i] for d in dcts)", "def identify_member_sets(index):\n\n if index is None:\n return []\n queue = [index]\n ans = []\n while queue:\n s = queue.pop(0)\n if not isinstance(s, _SetProduct):\n ans.append(s)\n else:\n queue.extend(s.set_tuple)\n return ans", "def iterate_with_selected_objects(\n analysis_objects: Mapping[_T_Key, _T_Analysis], **selections: Any\n) -> Iterator[Tuple[_T_Key, _T_Analysis]]:\n for key_index, obj in analysis_objects.items():\n # If selections is empty, we return every object. If it's not empty, then we only want to return\n # objects which are selected in through the selections.\n selected_obj = not selections or all(\n [getattr(key_index, selector) == selected_value for selector, selected_value in selections.items()]\n )\n\n if selected_obj:\n yield key_index, obj", "def unique_objs(objs: List[object], unique_attrs: List[str]) -> List:\n\n seen_obj_footprints = set()\n unique = []\n footprint_func = attrgetter(*unique_attrs)\n for obj in objs:\n obj_footprint = footprint_func(obj)\n if obj_footprint in seen_obj_footprints:\n continue\n\n seen_obj_footprints.add(obj_footprint)\n unique.append(obj)\n return unique", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()" ]
[ "0.588733", "0.55038106", "0.5487051", "0.53599745", "0.53599745", "0.53055453", "0.5282523", "0.52661836", "0.5225572", "0.5225572", "0.5223952", "0.5203928", "0.51347214", "0.5112523", "0.50871557", "0.50589556", "0.50493234", "0.50442564", "0.5041548", "0.50312746", "0.50258136", "0.49984175", "0.49953502", "0.49938348", "0.49850455", "0.49822414", "0.4976673", "0.4967084", "0.4947456", "0.4941089" ]
0.61166066
0
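A minimal standalone sketch of the derivation operator in the record above, for readers skimming the dump. The three-object cross-table, the helper signature and the explicit num_attributes argument are illustrative assumptions; the original is a Context method operating on self.np_table.

import numpy as np

# Hypothetical 3x3 boolean cross-table: rows are objects, columns are attributes.
np_table = np.array([[True, True, False],
                     [True, False, True],
                     [True, True, True]])

def oprime_inds(np_table, num_attributes, obj_inds):
    # Intersect the attribute rows of the selected objects.
    obj_inds = list(obj_inds)
    try:
        common_intent = np_table[obj_inds[0], :].copy()
    except IndexError:
        # Empty selection: every attribute is shared vacuously.
        return set(range(num_attributes))
    for obj_ind in obj_inds[1:]:
        common_intent &= np_table[obj_ind, :]
    return common_intent.nonzero()[0]

print(oprime_inds(np_table, 3, {0, 2}))  # -> [0 1]: attributes 0 and 1 are shared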
Return new context with transposed crosstable
def transpose(self):
    new_objects = self.attributes[:]
    new_attributes = self.objects[:]
    new_cross_table = []
    for j in range(len(self.attributes)):
        line = []
        for i in range(len(self.objects)):
            line.append(self.table[i][j])
        new_cross_table.append(line)
    return Context(new_cross_table, new_objects, new_attributes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crosstab(self, x, y=[], w=None, f=None, ci='counts', base='auto', stats=False,\n sig_level=None, rules=False, decimals=1, xtotal=False,\n painted=True, text_key=None):\n def _rounding(x, dec):\n try:\n return np.round(x, decimals=dec)\n except:\n return x\n #######################################################################\n # prepare stack\n #######################################################################\n if isinstance(f, str) or not f:\n idx = self.manifest_filter(f)\n else:\n idx = self.take(f)\n data = self._data.copy().loc[idx]\n stack = qp.Stack(name='ct', add_data={'ct': (data, self._meta)})\n if xtotal or not y:\n y = ['@'] + self.unroll(y)\n else:\n y = self.unroll(y)\n test_y = [yk for yk in y if yk != '@']\n views = ['cbase']\n for i in ci:\n if not i in ['counts', 'c%']:\n raise ValueError(\"Provides only counts and c%\")\n else:\n views.append(i)\n # for the sig-tests to be calculatd, we need counts even though\n # they haven't been requested\n if ci == ['c%'] and sig_level:\n views.append('counts')\n if base == 'unweighted' and w is not None:\n views.remove('cbase')\n stack.add_link('ct', x=x, y=y, views=views, weights=w)\n # include unweighted base in stack\n if w is not None and base in ['both', 'unweighted']:\n stack.add_link('ct', x=x, y=y, views=['cbase'], weights=None) \n if stats:\n stats = ['mean', 'median', 'stddev', 'lower_q', 'upper_q']\n options = {\n 'stats': '',\n 'axis': 'x'}\n view = qp.ViewMapper()\n view.make_template('descriptives')\n for stat in stats:\n options['stats'] = stat\n view.add_method('stat', kwargs=options)\n stack.add_link('ct', x=x, y=y, views=view, weights=w)\n if sig_level and test_y:\n view = qp.ViewMapper().make_template(\n method='coltests',\n iterators={\n 'metric': ['props', 'means'],\n 'mimic': ['Dim'],\n 'level': sig_level})\n view.add_method(\n 'significance',\n kwargs = {\n 'flag_bases': [30, 100],\n 'test_total': None,\n 'groups': 'Tests'})\n stack.add_link('ct', x=x, y=y, views=view, weights=w)\n #######################################################################\n # prepare ViewManager\n #######################################################################\n vm = qp.ViewManager(stack)\n if ci == ['counts']:\n cellitems = 'counts'\n elif ci == ['c%']:\n cellitems = 'colpct'\n elif 'counts' in ci and 'c%' in ci:\n cellitems = 'counts_colpct'\n vm.get_views(\n data_key='ct',\n filter_key='no_filter',\n weight=w,\n freqs=True,\n stats=stats,\n tests=sig_level,\n cell_items=cellitems,\n bases=base)\n vm.set_bases(base, False, False, base)\n #######################################################################\n # prepare ChainManager\n #######################################################################\n cm = ChainManager(stack)\n cm.get(\n data_key = 'ct',\n filter_key = 'no_filter',\n x_keys = x,\n y_keys = y,\n views = vm.views,\n orient = 'x',\n prioritize = True,\n rules = rules,\n rules_weight = w or '',\n folder = 'ct')\n\n if painted:\n if text_key is not None:\n cm.paint_all(totalize=True, text_key=text_key)\n else:\n cm.paint_all(totalize=True)\n\n dfs = []\n for chain in cm['ct']:\n df = chain.dataframe\n dfs.append(df)\n all_df = pd.concat(dfs)\n for c in df.columns:\n all_df[c] = all_df[c].apply(lambda x: _rounding(x, decimals))\n return all_df", "def build_context_target(data):\n\n\tcontexts = list()\n\ttargets = list()\n\n\tfor i in range(2, len(data) - 2):\n\n\t\tcntx = ' '.join([data[i-2], data[i-1], data[i+1], data[i+2]])\n\t\ttgt = 
data[i]\n\n\t\tcontexts.append(cntx)\n\t\ttargets.append(tgt)\n\n\tc2t = pd.DataFrame({'context' : contexts, 'targets':targets})\n\n\treturn c2t", "def cTable(self, full=False, expand=False, factor=False, simplify=False):\n self._.c = rewriteTuple(self._.c, expand=expand, factor=factor,\n simplify=simplify)\n return tuple(self._.c) if full else self._.c[1:]", "def compound(self):\n complementary_cxt = self.complementary() \n compound_table = [self.table[i] + complementary_cxt.table[i]\n for i in range(len(self.objects))]\n return Context(compound_table,\n self.objects,\n self.attributes + complementary_cxt.attributes)", "def T(self):\n return Op('transpose', self)", "def long_to_gctx(df):\n df = df[[\"rid\", \"cid\", \"value\"]].pivot(index=\"rid\", columns=\"cid\", values=\"value\")\n gct = GCToo(df)\n\n # Ensure index is string\n gct.row_metadata_df.index = gct.row_metadata_df.index.astype(\"str\")\n gct.data_df.index = gct.data_df.index.astype(\"str\")\n\n return gct", "def transpose():", "def to_context_mat( iterable, context=FloatContext ):\n to_float = context.from_int\n return [[to_float(x) for x in row] for row in iterable]", "def transpose(self) -> None:\n ...", "def transpose(self):\n pass", "def transp(self, x1, x2, d):\n raise NotImplementedError", "def transpose(self):\n return self.conjugate()", "def triu_matrix(matrix, kind='upper'):\r\n matrix_ = pd.DataFrame(matrix)\r\n index = matrix_.index\r\n columns = matrix_.columns\r\n values = matrix_.values\r\n if kind == 'upper': \r\n index_lower = np.tril_indices(matrix_.shape[0], -1)\r\n values[index_lower] = values.T[index_lower]\r\n elif kind == 'lower':\r\n index_upper = np.triu_indices(matrix_.shape[0], 1)\r\n values[index_upper] = values.T[index_upper]\r\n return CategoryCov(pd.DataFrame(values, index=index, columns=columns))", "def get_context(x, w=2, normalize=True):\n\n # check if context exists\n# if os.path.isfile('contextdata.npy'):\n# print('loading context data from file')\n# return np.load('contextdata.npy')\n#\n input_dim = x.shape\n\n if normalize:\n x = np.reshape(x, [input_dim[0]*input_dim[1], input_dim[2]]) # for ease of normalization\n x = sklearn.preprocessing.normalize(x, norm='l2', axis=1)\n x = np.reshape(x, [input_dim[0], input_dim[1], input_dim[2]])\n\n # padding\n p = Context.pad(x, w)\n\n # extract context\n c = Context.slide(p, w)\n\n# np.save('contextdata.npy', c)\n\n return c", "def T(self):\n return F.Transpose.apply(self)", "def test_roundtrip_from_transpose2(self):\n transposed_array = np.array([[0, 1, 2], [2, 1, 0]]).T\n assert_array_equal(transposed_array, carray(transposed_array, dtype=transposed_array.dtype))", "def make_context(source, frmat='table'):\n return Context.fromstring(source, frmat=frmat)", "def crossover(v1, v2):\n idx1 = np.random.choice(v1.size, size=int(v1.size/2))\n idx2 = np.random.choice(v2.size, size=int(v2.size/2))\n data = np.array([v1.data[i] for i in idx1] +\n [v2.data[i] for i in idx2])\n idx = np.array([v1.indices[i] for i in idx1] +\n [v2.indices[i] for i in idx2])\n v3 = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),\n shape=v1.shape)\n return v3", "def Transpose(self):\n return _hypre.HypreParMatrix_Transpose(self)", "def test_roundtrip_from_transpose1(self):\n transposed_array = np.array([[0, 1, 2], [2, 1, 0]]).T\n assert_array_equal(transposed_array, carray(transposed_array, dtype=None))", "def _create_transpose(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n 
helper.make_attribute('perm', op.perm),\n ])\n return node", "def _create_transpose(cls, onnx_node, inputs, opset_version):\n shape = inputs[0].shape\n perm = onnx_node.getattr(\"perm\", list(range(len(shape) - 1, -1, -1)))\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(perm)", "def transpose(self):\n return self._new(self.rep.transpose(), (self.cols, self.rows), self.domain)", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def getTransposeMatrix(self) -> CMatrix4:\n ...", "def cross_tabulation(actual, classified):\n crosstab = np.zeros((np.unique(actual).size, np.unique(actual).size))\n for i in range(0, actual.size):\n crosstab[classified[i]-1][actual[i]-1] += 1\n \n total = 0\n diagonal = 0\n for r in range(0, crosstab.shape[0]):\n for c in range(0, crosstab.shape[1]):\n total += crosstab[r][c]\n if (r == c):\n diagonal += crosstab[r][c]\n print(\"The overall accuracy is: \" + str(diagonal / total * 100) + \"%\")\n \n return crosstab", "def transpose(self, p=(1, 0)):\n res = self.empty_like()\n for k, v in self.sects.items():\n kt = tuple(map(k.__getitem__, p))\n res.sects[kt] = v.transpose(p)\n res.shape = list(map(self.shape.__getitem__, p))\n res.qhape = list(map(self.qhape.__getitem__, p))\n res.dirs = list(map(self.dirs.__getitem__, p))\n return res", "def enrich_context(self, ctx: Context) -> Context:\n new_ctx = Context(ctx.expressions[:], ctx.namespace)\n for _ in range(self.expression_levels):\n new_ctx.extend(list(self.properties(new_ctx)))\n new_ctx.extend(list(self.unary_ops(new_ctx)))\n new_ctx.extend(list(self.binary_ops(new_ctx)))\n new_ctx.extend(list(self.calls(new_ctx)))\n new_ctx.extend(list(self.comparisons(new_ctx)))\n new_ctx.extend(list(self.bool_ops(new_ctx)))\n return new_ctx", "def test_Contingency_table(observation, forecast, category_edges, dim, type):\n if \"ds\" in type:\n name = \"var\"\n observation = observation.to_dataset(name=name)\n forecast = forecast.to_dataset(name=name)\n if \"chunked\" in type:\n observation = observation.chunk()\n forecast = forecast.chunk()\n cont_table = Contingency(\n observation, forecast, category_edges, category_edges, dim=dim\n )\n assert cont_table", "def clarify_objects(self): \n dict_cxt = dict(list(zip(list(map(tuple, self)), self.objects)))\n table = list(map(list, list(dict_cxt.keys())))\n objects = list(dict_cxt.values())\n return Context(table, objects, self.attributes)" ]
[ "0.55264497", "0.54790974", "0.53508955", "0.5332192", "0.53310543", "0.5275063", "0.5258353", "0.5240064", "0.52142644", "0.51650935", "0.507828", "0.49838346", "0.4972646", "0.4959765", "0.4957016", "0.494776", "0.49308422", "0.49294916", "0.49254048", "0.49244452", "0.49107364", "0.4877301", "0.48710215", "0.48637757", "0.48471677", "0.4845139", "0.48344404", "0.48321977", "0.4811064", "0.48051065" ]
0.7405647
0
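A quick sketch of what the transpose in the record above does to a plain cross-table, using bare lists instead of the Context class; the table, objects and attributes below are made-up examples.

table = [[1, 0, 1],    # object "o1"
         [0, 1, 1]]    # object "o2"
objects = ["o1", "o2"]
attributes = ["a", "b", "c"]

# Rows and columns swap: attributes become the new objects and vice versa.
new_table = [[table[i][j] for i in range(len(objects))]
             for j in range(len(attributes))]

print(new_table)  # [[1, 0], [0, 1], [1, 1]]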
Create a subcontext containing only those objects that have the given attributes
def extract_subcontext_filtered_by_attributes(self, attributes_names,
                                              mode="and"):
    values = dict( [(attribute, True) for attribute in attributes_names] )
    object_names, subtable = \
        self._extract_subtable_by_attribute_values(values, mode)
    return Context(subtable,
                   object_names,
                   self.attributes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_subcontext(self, attribute_names):\n return Context(self._extract_subtable(attribute_names),\n self.objects,\n attribute_names)", "def _createContext(instance, args, kwargs, settings):\n context = kwargs.copy()\n args = list(args)\n context.update({name:getattr(instance, name, None) for name in settings.get('context', [])})\n context.update({key:args.pop(0) for key in settings.get('argsTokwargs', [])})\n return context", "def clarify_objects(self): \n dict_cxt = dict(list(zip(list(map(tuple, self)), self.objects)))\n table = list(map(list, list(dict_cxt.keys())))\n objects = list(dict_cxt.values())\n return Context(table, objects, self.attributes)", "def __new__(cls, *args, **kwargs):\n ores = super(BaseDataObject, cls).__new__(cls)\n if cls.context is not None:\n ores.context = cls.context\n ores.add_contextualization(cls.context, ores)\n res = ores\n else:\n ores.context = None\n res = ores\n\n return res", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def context(self) -> CONTEXT:", "def make_context(self, info_name, args, parent=None, **extra):\n # log.info(term.blue('MAKE CONTEXT'))\n for key, value in click._compat.iteritems(self.context_settings):\n if key not in extra:\n extra[key] = value\n ctx = Context(self, info_name=info_name, parent=parent, **extra)\n with ctx.scope(cleanup=False):\n self.parse_args(ctx, args)\n return ctx", "def create_context(cls):\n pass", "def prepare_context(self, activity, context, typename=None):\n context.update({\n 'activity': activity,\n 'object': activity.snapshot,\n 'typename': typename,\n })\n return context", "def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def new_context(self):\n return dict()", "def make_context(self, args, **kwargs):\n #The following headers will be available from Auth filter:\n #'X-Tenant-Id', 'X-Tenant-Name', 'X-User-Id',\n #'X-User-Name', 'X-Roles'\n context_params = {'auth_tok' : args.headers['X-Auth-Token'],\n 'user' : args.headers['X-User-Id'],\n 'tenant' : args.headers['X-Tenant-Id'] }\n\n LOG.debug(\"Building context with params: %s\" % context_params)\n \n return ReddwarfContext(**context_params)", "def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }", "def make_context():\n\n context = [\"https://www.w3.org/ns/activitystreams#\",\n \"http://mementoweb.org/test-ns#\"]\n temp = {}\n temp[\"tracker\"] = \"http://tracker.mementoweb.org/ns#\"\n temp[\"prov\"] = \"http://www.w3.org/ns/prov#\"\n temp[\"schema\"] = \"https://schema.org/\"\n temp[\"prov:used\"] = {\n \"@type\": \"@id\",\n \"@container\": \"@set\"\n }\n temp[\"prov:wasInformedBy\"] = {\n \"@type\": \"@id\",\n \"@container\": \"@set\"}\n temp[\"prov:wasGeneratedBy\"] = {\"@type\": \"@id\"}\n temp[\"prov:softwareAgent\"] = {\"@type\": \"@id\"}\n temp[\"prov:generatedAtTime\"] = {\n \"@type\": \"http://www.w3.org/2001/XMLSchema#dateTime\"}\n context.append(temp)\n return context", "def make_contexts(self, sites, rupture):\n sites, dctx = self.filter(sites, rupture)\n for param in self.REQUIRES_DISTANCES - set([self.filter_distance]):\n setattr(dctx, param, get_distances(rupture, sites, param))\n 
self.add_rup_params(rupture)\n # NB: returning a SitesContext make sures that the GSIM cannot\n # access site parameters different from the ones declared\n sctx = SitesContext(sites, self.REQUIRES_SITES_PARAMETERS)\n return sctx, dctx", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }", "def __init__(self):\n self._context = {}", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def trait_context ( self ):\n return { 'object': self }", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def make_shell_context():\n\n context = dict(app=app, db=db)\n for class_ in [FormData, CaseData, CaseIndex, Synclog, OwnershipCleanlinessFlag]:\n context[class_.__name__] = class_\n return context", "def _make_context():\n return {\n 'api': application.mounts['/api'],\n 'db': db,\n 'User': User,\n 'admin': application.mounts['/admin']\n }", "def filtered_context(context):\n\n ctx = Context(context.opt)\n for resource in context.resources():\n if resource.child:\n continue\n\n if resource.filtered():\n ctx.add(resource)\n\n return ctx" ]
[ "0.6639891", "0.61329204", "0.60786223", "0.5814791", "0.5736604", "0.5519445", "0.5488261", "0.5481172", "0.5458826", "0.5431118", "0.54173887", "0.54173887", "0.54173887", "0.54173887", "0.54173887", "0.54173887", "0.54009163", "0.53864425", "0.53735554", "0.53639424", "0.53198594", "0.53167164", "0.5304427", "0.5291812", "0.5272629", "0.5264346", "0.5258938", "0.5253259", "0.525189", "0.5248151" ]
0.6591381
1
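A standalone sketch of filtering a context down to the objects that carry all of the requested attributes, which the record above delegates to _extract_subtable_by_attribute_values. The sample data and the simplified "and" logic below are assumptions for illustration only.

table = [[1, 1, 0],
         [1, 0, 1],
         [1, 1, 1]]
objects = ["o1", "o2", "o3"]
attributes = ["a", "b", "c"]

wanted = ["a", "b"]                       # keep objects that have both "a" and "b"
cols = [attributes.index(name) for name in wanted]

kept = [i for i in range(len(table)) if all(table[i][j] for j in cols)]
sub_objects = [objects[i] for i in kept]
sub_table = [table[i] for i in kept]

print(sub_objects)  # ['o1', 'o3']
print(sub_table)    # [[1, 1, 0], [1, 1, 1]]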
Create a subcontext with only indicated attributes
def extract_subcontext(self, attribute_names):
    return Context(self._extract_subtable(attribute_names),
                   self.objects,
                   attribute_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_subcontext_filtered_by_attributes(self, attributes_names,\n mode=\"and\"):\n values = dict( [(attribute, True) for attribute in attributes_names] )\n object_names, subtable = \\\n self._extract_subtable_by_attribute_values(values, mode)\n return Context(subtable,\n object_names,\n self.attributes)", "def _createContext(instance, args, kwargs, settings):\n context = kwargs.copy()\n args = list(args)\n context.update({name:getattr(instance, name, None) for name in settings.get('context', [])})\n context.update({key:args.pop(0) for key in settings.get('argsTokwargs', [])})\n return context", "def make_context(self, info_name, args, parent=None, **extra):\n # log.info(term.blue('MAKE CONTEXT'))\n for key, value in click._compat.iteritems(self.context_settings):\n if key not in extra:\n extra[key] = value\n ctx = Context(self, info_name=info_name, parent=parent, **extra)\n with ctx.scope(cleanup=False):\n self.parse_args(ctx, args)\n return ctx", "def make_context(self, info_name, args, parent=None, **extra):\n for key, value in click._compat.iteritems(self.context_settings):\n if key not in extra:\n extra[key] = value\n ctx = SectionedContext(\n self, info_name=info_name, parent=parent, sections=self.sections, **extra\n )\n with ctx.scope(cleanup=False):\n self.parse_args(ctx, args)\n return ctx", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def make_context(self, args, **kwargs):\n #The following headers will be available from Auth filter:\n #'X-Tenant-Id', 'X-Tenant-Name', 'X-User-Id',\n #'X-User-Name', 'X-Roles'\n context_params = {'auth_tok' : args.headers['X-Auth-Token'],\n 'user' : args.headers['X-User-Id'],\n 'tenant' : args.headers['X-Tenant-Id'] }\n\n LOG.debug(\"Building context with params: %s\" % context_params)\n \n return ReddwarfContext(**context_params)", "def create_context(cls):\n pass", "def _extra_context(self):\r\n return {}", "def __new__(cls, *args, **kwargs):\n ores = super(BaseDataObject, cls).__new__(cls)\n if cls.context is not None:\n ores.context = cls.context\n ores.add_contextualization(cls.context, ores)\n res = ores\n else:\n ores.context = None\n res = ores\n\n return res", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def new_context(self):\n return dict()", "def prepare_context(self, activity, context, typename=None):\n context.update({\n 'activity': activity,\n 'object': activity.snapshot,\n 'typename': typename,\n })\n return context", "def context(self) -> CONTEXT:", "def svn_client_create_context(svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx", "def create_context(self, content, define=None, parent=None):\n return content", "def get_context(self, extra_ctx=None, **kwargs):\n ctx = {\n 'user': self.user,\n }\n if extra_ctx:\n ctx.update(extra_ctx)\n ctx.update(kwargs)\n return ctx", "def add_context(self):\n return {}", "def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)", "def polyCreaseCtx(*args, createSet: AnyStr=\"\", exists: bool=True, extendSelection: bool=True,\n image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, 
bool]=\"\", image3:\n Union[AnyStr, bool]=\"\", relative: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def _make_context():\n return {'app': app, 'db': db, 'User': User}", "def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }" ]
[ "0.6352824", "0.61252177", "0.59263754", "0.59086", "0.5668162", "0.5635775", "0.5630362", "0.5616696", "0.559728", "0.5584677", "0.55576473", "0.55489206", "0.5496291", "0.5392509", "0.53736514", "0.53543055", "0.5328164", "0.5325613", "0.52986324", "0.5296754", "0.5292805", "0.5287339", "0.5283487", "0.5283487", "0.5283487", "0.5283487", "0.5283487", "0.5283487", "0.5278301", "0.5272705" ]
0.65030646
0
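The record above keeps only the indicated attribute columns. A minimal list-based sketch of that column projection follows; the sample table and attribute names are assumptions, not part of the original corpus.

table = [[1, 1, 0],
         [1, 0, 1]]
attributes = ["a", "b", "c"]

keep = ["a", "c"]
cols = [attributes.index(name) for name in keep]

# Project every row onto the selected columns.
sub_table = [[row[j] for j in cols] for row in table]
print(sub_table)  # [[1, 0], [1, 1]]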
Extract a subtable containing only rows that satisfy the condition. Return a list of object names and a subtable.
def _extract_subtable_by_condition(self, condition):
    indices = [i for i in range(len(self)) if condition(i)]
    return ([self.objects[i] for i in indices],
            [self.table[i] for i in indices])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtable(self):\n return self._subtable", "def _extract_subtable_by_attribute_values(self, values, \n mode=\"and\"):\n self._check_attribute_names(list(values.keys()))\n if mode == \"and\":\n indices = [i for i in range(len(self)) if self._has_values(i, values)]\n elif mode == \"or\":\n indices = [i for i in range(len(self)) if self._has_at_least_one_value(i, values)]\n return ([self.objects[i] for i in indices],\n [self.table[i] for i in indices])", "def extractSubTable(self,**dico):\n #\n if not len(dico):\n return self.copy()\n elif len(dico)>1:\n msg='only one argument allowed.'\n raise msg\n #\n cle=list(dico.keys())[0]\n val=dico[cle]\n cle=cle.lower()\n if cle=='withvaluesincolumn':\n model,numeroColonne=val[0],val[1]\n listTypeCheck(model,[IntType,FloatType])\n model=[float(x) for x in model]\n verifyType(numeroColonne,[StringType,\\\n IntType,FloatType])\n tttitres=self.getColumnNames()\n ttunits = self.getColumnUnits()\n if type(numeroColonne) is StringType:\n for tit in tttitres:\n if tit==numeroColonne:\n numCol=tttitres.index(tit)\n pass\n pass\n pass\n else:\n if numeroColonne>=len(tttitres):\n msg='The table does not have\\n'\n msg+='that number of columns : %s'%numeroColonne\n raise msg\n numCol=numeroColonne\n pass\n if len(val)==3:\n eps=val[2]\n pass\n else:\n eps=1.e-15\n pass\n new=DataTable(self.getName(),tttitres,ttunits)\n nlig=self.getNbRows()\n ip=0\n comp=[float(x) for x in self.getColumn(numCol)]\n for ip in range(len(model)):\n for i in range(len(comp)):\n value=comp[i]\n if ip==1 and i==len(comp)-1:\n pass\n if areClose(value,model[ip],eps,'rel'):\n new.addRow(self.getRow(i))\n pass\n if ip==len(model):\n break\n pass\n pass\n pass \n else:\n valeurs=toList(val)\n tttitres=self.getColumnNames()\n ttunits = self.getColumnUnits()\n for st in ['row','column']:\n if st=='row':nn=self.getNbRows()\n if st=='column':nn=self.getNbColumns()\n if cle.find(st)>=0:\n cleOk=st\n pass\n if cle==st:\n if len(valeurs) != 1:\n raise Exception(\" list length problem within the extractSubTable function\")\n if cle=='from'+st: valeurs=valeurs[valeurs[0]:nn-1]\n if cle=='to'+st: valeurs=valeurs[0:valeurs[0]]\n if cle.find('name')>=0:\n newv=[]\n for v in valeurs:\n for tit in tttitres:\n if tit==v:\n newv.append(tttitres.index(tit))\n break\n pass\n pass\n valeurs=newv\n pass\n pass\n if cleOk=='row':\n newtitres=tttitres\n newunits = ttunits\n pass\n if cleOk=='column':\n newtitres=[]\n newunits = []\n for i in valeurs:\n newtitres.append(tttitres[i])\n if len(ttunits): newunits.append(ttunits[i])\n pass\n pass\n new=DataTable(self.getName(),newtitres,newunits)\n for i in valeurs:\n if cleOk=='row':\n liste=self.getRow(i)\n new.addRow(liste)\n pass\n if cleOk=='column':\n liste=self.getColumn(i)\n new.addColumnValues(liste)\n pass\n pass\n pass\n return new", "def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)", "def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls", "def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n 
selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable", "def generate_subtable(X_table, y_values, rules):\n X_new, y_new = [], []\n for row, label in zip(X_table, y_values):\n row_passed = True\n for index, value in rules:\n if(row[index] != value):\n row_passed = False\n break\n if(row_passed):\n X_new.append(row)\n y_new.append(label)\n\n return X_new, y_new", "def __and__(self, other):\n tmp = [ r for r in self.rows if r in other.rows ]\n return Table(tmp)", "def get_table_subset(table, batches):\n idxs = np.array([])\n for batch in batches:\n idxs = np.append(idxs, np.where(table['batch'] == batch)[0])\n\n idxs = idxs.astype(int)\n return table.iloc[idxs]", "def matching_objects(self, filter_deleted):\n from rome.core.orm.utils import get_literal_query\n from rome.lang.sql_parser import QueryParser\n from rome.core.rows.rows import construct_rows\n\n read_deleted = self.read_deleted\n if filter_deleted:\n read_deleted = \"no\"\n\n if self._autoflush:\n if self.session is not None:\n self.session.commit()\n\n if not self.query_tree:\n sql_query = get_literal_query(self.sa_query)\n parser = QueryParser()\n query_tree = parser.parse(sql_query)\n else:\n query_tree = self.query_tree\n\n if not self.entity_class_registry:\n self.entity_class_registry = self._extract_entity_class_registry()\n entity_class_registry = self.entity_class_registry\n\n # Collecting variables of sub queries\n subqueries_variables = {}\n for (variable_name, sub_query_tree) in query_tree.variables.iteritems():\n sub_query = Query()\n sub_query.set_query_tree(sub_query_tree)\n sub_query.set_entity_class_registry(entity_class_registry)\n result = sub_query.all()\n subqueries_variables[variable_name] = result\n\n rows = construct_rows(query_tree,\n entity_class_registry,\n read_deleted=read_deleted,\n subqueries_variables= subqueries_variables)\n\n def row_function(row, column_descriptions, decoder):\n from rome.core.session.utils import ObjectAttributeRefresher\n final_row = []\n one_is_an_object = False\n object_attribute_refresher = ObjectAttributeRefresher()\n for column_description in column_descriptions:\n if type(column_description[\"type\"]) in [Integer, String]:\n row_key = column_description[\"entity\"].__table__.name.capitalize(\n )\n property_name = column_description[\"name\"]\n value = None\n if row_key in row and property_name in row[row_key]:\n value = row[row_key].get(property_name, None)\n else:\n # It seems that we are parsing the result of a function call\n column_description_expr = column_description.get(\"expr\",\n None)\n if column_description_expr is not None:\n property_name = str(column_description_expr)\n value = row.get(property_name, None)\n if value is not None:\n final_row += [value]\n else:\n logging.error(\n \"Could not understand how to get the value of '%s' with this: '%s'\"\n % (column_description.get(\"expr\", \"??\"), row))\n elif type(column_description[\"type\"]) == DeclarativeMeta:\n one_is_an_object = True\n row_key = column_description[\"entity\"].__table__.name\n new_object = column_description[\"entity\"]()\n attribute_names = map(lambda x: x.key, 
list(\n column_description[\"entity\"].__table__.columns))\n for attribute_name in attribute_names:\n value = decoder.decode(row[row_key].get(attribute_name,\n None))\n setattr(new_object, attribute_name, value)\n\n if \"___version_number\" in row[row_key]:\n setattr(new_object, \"___version_number\", row[row_key][\"___version_number\"])\n\n load_options = None\n if hasattr(self.sa_query, \"_with_options\"):\n load_options = self.sa_query._with_options\n object_attribute_refresher.refresh(new_object, load_options=load_options)\n final_row += [new_object]\n else:\n logging.error(\"Unsupported type: '%s'\" %\n (column_description[\"type\"]))\n if not one_is_an_object:\n return [final_row]\n else:\n return final_row\n\n def row_function_subquery(row, attributes, decoder):\n result = []\n for attribute in attributes:\n tablename = attribute.split(\".\")[0]\n attribute_name = attribute.split(\".\")[1]\n result += [row[tablename][attribute_name]]\n return result\n\n decoder = Decoder()\n\n if len(self.sa_query.column_descriptions) > 0:\n final_rows = map(lambda r: row_function(\n r, self.sa_query.column_descriptions, decoder), rows)\n else:\n final_rows = map(lambda r: row_function_subquery(\n r, self.query_tree.attributes, decoder), rows)\n\n if len(self.sa_query.column_descriptions) <= 1:\n # Flatten the list\n final_rows = [item for sublist in final_rows for item in sublist]\n\n # Add watcher on objects\n if self.session is not None:\n for obj in final_rows:\n if hasattr(obj, \"id\"):\n self.session.watch(obj)\n\n return final_rows", "def where(self, predicate: WhereClause = lambda row: True) -> 'Table':\n where_table = Table(self.columns, self.types)\n for row in self.rows:\n if predicate(row):\n values = [row[column] for column in self.columns]\n where_table.insert(values)\n return where_table", "def get_subset(df, constraints):\n for constraint in constraints:\n subset = df.loc[df[constraint[0]].isin(constraint[1])]\n df = subset\n return subset", "def unsorted_not_distinct(table1, table2, subset=False):\n\n only_in_table1 = []\n if subset:\n # When subset, a row in table1 is not subset,\n # if its contains more instances of a row than table2\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if count1 > count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n else: # not Subset\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if count1 != count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n return only_in_table1", "def filter(self, name, filterfn) :\n\n ct = list(zip(self.get_cols(), self.get_types()))\n new_rows = [row for row in self if filterfn(row.as_dict())]\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows(new_rows)\n return new_table", "def filter_by_employee(table, employee_id):\n operations = []\n employee_id_index = 1\n for record in table:\n id = record[employee_id_index]\n if id == employee_id:\n operations.append(record)\n return operations", "def get_list(self, table, q_filter=None):\n try:\n result = []\n with self.lock:\n for _, row in self._find(table, self._format_filter(q_filter)):\n result.append(deepcopy(row))\n return result\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def find_table(self):\n tables = self.document.tables\n header = []\n for table in tables:\n for row in table.rows:\n header[:] = []\n for cell in row.cells:\n for 
para in cell.paragraphs:\n header.append(para.text.strip(' '))\n # new versions of final CAPA's keep project information in a table\n if 'Project Information' in header:\n self.read_new_format(table)\n # check if elements in findings is also in header\n cond = len(header) == 5 and header[4] == 'Rating'\n if cond or [x for x in self.findings for y in header if x in y] == self.findings:\n self.table = table\n return", "def filter(self, func):\n n = len(self.data['id'])\n new_table = []\n for i in range(n):\n row = dict([(col, self.data[col][i]) for col in self.cols])\n if func(row):\n new_table.append(row)\n for col in self.cols:\n self.data[col] = []\n for row in new_table:\n self.data[col].append(row[col])\n return self", "def _subset(self, idxs):\n vertices = [self.vertices[i] for i in idxs]\n if hasattr(self, \"data\"):\n data = Table(data=[self.data._data[i] for i in idxs], fields=self.data.fields)\n return type(self)(vertices, properties=self.properties, data=data, crs=self.crs)\n else:\n return type(self)(vertices, properties=self.properties, crs=self.crs)", "def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]", "def tab_unsorted(table1, table2, where_conditions, dw_rep):\n sql = \\\n \" SELECT * \" + \\\n \" FROM \" + table1 + \\\n \" AS table1 \" + \\\n \" WHERE NOT EXISTS\" \\\n \" ( \" + \\\n \" SELECT NULL \" + \\\n \" FROM \" + table2 + \\\n \" AS table2 \" + \\\n \" WHERE \" + \" AND \".join(where_conditions) + \\\n \" ) \"\n\n cursor = dw_rep.connection.cursor()\n cursor.execute(sql)\n return cursor.fetchall()", "def take(sourceTable, listOfFields):\n # print table\n # if not sourceTable:\n # print 'error: wrong separator'\n # exit()\n try:\n result = []\n [result.append([i[j] for i in sourceTable]) for j in listOfFields]\n except:\n print 'error: wrong column number'\n exit()\n return result", "def where(self, predicate=lambda row: True):\n where_table = Table(self.columns)\n where_table.rows = list(filter(predicate, self.rows))\n return where_table", "def where(self, label, filter_fn):\n new_label = []\n new_rows = []\n for x in self.column_labels:\n new_label.append(x)\n # filter(is_even, [1,2,3,4])\n \n for x in self.rows:\n if filter_fn(x[self.column_labels.index(label)]):\n new_row = []\n new_row += x\n new_rows.append(new_row)\n \n\n new_Table = T88ble(new_rows, new_label)\n\n return new_Table", "async def filter(message, *args, **kwargs):\r\n table = args[0]\r\n condition = \" \".join(args[1:])\r\n if not condition: output = DataTables[table]\r\n else: output = [e for e in DataTables[table] if safe_eval(condition, e)]\r\n fields = [\"ID\"]\r\n if \"name\" in DataTables[table][0]: fields.append(\"name\")\r\n if kwargs.get(\"fields\"):\r\n fields.extend((f.strip(\" \") for f in kwargs[\"fields\"].strip('\"').split(\",\")))\r\n if output: await reply(message, f\"```{tablestr(output, fields=fields)}```\")\r\n else: await reply(message, \"no match found\")", "def collect_predicates(subject, row, structure_row, files, stc, prefixes):\n related_predicates = set()\n for related_row in stc.iterrows():\n if (\n related_row[1][\"File\"] == row.File\n ) and (\n related_row[1][\"Sheet\"] == row.Sheet\n ) and (\n related_row[1][\"Indexed_Entity\"] == row.Column_Header\n ):\n if related_row[1][\"Type\"] == \"foreign key\":\n for foreign_pred in foreign(\n structure_row,\n related_row[1],\n files,\n stc,\n prefixes\n ):\n related_predicates.add(foreign_pred)\n elif (\n row[\"Definition or Relationship\"] in [\n \"rdfs:label\",\n 
\"schema:text\"\n ]\n ):\n related_predicates = related_predicates | label(\n row,\n structure_row,\n prefixes\n )\n tp = type_pred(row, prefixes)\n if tp:\n related_predicates.add(tp)\n return(related_predicates)", "def select(self, table_name: str, row_filter: dict) -> list:\n sql = 'SELECT * FROM ' + table_name + ' WHERE '\n for key, value in row_filter.items():\n if type(value) is tuple:\n sql += key + ' '\n sql += value[0] + ' '\n sql += \"'\" + value[1] + \"'\"\n elif type(value) == str:\n sql += key + ' = '\n sql += \"'\" + value + \"'\"\n elif value is None:\n sql += key + ' ISNULL '\n else:\n sql += key + ' = '\n sql += str(value)\n if not key == list(row_filter.keys())[-1]:\n sql += ' AND '\n return self.cursor.execute(sql).fetchall()", "def find_table_rows(self, table: Table, column: Column, operator: str, value: Any):\n self._requires_table(table)\n\n condition = to_condition(operator, value)\n\n matches = []\n for index in table.index:\n cell = table.get_cell(index, column)\n if condition(cell):\n matches.append(index)\n\n return table.get_table(matches)", "def tableSelFieldsFilter(tdata, columns):\n\tif areAllFieldsIncluded(tdata[0], columns):\n\t\tntdata = tdata\n\telse:\n\t\tntdata = list()\n\t\tfor rec in tdata:\n\t\t\t#print(rec)\n\t\t\t#print(columns)\n\t\t\tnrec = extractList(rec, columns)\n\t\t\tntdata.append(nrec)\n\treturn ntdata", "def read_all_rows(condition, database, table):\n connection = sqlite3.connect(database)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)\n rows = cursor.fetchall()\n cursor.close()\n connection.close()\n return rows" ]
[ "0.6559272", "0.6510114", "0.5544194", "0.5477949", "0.53826314", "0.52908766", "0.5278037", "0.52699465", "0.52350736", "0.52335566", "0.5231618", "0.52294695", "0.5219934", "0.52114856", "0.5207663", "0.5198288", "0.51861113", "0.517153", "0.51542264", "0.5143596", "0.51411086", "0.51305205", "0.5103173", "0.5089904", "0.50849813", "0.507906", "0.5062058", "0.50333184", "0.4974264", "0.49489173" ]
0.82139236
0
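A small sketch of the condition-based row filter in the record above, with a plain function standing in for the Context method; the predicate and the data are invented for the example.

objects = ["o1", "o2", "o3", "o4"]
table = [[1, 0], [0, 1], [1, 1], [0, 0]]

def extract_subtable_by_condition(objects, table, condition):
    # condition is a predicate on the row index, as in the original method.
    indices = [i for i in range(len(table)) if condition(i)]
    return ([objects[i] for i in indices],
            [table[i] for i in indices])

names, rows = extract_subtable_by_condition(objects, table, lambda i: table[i][0] == 1)
print(names)  # ['o1', 'o3']
print(rows)   # [[1, 0], [1, 1]]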
Extract a subtable containing only rows with certain column values. Return a list of object names and a subtable.
def _extract_subtable_by_attribute_values(self, values,
                                          mode="and"):
    self._check_attribute_names(list(values.keys()))
    if mode == "and":
        indices = [i for i in range(len(self)) if self._has_values(i, values)]
    elif mode == "or":
        indices = [i for i in range(len(self)) if self._has_at_least_one_value(i, values)]
    return ([self.objects[i] for i in indices],
            [self.table[i] for i in indices])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _extract_subtable_by_condition(self, condition):\n indices = [i for i in range(len(self)) if condition(i)]\n return ([self.objects[i] for i in indices],\n [self.table[i] for i in indices])", "def subtable(self):\n return self._subtable", "def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)", "def tableSelFieldsFilter(tdata, columns):\n\tif areAllFieldsIncluded(tdata[0], columns):\n\t\tntdata = tdata\n\telse:\n\t\tntdata = list()\n\t\tfor rec in tdata:\n\t\t\t#print(rec)\n\t\t\t#print(columns)\n\t\t\tnrec = extractList(rec, columns)\n\t\t\tntdata.append(nrec)\n\treturn ntdata", "def extractSubTable(self,**dico):\n #\n if not len(dico):\n return self.copy()\n elif len(dico)>1:\n msg='only one argument allowed.'\n raise msg\n #\n cle=list(dico.keys())[0]\n val=dico[cle]\n cle=cle.lower()\n if cle=='withvaluesincolumn':\n model,numeroColonne=val[0],val[1]\n listTypeCheck(model,[IntType,FloatType])\n model=[float(x) for x in model]\n verifyType(numeroColonne,[StringType,\\\n IntType,FloatType])\n tttitres=self.getColumnNames()\n ttunits = self.getColumnUnits()\n if type(numeroColonne) is StringType:\n for tit in tttitres:\n if tit==numeroColonne:\n numCol=tttitres.index(tit)\n pass\n pass\n pass\n else:\n if numeroColonne>=len(tttitres):\n msg='The table does not have\\n'\n msg+='that number of columns : %s'%numeroColonne\n raise msg\n numCol=numeroColonne\n pass\n if len(val)==3:\n eps=val[2]\n pass\n else:\n eps=1.e-15\n pass\n new=DataTable(self.getName(),tttitres,ttunits)\n nlig=self.getNbRows()\n ip=0\n comp=[float(x) for x in self.getColumn(numCol)]\n for ip in range(len(model)):\n for i in range(len(comp)):\n value=comp[i]\n if ip==1 and i==len(comp)-1:\n pass\n if areClose(value,model[ip],eps,'rel'):\n new.addRow(self.getRow(i))\n pass\n if ip==len(model):\n break\n pass\n pass\n pass \n else:\n valeurs=toList(val)\n tttitres=self.getColumnNames()\n ttunits = self.getColumnUnits()\n for st in ['row','column']:\n if st=='row':nn=self.getNbRows()\n if st=='column':nn=self.getNbColumns()\n if cle.find(st)>=0:\n cleOk=st\n pass\n if cle==st:\n if len(valeurs) != 1:\n raise Exception(\" list length problem within the extractSubTable function\")\n if cle=='from'+st: valeurs=valeurs[valeurs[0]:nn-1]\n if cle=='to'+st: valeurs=valeurs[0:valeurs[0]]\n if cle.find('name')>=0:\n newv=[]\n for v in valeurs:\n for tit in tttitres:\n if tit==v:\n newv.append(tttitres.index(tit))\n break\n pass\n pass\n valeurs=newv\n pass\n pass\n if cleOk=='row':\n newtitres=tttitres\n newunits = ttunits\n pass\n if cleOk=='column':\n newtitres=[]\n newunits = []\n for i in valeurs:\n newtitres.append(tttitres[i])\n if len(ttunits): newunits.append(ttunits[i])\n pass\n pass\n new=DataTable(self.getName(),newtitres,newunits)\n for i in valeurs:\n if cleOk=='row':\n liste=self.getRow(i)\n new.addRow(liste)\n pass\n if cleOk=='column':\n liste=self.getColumn(i)\n new.addColumnValues(liste)\n pass\n pass\n pass\n return new", "def take(sourceTable, listOfFields):\n # print table\n # if not sourceTable:\n # print 'error: wrong separator'\n # exit()\n try:\n result = []\n [result.append([i[j] for i in sourceTable]) for j in listOfFields]\n except:\n print 'error: wrong column number'\n exit()\n return result", "def filter(self, func):\n n = len(self.data['id'])\n new_table = []\n for i in range(n):\n row = dict([(col, self.data[col][i]) for col in self.cols])\n if func(row):\n new_table.append(row)\n for col in self.cols:\n self.data[col] = []\n for row in new_table:\n 
self.data[col].append(row[col])\n return self", "def get_table_subset(table, batches):\n idxs = np.array([])\n for batch in batches:\n idxs = np.append(idxs, np.where(table['batch'] == batch)[0])\n\n idxs = idxs.astype(int)\n return table.iloc[idxs]", "def filter(self, name, filterfn) :\n\n ct = list(zip(self.get_cols(), self.get_types()))\n new_rows = [row for row in self if filterfn(row.as_dict())]\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows(new_rows)\n return new_table", "def population_filter(metadata, subset=None, relation=None):\n\n pop = {'reference': None, 'qset': None}\n meta = pd.read_table(metadata, header=0)\n\n if subset is not None:\n pop['reference'] = list(meta.query(subset)['sample'])\n else:\n pop['reference'] = list(meta['sample'])\n\n if relation is not None:\n reference_meta = meta[meta['sample'].isin(pop['reference'])]\n group = reference_meta.groupby([relation])\n qset = []\n for _, df in group:\n qset.append(list(df['sample']))\n pop['qset'] = qset\n\n return pop\n\n # header_selection = '|'.join([s + '_' for s in population['sample']])\n # return dataframe.filter(regex=header_selection)", "def get_subset(df, constraints):\n for constraint in constraints:\n subset = df.loc[df[constraint[0]].isin(constraint[1])]\n df = subset\n return subset", "def _select_data(\n self, db: str, table: str, column_filters: Dict[str, str]\n ) -> List[List]:\n pass", "def select(self, table_name: str, row_filter: dict) -> list:\n sql = 'SELECT * FROM ' + table_name + ' WHERE '\n for key, value in row_filter.items():\n if type(value) is tuple:\n sql += key + ' '\n sql += value[0] + ' '\n sql += \"'\" + value[1] + \"'\"\n elif type(value) == str:\n sql += key + ' = '\n sql += \"'\" + value + \"'\"\n elif value is None:\n sql += key + ' ISNULL '\n else:\n sql += key + ' = '\n sql += str(value)\n if not key == list(row_filter.keys())[-1]:\n sql += ' AND '\n return self.cursor.execute(sql).fetchall()", "def filter_by_employee(table, employee_id):\n operations = []\n employee_id_index = 1\n for record in table:\n id = record[employee_id_index]\n if id == employee_id:\n operations.append(record)\n return operations", "def matching_objects(self, filter_deleted):\n from rome.core.orm.utils import get_literal_query\n from rome.lang.sql_parser import QueryParser\n from rome.core.rows.rows import construct_rows\n\n read_deleted = self.read_deleted\n if filter_deleted:\n read_deleted = \"no\"\n\n if self._autoflush:\n if self.session is not None:\n self.session.commit()\n\n if not self.query_tree:\n sql_query = get_literal_query(self.sa_query)\n parser = QueryParser()\n query_tree = parser.parse(sql_query)\n else:\n query_tree = self.query_tree\n\n if not self.entity_class_registry:\n self.entity_class_registry = self._extract_entity_class_registry()\n entity_class_registry = self.entity_class_registry\n\n # Collecting variables of sub queries\n subqueries_variables = {}\n for (variable_name, sub_query_tree) in query_tree.variables.iteritems():\n sub_query = Query()\n sub_query.set_query_tree(sub_query_tree)\n sub_query.set_entity_class_registry(entity_class_registry)\n result = sub_query.all()\n subqueries_variables[variable_name] = result\n\n rows = construct_rows(query_tree,\n entity_class_registry,\n read_deleted=read_deleted,\n subqueries_variables= subqueries_variables)\n\n def row_function(row, column_descriptions, decoder):\n from rome.core.session.utils import ObjectAttributeRefresher\n final_row = []\n one_is_an_object = False\n object_attribute_refresher = 
ObjectAttributeRefresher()\n for column_description in column_descriptions:\n if type(column_description[\"type\"]) in [Integer, String]:\n row_key = column_description[\"entity\"].__table__.name.capitalize(\n )\n property_name = column_description[\"name\"]\n value = None\n if row_key in row and property_name in row[row_key]:\n value = row[row_key].get(property_name, None)\n else:\n # It seems that we are parsing the result of a function call\n column_description_expr = column_description.get(\"expr\",\n None)\n if column_description_expr is not None:\n property_name = str(column_description_expr)\n value = row.get(property_name, None)\n if value is not None:\n final_row += [value]\n else:\n logging.error(\n \"Could not understand how to get the value of '%s' with this: '%s'\"\n % (column_description.get(\"expr\", \"??\"), row))\n elif type(column_description[\"type\"]) == DeclarativeMeta:\n one_is_an_object = True\n row_key = column_description[\"entity\"].__table__.name\n new_object = column_description[\"entity\"]()\n attribute_names = map(lambda x: x.key, list(\n column_description[\"entity\"].__table__.columns))\n for attribute_name in attribute_names:\n value = decoder.decode(row[row_key].get(attribute_name,\n None))\n setattr(new_object, attribute_name, value)\n\n if \"___version_number\" in row[row_key]:\n setattr(new_object, \"___version_number\", row[row_key][\"___version_number\"])\n\n load_options = None\n if hasattr(self.sa_query, \"_with_options\"):\n load_options = self.sa_query._with_options\n object_attribute_refresher.refresh(new_object, load_options=load_options)\n final_row += [new_object]\n else:\n logging.error(\"Unsupported type: '%s'\" %\n (column_description[\"type\"]))\n if not one_is_an_object:\n return [final_row]\n else:\n return final_row\n\n def row_function_subquery(row, attributes, decoder):\n result = []\n for attribute in attributes:\n tablename = attribute.split(\".\")[0]\n attribute_name = attribute.split(\".\")[1]\n result += [row[tablename][attribute_name]]\n return result\n\n decoder = Decoder()\n\n if len(self.sa_query.column_descriptions) > 0:\n final_rows = map(lambda r: row_function(\n r, self.sa_query.column_descriptions, decoder), rows)\n else:\n final_rows = map(lambda r: row_function_subquery(\n r, self.query_tree.attributes, decoder), rows)\n\n if len(self.sa_query.column_descriptions) <= 1:\n # Flatten the list\n final_rows = [item for sublist in final_rows for item in sublist]\n\n # Add watcher on objects\n if self.session is not None:\n for obj in final_rows:\n if hasattr(obj, \"id\"):\n self.session.watch(obj)\n\n return final_rows", "def find_table_rows(self, table: Table, column: Column, operator: str, value: Any):\n self._requires_table(table)\n\n condition = to_condition(operator, value)\n\n matches = []\n for index in table.index:\n cell = table.get_cell(index, column)\n if condition(cell):\n matches.append(index)\n\n return table.get_table(matches)", "def generate_subtable(X_table, y_values, rules):\n X_new, y_new = [], []\n for row, label in zip(X_table, y_values):\n row_passed = True\n for index, value in rules:\n if(row[index] != value):\n row_passed = False\n break\n if(row_passed):\n X_new.append(row)\n y_new.append(label)\n\n return X_new, y_new", "def unsorted_not_distinct(table1, table2, subset=False):\n\n only_in_table1 = []\n if subset:\n # When subset, a row in table1 is not subset,\n # if its contains more instances of a row than table2\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if 
count1 > count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n else: # not Subset\n for row in table1:\n count1 = table1.count(row)\n count2 = table2.count(row)\n if count1 != count2 or None in row.values():\n dic = row.copy()\n dic['count'] = count1\n only_in_table1.append(dic)\n\n return only_in_table1", "def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]", "def collect_predicates(subject, row, structure_row, files, stc, prefixes):\n related_predicates = set()\n for related_row in stc.iterrows():\n if (\n related_row[1][\"File\"] == row.File\n ) and (\n related_row[1][\"Sheet\"] == row.Sheet\n ) and (\n related_row[1][\"Indexed_Entity\"] == row.Column_Header\n ):\n if related_row[1][\"Type\"] == \"foreign key\":\n for foreign_pred in foreign(\n structure_row,\n related_row[1],\n files,\n stc,\n prefixes\n ):\n related_predicates.add(foreign_pred)\n elif (\n row[\"Definition or Relationship\"] in [\n \"rdfs:label\",\n \"schema:text\"\n ]\n ):\n related_predicates = related_predicates | label(\n row,\n structure_row,\n prefixes\n )\n tp = type_pred(row, prefixes)\n if tp:\n related_predicates.add(tp)\n return(related_predicates)", "def example():\n joined_table = [[1900, 170, 10], [0, 120, 10], [0, 120, 100], [2010, 120, 10], [1650, 200, 10]]\n remove_columns = [2]\n example_table = [[1900, 170], [0, 120]]\n\n annotated_table = query.decorate_table(example_table, remove_columns, joined_table)\n\n joined_schema = [\"I SHOULD NOT BE VISIBLE\", \"birth\", \"height\"] # the decorator column should never be in the output\n tree = decision_tree.make_tree(annotated_table)\n\n print(tree)\n print(query.where_segment(joined_schema, tree))", "def find_table(self):\n tables = self.document.tables\n header = []\n for table in tables:\n for row in table.rows:\n header[:] = []\n for cell in row.cells:\n for para in cell.paragraphs:\n header.append(para.text.strip(' '))\n # new versions of final CAPA's keep project information in a table\n if 'Project Information' in header:\n self.read_new_format(table)\n # check if elements in findings is also in header\n cond = len(header) == 5 and header[4] == 'Rating'\n if cond or [x for x in self.findings for y in header if x in y] == self.findings:\n self.table = table\n return", "def where(self, predicate: WhereClause = lambda row: True) -> 'Table':\n where_table = Table(self.columns, self.types)\n for row in self.rows:\n if predicate(row):\n values = [row[column] for column in self.columns]\n where_table.insert(values)\n return where_table", "def _cleaned_rows(c, table, metadata, data):\n columns = _columns(c, table)\n for row in data:\n yield _subdict(columns, merge(metadata, row))", "def get_list(self, table, q_filter=None):\n try:\n result = []\n with self.lock:\n for _, row in self._find(table, self._format_filter(q_filter)):\n result.append(deepcopy(row))\n return result\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def sub_columns(df):\n torque = [col for col in df.columns.tolist() if 'Torque' in col]\n return torque", "def filter_row(row: OrderedDict, sampling_features: set) -> bool:\n\n return row['feature_of_interest'] in sampling_features", "def select_all(self, table):\n select_table = \"SELECT * FROM {} WHERE delete_status = FALSE;\".format(table)\n self.cursor.execute(select_table)\n rows = self.cursor.fetchall()\n return rows", "def extract_tables(node):\n processed_set = set([node])\n 
table_set = set()\n working_set = [node]\n while working_set:\n elt = working_set.pop(0)\n if isinstance(elt, ML_NewTable):\n table_set.add(elt)\n elif not isinstance(elt, ML_LeafNode):\n for op_node in elt.inputs:\n if not op_node in processed_set:\n processed_set.add(op_node)\n working_set.append(op_node)\n return table_set", "def subsetting_data_columns(data, var_name = \"Reporter intensity corrected\", selected_cell_lines = [\"A549\", \"RKO\"], selected_replicates = [\"1\", \"2\"], selected_states = [\"S\"], selected_treatments = [\"1\", \"2\", \"3\"] ):\n data = data\n # split the data headers\n var_name = var_name\n selected_cell_lines = selected_cell_lines\n selected_replicates = selected_replicates\n selected_states = selected_states\n selected_treatments = selected_treatments\n \n selected_subset = []\n for i in data.columns:\n \n cell_line = i.split()[-1].split(\"_\")[0] \n state = i.split()[-1].split(\"_\")[1]\n replicate = i.split()[-1].split(\"_\")[2][-1]\n treatment = i.split()[-2]\n \n subset_str = var_name + \" \" \n if treatment in selected_treatments:\n subset_str += treatment + \" \"\n else:\n continue\n \n if cell_line in selected_cell_lines:\n subset_str += cell_line + \"_\"\n else:\n continue\n \n if state in selected_states:\n subset_str += state + \"_\"\n else:\n continue\n \n if replicate in selected_replicates:\n subset_str += \"Rep\"+replicate\n\n else:\n continue\n selected_subset.append(subset_str)\n return selected_subset" ]
[ "0.69696", "0.62196416", "0.6045123", "0.57857233", "0.5705896", "0.5616721", "0.5520197", "0.54634094", "0.5274531", "0.522673", "0.5220967", "0.52097815", "0.5191158", "0.5120592", "0.5120265", "0.5107816", "0.50978726", "0.5091551", "0.5087479", "0.50757533", "0.5070154", "0.50640535", "0.5055328", "0.5030518", "0.50060844", "0.4987805", "0.49855685", "0.4983942", "0.49784118", "0.49692464" ]
document_score: 0.6358995
document_rank: 1
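Taken together, the trailing fields above close out a single retrieval training record: the 30 snippets in negatives line up positionally with the 30 values in negative_scores, while the final two values, document_score and document_rank, describe the positive document itself. The short Python sketch below is only an illustration of how such a record could be consumed once parsed into a plain dict; the field names follow this dataset's schema, but the summarize_record helper, its record argument, and the printed summary are assumptions made for demonstration and are not part of the dataset.

# Hypothetical helper for one parsed row of this dataset. Field names match
# the record schema (query, document, negatives, negative_scores,
# document_score, document_rank); scores are stored as strings, so they are
# cast to float before use.
def summarize_record(record: dict) -> None:
    positive_score = float(record["document_score"])
    # negatives and negative_scores are assumed to be aligned by position
    # (30 entries each), so zipping them pairs every snippet with its score.
    scored_negatives = sorted(
        zip(record["negatives"], (float(s) for s in record["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    harder = sum(1 for _, score in scored_negatives if score >= positive_score)
    print(f"positive score: {positive_score} (rank {record['document_rank']})")
    print(f"negatives scoring at or above the positive: {harder} of {len(scored_negatives)}")
    # Show the hardest negatives first, identified by their first code line.
    for snippet, score in scored_negatives[:3]:
        first_line = snippet.splitlines()[0] if snippet else ""
        print(f"  {score:.4f}  {first_line}")

Under those assumptions, running the helper on the record shown here would report a positive score of 0.6358995 at rank 1, with a single negative score (0.69696) exceeding it.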